parent
1271980051
commit
2807ab709a
@ -1,290 +0,0 @@
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import List, Optional
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel, Field
|
||||
from swarm_models import OpenAIChat
|
||||
|
||||
from swarms import Agent
|
||||
from swarms.prompts.finance_agent_sys_prompt import (
|
||||
FINANCIAL_AGENT_SYS_PROMPT,
|
||||
)
|
||||
|
||||
# Read variables (e.g. OPENAI_API_KEY) from a local .env file into the process env.
load_dotenv()

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class
model = OpenAIChat(
    openai_api_key=api_key,  # None if OPENAI_API_KEY is unset; fails at request time
    model_name="gpt-4o-mini",
    temperature=0.1,
    max_tokens=2000,
)

# Initialize the agent that the RecursiveAgent below wraps by default.
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
    # output_type="json",
    output_type=str,
)
|
||||
|
||||
|
||||
class ThoughtLog(BaseModel):
    """
    Pydantic model to log each thought generated by the agent.
    """

    # Raw text of a single thought produced during tree exploration.
    thought: str
    # Capture time; default_factory evaluates datetime.now once per instance.
    timestamp: datetime = Field(default_factory=datetime.now)
    # Depth in the Tree of Thoughts at which this thought was generated.
    recursion_depth: int
|
||||
|
||||
|
||||
class MemoryLog(BaseModel):
    """
    Pydantic model to log memory states during the agent's execution.

    Fields:
        thoughts: Chronological log of thoughts explored so far.
        final_result: Final answer/summary once the task completes (None until then).
        completion_status: True once completion criteria have been met.
        task: The task or query the agent is working on (required).
    """

    # default_factory makes the per-instance list explicit rather than
    # relying on pydantic's implicit deep-copy of a mutable class default.
    thoughts: List[ThoughtLog] = Field(default_factory=list)
    final_result: Optional[str] = None
    completion_status: bool = False
    task: str
|
||||
|
||||
|
||||
class RecursiveAgent(Agent):
    """
    An autonomous agent built on top of the Swarms Agent framework.
    Capable of recursively exploring tasks using a Tree of Thoughts mechanism.

    Attributes:
    - agent_name (str): The name of the agent.
    - system_prompt (str): The system prompt guiding the agent's behavior.
    - max_loops (int): The maximum depth for recursion in the Tree of Thoughts.
    - memory_limit (int): The maximum number of thought logs to store.
    - memory (MemoryLog): Pydantic model to store thoughts and logs.
    """

    def __init__(
        self,
        agent_name: str,
        system_prompt: str,
        max_loops: int,
        memory_limit: int = 5,
        # NOTE(review): this default captures the module-level `agent`
        # *instance* at class-definition time — TODO confirm it was not
        # meant to be the Agent class itself (see call below).
        agent: Agent = agent,
        *args,
        **kwargs,
    ) -> None:
        """
        Initialize the RecursiveAgent.

        :param agent_name: Name of the agent.
        :param system_prompt: The prompt guiding the agent's behavior.
        :param max_loops: The maximum number of recursive loops allowed.
        :param memory_limit: Maximum number of memory entries.
        :param kwargs: Additional arguments passed to the base Agent.
        """
        # system_prompt / max_loops are stored on self but deliberately
        # not forwarded to the base initializer here.
        super().__init__(agent_name=agent_name, **kwargs)
        self.system_prompt = system_prompt
        self.max_loops = max_loops
        # Memory starts with an empty task; run_tree_of_thoughts fills it in.
        self.memory = MemoryLog(task="")
        self.memory_limit = memory_limit  # Max thoughts to store
        self.finished = False  # Task completion flag
        # NOTE(review): `agent` is an Agent *instance* (default above) yet is
        # called with constructor-style kwargs. This works only if Agent
        # instances are callable with these kwargs — presumably the intent
        # was to construct a fresh Agent; verify against the swarms API.
        self.agent = agent(
            agent_name=agent_name,
            system_prompt=system_prompt,
            max_loops=max_loops,
        )
        logger.info(
            f"Initialized agent {self.agent_name} with recursion limit of {self.max_loops}"
        )

    def add_to_memory(
        self, thought: str, recursion_depth: int
    ) -> None:
        """
        Add a thought to the agent's memory using the Pydantic ThoughtLog model.

        :param thought: The thought generated by the agent.
        :param recursion_depth: The depth of the current recursion.
        """
        # FIFO eviction: drop the oldest entry once the cap is reached.
        if len(self.memory.thoughts) >= self.memory_limit:
            logger.debug(
                "Memory limit reached, discarding the oldest memory entry."
            )
            self.memory.thoughts.pop(0)  # Maintain memory size
        thought_log = ThoughtLog(
            thought=thought, recursion_depth=recursion_depth
        )
        self.memory.thoughts.append(thought_log)
        logger.info(
            f"Added thought to memory at depth {recursion_depth}: {thought}"
        )

    def check_if_finished(self, current_thought: str) -> bool:
        """
        Check if the task is finished by evaluating the current thought.

        :param current_thought: The current thought or reasoning result.
        :return: True if task completion keywords are found, else False.
        """
        # Define criteria for task completion based on keywords
        completion_criteria = [
            "criteria met",
            "task completed",
            "done",
            "fully solved",
        ]
        # Case-insensitive substring match; sets the sticky `finished` flag
        # and mirrors the status into memory.
        if any(
            keyword in current_thought.lower()
            for keyword in completion_criteria
        ):
            self.finished = True
            self.memory.completion_status = True
            logger.info(
                f"Task completed with thought: {current_thought}"
            )
        return self.finished

    def run_tree_of_thoughts(
        self, task: str, current_depth: int = 0
    ) -> Optional[str]:
        """
        Recursively explore thought branches based on the Tree of Thoughts mechanism.

        :param task: The task or query to be reasoned upon.
        :param current_depth: The current recursion depth.
        :return: The final solution or message indicating task completion or failure.
        """
        logger.debug(f"Current recursion depth: {current_depth}")
        # Depth guard: max_loops bounds the recursion, not loop iterations.
        if current_depth >= self.max_loops:
            logger.warning(
                "Max recursion depth reached, task incomplete."
            )
            return "Max recursion depth reached, task incomplete."

        # Generate multiple possible thoughts/branches using Swarms logic
        response = self.generate_thoughts(task)
        thoughts = self.extract_thoughts(response)
        self.memory.task = task  # Log the task in memory

        # Store thoughts in memory
        for idx, thought in enumerate(thoughts):
            logger.info(
                f"Exploring thought {idx + 1}/{len(thoughts)}: {thought}"
            )
            self.add_to_memory(thought, current_depth)

            if self.check_if_finished(thought):
                self.memory.final_result = (
                    thought  # Log the final result
                )
                return f"Task completed with thought: {thought}"

            # Recursive exploration: each thought becomes the next task.
            result = self.run_tree_of_thoughts(
                thought, current_depth + 1
            )

            # A deeper call may have flipped the sticky flag; stop early.
            if self.finished:
                return result

        return "Exploration done but no valid solution found."

    def generate_thoughts(self, task: str) -> str:
        """
        Generate thoughts for the task using the Swarms framework.

        :param task: The task or query to generate thoughts for.
        :return: A string representing multiple thought branches generated by Swarms logic.
        """
        logger.debug(f"Generating thoughts for task: {task}")
        response = self.agent.run(
            task
        )  # Assuming Swarms uses an LLM for thought generation
        return response

    def extract_thoughts(self, response: str) -> List[str]:
        """
        Extract individual thoughts/branches from the LLM's response.

        :param response: The response string containing multiple thoughts.
        :return: A list of extracted thoughts.
        """
        logger.debug(f"Extracting thoughts from response: {response}")
        # One thought per non-empty line, whitespace-trimmed.
        return [
            thought.strip()
            for thought in response.split("\n")
            if thought
        ]

    def reflect(self) -> str:
        """
        Reflect on the task and thoughts stored in memory, providing a summary of the process.
        The reflection will be generated by the LLM based on the stored thoughts.

        :return: Reflection output generated by the LLM.
        """
        logger.debug("Running reflection on the task.")

        # Compile all thoughts into a prompt for reflection
        thoughts_for_reflection = "\n".join(
            [
                f"Thought {i + 1}: {log.thought}"
                for i, log in enumerate(self.memory.thoughts)
            ]
        )
        reflection_prompt = (
            f"Reflect on the following task and thoughts:\n"
            f"Task: {self.memory.task}\n"
            f"Thoughts:\n{thoughts_for_reflection}\n"
            "What did we learn from this? How could this process be improved?"
        )

        # Use the agent's LLM to generate a reflection based on the memory
        reflection_response = self.agent.run(reflection_prompt)
        # NOTE: this overwrites any final_result set by run_tree_of_thoughts.
        self.memory.final_result = reflection_response

        logger.info(f"Reflection generated: {reflection_response}")
        return reflection_response
|
||||
|
||||
|
||||
# # Example usage of the RecursiveAgent
|
||||
|
||||
# if __name__ == "__main__":
|
||||
# # Example initialization and running
|
||||
# agent_name = "Autonomous-Financial-Agent"
|
||||
# system_prompt = "You are a highly intelligent agent designed to handle financial queries efficiently."
|
||||
# max_loops = 1
|
||||
|
||||
# # Initialize the agent using Swarms
|
||||
# agent = RecursiveAgent(
|
||||
# agent_name=agent_name,
|
||||
# system_prompt=system_prompt,
|
||||
# max_loops=max_loops
|
||||
# )
|
||||
|
||||
# # Define the task for the agent
|
||||
# task = "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria?"
|
||||
|
||||
# # Run the tree of thoughts mechanism
|
||||
# result = agent.run_tree_of_thoughts(task)
|
||||
# logger.info(f"Final result: {result}")
|
||||
|
||||
# # Perform reflection
|
||||
# reflection = agent.reflect()
|
||||
# logger.info(f"Reflection: {reflection}")
|
@ -1,48 +0,0 @@
|
||||
import os
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment variables
load_dotenv()

# Retrieve the Groq API key from the environment variable
# (the Groq endpoint below is OpenAI-API-compatible).
api_key = os.getenv("GROQ_API_KEY")

# Initialize the model for OpenAI Chat
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)

# Initialize the agent with automated prompt engineering enabled
agent = Agent(
    agent_name="Financial-Analysis-Agent",
    system_prompt=None,  # System prompt is dynamically generated
    agent_description=None,
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=False,
    dynamic_temperature_enabled=True,
    saved_state_path="finance_agent.json",
    user_name="Human:",
    return_step_meta=False,
    output_type="string",
    streaming_on=False,
    auto_generate_prompt=True,  # Enable automated prompt engineering
)

# Run the agent with a task description and specify the device
agent.run(
    "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria",
    ## Will design a system prompt based on the task if description and system prompt are None
    device="cpu",
)

# Print the dynamically generated system prompt
print(agent.system_prompt)
|
@ -1,182 +0,0 @@
|
||||
import os
|
||||
from swarms import Agent, SequentialWorkflow
|
||||
from swarm_models import OpenAIChat
|
||||
|
||||
# model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"))
|
||||
# Ticker/name of the acquisition target interpolated into every prompt below.
company = "TGSC"
# Get the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Model (Groq's OpenAI-compatible endpoint)
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


# Initialize the Managing Director agent (first in the sequential chain)
managing_director = Agent(
    agent_name="Managing-Director",
    system_prompt=f"""
    As the Managing Director at Blackstone, your role is to oversee the entire investment analysis process for potential acquisitions.
    Your responsibilities include:
    1. Setting the overall strategy and direction for the analysis
    2. Coordinating the efforts of the various team members and ensuring a comprehensive evaluation
    3. Reviewing the findings and recommendations from each team member
    4. Making the final decision on whether to proceed with the acquisition

    For the current potential acquisition of {company}, direct the tasks for the team to thoroughly analyze all aspects of the company, including its financials, industry position, technology, market potential, and regulatory compliance. Provide guidance and feedback as needed to ensure a rigorous and unbiased assessment.
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="managing-director.json",
)

# Initialize the Vice President of Finance
vp_finance = Agent(
    agent_name="VP-Finance",
    system_prompt=f"""
    As the Vice President of Finance at Blackstone, your role is to lead the financial analysis of potential acquisitions.
    For the current potential acquisition of {company}, your tasks include:
    1. Conducting a thorough review of {company}' financial statements, including income statements, balance sheets, and cash flow statements
    2. Analyzing key financial metrics such as revenue growth, profitability margins, liquidity ratios, and debt levels
    3. Assessing the company's historical financial performance and projecting future performance based on assumptions and market conditions
    4. Identifying any financial risks or red flags that could impact the acquisition decision
    5. Providing a detailed report on your findings and recommendations to the Managing Director

    Be sure to consider factors such as the sustainability of {company}' business model, the strength of its customer base, and its ability to generate consistent cash flows. Your analysis should be data-driven, objective, and aligned with Blackstone's investment criteria.
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="vp-finance.json",
)

# Initialize the Industry Analyst
industry_analyst = Agent(
    agent_name="Industry-Analyst",
    system_prompt=f"""
    As the Industry Analyst at Blackstone, your role is to provide in-depth research and analysis on the industries and markets relevant to potential acquisitions.
    For the current potential acquisition of {company}, your tasks include:
    1. Conducting a comprehensive analysis of the industrial robotics and automation solutions industry, including market size, growth rates, key trends, and future prospects
    2. Identifying the major players in the industry and assessing their market share, competitive strengths and weaknesses, and strategic positioning
    3. Evaluating {company}' competitive position within the industry, including its market share, differentiation, and competitive advantages
    4. Analyzing the key drivers and restraints for the industry, such as technological advancements, labor costs, regulatory changes, and economic conditions
    5. Identifying potential risks and opportunities for {company} based on the industry analysis, such as disruptive technologies, emerging markets, or shifts in customer preferences

    Your analysis should provide a clear and objective assessment of the attractiveness and future potential of the industrial robotics industry, as well as {company}' positioning within it. Consider both short-term and long-term factors, and provide evidence-based insights to inform the investment decision.
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="industry-analyst.json",
)

# Initialize the Technology Expert
tech_expert = Agent(
    agent_name="Tech-Expert",
    system_prompt=f"""
    As the Technology Expert at Blackstone, your role is to assess the technological capabilities, competitive advantages, and potential risks of companies being considered for acquisition.
    For the current potential acquisition of {company}, your tasks include:
    1. Conducting a deep dive into {company}' proprietary technologies, including its robotics platforms, automation software, and AI capabilities
    2. Assessing the uniqueness, scalability, and defensibility of {company}' technology stack and intellectual property
    3. Comparing {company}' technologies to those of its competitors and identifying any key differentiators or technology gaps
    4. Evaluating {company}' research and development capabilities, including its innovation pipeline, engineering talent, and R&D investments
    5. Identifying any potential technology risks or disruptive threats that could impact {company}' long-term competitiveness, such as emerging technologies or expiring patents

    Your analysis should provide a comprehensive assessment of {company}' technological strengths and weaknesses, as well as the sustainability of its competitive advantages. Consider both the current state of its technology and its future potential in light of industry trends and advancements.
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="tech-expert.json",
)

# Initialize the Market Researcher
market_researcher = Agent(
    agent_name="Market-Researcher",
    system_prompt=f"""
    As the Market Researcher at Blackstone, your role is to analyze the target company's customer base, market share, and growth potential to assess the commercial viability and attractiveness of the potential acquisition.
    For the current potential acquisition of {company}, your tasks include:
    1. Analyzing {company}' current customer base, including customer segmentation, concentration risk, and retention rates
    2. Assessing {company}' market share within its target markets and identifying key factors driving its market position
    3. Conducting a detailed market sizing and segmentation analysis for the industrial robotics and automation markets, including identifying high-growth segments and emerging opportunities
    4. Evaluating the demand drivers and sales cycles for {company}' products and services, and identifying any potential risks or limitations to adoption
    5. Developing financial projections and estimates for {company}' revenue growth potential based on the market analysis and assumptions around market share and penetration

    Your analysis should provide a data-driven assessment of the market opportunity for {company} and the feasibility of achieving our investment return targets. Consider both bottom-up and top-down market perspectives, and identify any key sensitivities or assumptions in your projections.
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="market-researcher.json",
)

# Initialize the Regulatory Specialist
regulatory_specialist = Agent(
    agent_name="Regulatory-Specialist",
    system_prompt=f"""
    As the Regulatory Specialist at Blackstone, your role is to identify and assess any regulatory risks, compliance requirements, and potential legal liabilities associated with potential acquisitions.
    For the current potential acquisition of {company}, your tasks include:
    1. Identifying all relevant regulatory bodies and laws that govern the operations of {company}, including industry-specific regulations, labor laws, and environmental regulations
    2. Reviewing {company}' current compliance policies, procedures, and track record to identify any potential gaps or areas of non-compliance
    3. Assessing the potential impact of any pending or proposed changes to relevant regulations that could affect {company}' business or create additional compliance burdens
    4. Evaluating the potential legal liabilities and risks associated with {company}' products, services, and operations, including product liability, intellectual property, and customer contracts
    5. Providing recommendations on any regulatory or legal due diligence steps that should be taken as part of the acquisition process, as well as any post-acquisition integration considerations

    Your analysis should provide a comprehensive assessment of the regulatory and legal landscape surrounding {company}, and identify any material risks or potential deal-breakers. Consider both the current state and future outlook, and provide practical recommendations to mitigate identified risks.
    """,
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="regulatory-specialist.json",
)

# Create a list of agents; SequentialWorkflow runs them in this order.
agents = [
    managing_director,
    vp_finance,
    industry_analyst,
    tech_expert,
    market_researcher,
    regulatory_specialist,
]


swarm = SequentialWorkflow(
    name="blackstone-private-equity-advisors",
    agents=agents,
)

# NOTE(review): the run() task mentions nvidia while the prompts above are
# templated on {company} ("TGSC") — confirm which target is intended.
print(
    swarm.run(
        "Analyze nvidia if it's a good deal to invest in now 10B"
    )
)
|
@ -1,100 +0,0 @@
|
||||
import os
|
||||
from swarms import Agent, ConcurrentWorkflow
|
||||
from swarm_models import OpenAIChat
|
||||
from loguru import logger
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment variables
load_dotenv()

# Retrieve the Groq API key from the environment variable
api_key = os.getenv("GROQ_API_KEY")

# Initialize the model for OpenAI Chat (Groq's OpenAI-compatible endpoint)
model = OpenAIChat(
    openai_api_base="https://api.groq.com/openai/v1",
    openai_api_key=api_key,
    model_name="llama-3.1-70b-versatile",
    temperature=0.1,
)


# Rotating file sink so long runs don't grow one unbounded log file.
logger.add("swarms_example.log", rotation="10 MB")

# Three identical analysis agents, distinguished only by index in their
# name and autosave path, to be run concurrently on the same task.
agents = [
    Agent(
        agent_name=f"Term-Sheet-Analysis-Agent-{i}",
        system_prompt="Analyze the term sheet for investment opportunities.",
        llm=model,
        max_loops=1,
        autosave=True,
        dashboard=False,
        verbose=True,
        dynamic_temperature_enabled=True,
        saved_state_path=f"term_sheet_analysis_agent_{i}.json",
        user_name="swarms_corp",
        retry_attempts=1,
        context_length=200000,
        return_step_meta=False,
    )
    for i in range(3)  # Adjust number of agents as needed
]

# Initialize the workflow with the list of agents
workflow = ConcurrentWorkflow(
    agents=agents,
    metadata_output_path="term_sheet_analysis_metadata.json",
    return_str_on=True,
    auto_generate_prompts=True,
    auto_save=True,
)

# Define the task for all agents
task = "Analyze the term sheet for investment opportunities and identify key terms and conditions."

# Run the workflow and save metadata
metadata = workflow.run(task)
logger.info(metadata)
|
||||
|
||||
|
||||
# # Example usage of the run_batched method
|
||||
# tasks = [
|
||||
# "What are the benefits of a ROTH IRA?",
|
||||
# "How do I open a ROTH IRA account?",
|
||||
# ]
|
||||
# results = workflow.run_batched(tasks)
|
||||
# print("\nRun Batched Method Output:")
|
||||
# print(results)
|
||||
|
||||
# # Example usage of the run_async method
|
||||
# async def run_async_example():
|
||||
# future = workflow.run_async(task)
|
||||
# result = await future
|
||||
# print("\nRun Async Method Output:")
|
||||
# print(result)
|
||||
|
||||
# # Example usage of the run_batched_async method
|
||||
# async def run_batched_async_example():
|
||||
# futures = workflow.run_batched_async(tasks)
|
||||
# results = await asyncio.gather(*futures)
|
||||
# print("\nRun Batched Async Method Output:")
|
||||
# print(results)
|
||||
|
||||
# # Example usage of the run_parallel method
|
||||
# parallel_results = workflow.run_parallel(tasks)
|
||||
# print("\nRun Parallel Method Output:")
|
||||
# print(parallel_results)
|
||||
|
||||
# # Example usage of the run_parallel_async method
|
||||
# async def run_parallel_async_example():
|
||||
# parallel_futures = workflow.run_parallel_async(tasks)
|
||||
# parallel_results = await asyncio.gather(*parallel_futures)
|
||||
# print("\nRun Parallel Async Method Output:")
|
||||
# print(parallel_results)
|
||||
|
||||
# # To run the async examples, you would typically use an event loop
|
||||
# if __name__ == "__main__":
|
||||
# asyncio.run(run_async_example())
|
||||
# asyncio.run(run_batched_async_example())
|
||||
# asyncio.run(run_parallel_async_example())
|
@ -1,41 +0,0 @@
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
|
||||
def scrape_blackrock_trades(
    url: str = "https://arkhamintelligence.com/blackrock/trades",
    timeout: float = 10.0,
):
    """
    Scrape BlackRock trade rows from the Arkham Intelligence trades page.

    :param url: Page to scrape (defaults to the Arkham BlackRock trades URL;
        parameterized so alternate mirrors/pages can be passed).
    :param timeout: Seconds to wait for the HTTP response — prevents the
        request from hanging indefinitely.
    :return: List of trade dicts on HTTP 200 (empty if the table is absent),
        or None when the server returns a non-200 status.
    """
    response = requests.get(url, timeout=timeout)

    if response.status_code == 200:
        soup = BeautifulSoup(response.content, "html.parser")

        # Trades are expected in a table with id="trades-table"; each data
        # row maps six positional <td> cells onto named fields.
        trades = []
        table = soup.find("table", {"id": "trades-table"})

        if table:
            for row in table.find_all("tr"):
                columns = row.find_all("td")
                # Require all six cells — header/short rows would otherwise
                # raise IndexError when indexing columns[1..5].
                if len(columns) >= 6:
                    trade = {
                        "trade_date": columns[0].text.strip(),
                        "asset": columns[1].text.strip(),
                        "action": columns[2].text.strip(),
                        "quantity": columns[3].text.strip(),
                        "price": columns[4].text.strip(),
                        "total_value": columns[5].text.strip(),
                    }
                    trades.append(trade)
        return trades
    else:
        print(
            f"Failed to fetch data. Status code: {response.status_code}"
        )
        return None
|
||||
|
||||
|
||||
if __name__ == "__main__":
    trades = scrape_blackrock_trades()
    # Only iterate on success: None signals a failed request, and an empty
    # list (no table found) is also skipped by this truthiness check.
    if trades:
        for trade in trades:
            print(trade)
|
@ -1,140 +0,0 @@
|
||||
from typing import Any, Dict, Optional, Tuple, Union

import requests
from loguru import logger
|
||||
|
||||
|
||||
class AlphaVantageClient:
    """
    Client to fetch commodities and economic indicators data from Alpha Vantage API.
    """

    BASE_URL = "https://www.alphavantage.co/query"

    def __init__(self, api_key: str) -> None:
        """
        Initialize the AlphaVantageClient with an API key.

        :param api_key: Your Alpha Vantage API key.
        """
        self.api_key = api_key

    def fetch_data(
        self,
        function: str,
        symbol: Optional[str] = None,
        timeout: float = 10.0,
    ) -> Tuple[str, Dict[str, Any]]:
        """
        Fetches data from Alpha Vantage API and returns it as both string and dictionary.

        :param function: Alpha Vantage function type (e.g., 'TIME_SERIES_DAILY', 'REAL_GDP').
        :param symbol: Optional. The commodity/economic indicator symbol.
        :param timeout: Seconds to wait for the HTTP response before aborting.
        :return: Tuple of (raw response text, parsed JSON dict). On request
            failure returns (error message, empty dict) instead of raising.
        """
        params = {
            "apikey": self.api_key,
            "function": function,
            # requests drops None-valued params, so omitting the symbol is
            # safe for functions (e.g. REAL_GDP) that do not take one.
            "symbol": symbol,
        }

        logger.info(
            f"Fetching data for function '{function}' with symbol '{symbol}'"
        )

        try:
            # Explicit timeout so a hung connection cannot block forever.
            response = requests.get(
                self.BASE_URL, params=params, timeout=timeout
            )
            response.raise_for_status()
            data = response.json()
            data_as_string = response.text
            logger.success(
                f"Successfully fetched data for {symbol if symbol else function}"
            )
            return data_as_string, data
        except requests.RequestException as e:
            logger.error(
                f"Error while fetching data from Alpha Vantage: {e}"
            )
            return str(e), {}

    def get_commodities_data(
        self,
    ) -> Dict[str, Tuple[str, Dict[str, Any]]]:
        """
        Fetches data for trending commodities such as Crude Oil, Natural Gas, and others.

        :return: Dictionary with commodity names as keys and a tuple of
            (string data, dictionary data) as values.
        """
        # Display name -> Alpha Vantage symbol.
        commodities = {
            "Crude Oil (WTI)": "OIL_WTI",
            "Crude Oil (Brent)": "OIL_BRENT",
            "Natural Gas": "NATURAL_GAS",
            "Copper": "COPPER",
            "Aluminum": "ALUMINUM",
            "Wheat": "WHEAT",
            "Corn": "CORN",
            "Cotton": "COTTON",
            "Sugar": "SUGAR",
            "Coffee": "COFFEE",
            "Global Commodities Index": "COMMODITIES",
        }

        commodity_data = {}
        for name, symbol in commodities.items():
            data_str, data_dict = self.fetch_data(
                function="TIME_SERIES_DAILY", symbol=symbol
            )
            commodity_data[name] = (data_str, data_dict)

        return commodity_data

    def get_economic_indicators(
        self,
    ) -> Dict[str, Tuple[str, Dict[str, Any]]]:
        """
        Fetches data for economic indicators such as Real GDP, Unemployment Rate, etc.

        :return: Dictionary with indicator names as keys and a tuple of
            (string data, dictionary data) as values.
        """
        # Display name -> Alpha Vantage function (no symbol required).
        indicators = {
            "Real GDP": "REAL_GDP",
            "Real GDP per Capita": "REAL_GDP_PER_CAPITA",
            "Treasury Yield": "TREASURY_YIELD",
            "Federal Funds Rate": "FEDERAL_FUNDS_RATE",
            "CPI": "CPI",
            "Inflation": "INFLATION",
            "Retail Sales": "RETAIL_SALES",
            "Durable Goods Orders": "DURABLE_GOODS",
            "Unemployment Rate": "UNEMPLOYMENT",
            "Nonfarm Payroll": "NONFARM_PAYROLL",
        }

        indicator_data = {}
        for name, function in indicators.items():
            data_str, data_dict = self.fetch_data(function=function)
            indicator_data[name] = (data_str, data_dict)

        return indicator_data
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Replace with your actual API key
    API_KEY = "your_alpha_vantage_api_key"

    av_client = AlphaVantageClient(api_key=API_KEY)

    logger.info("Fetching commodities data...")
    commodities_data = av_client.get_commodities_data()

    logger.info("Fetching economic indicators data...")
    economic_indicators_data = av_client.get_economic_indicators()

    # Example of accessing the data
    # NOTE(review): the "..." suffix below is cosmetic — the full data_str is
    # logged; slice it (e.g. data_str[:100]) if real truncation is wanted.
    for name, (data_str, data_dict) in commodities_data.items():
        logger.info(
            f"{name}: {data_str}..."
        )  # Truncate the string for display

    for name, (
        data_str,
        data_dict,
    ) in economic_indicators_data.items():
        logger.info(
            f"{name}: {data_str}..."
        )  # Truncate the string for display
|
@ -1,333 +0,0 @@
|
||||
import concurrent.futures
|
||||
import os
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Any, Dict, List, Tuple
|
||||
|
||||
import requests
|
||||
import yfinance as yf
|
||||
from alpha_vantage.cryptocurrencies import CryptoCurrencies
|
||||
from alpha_vantage.foreignexchange import ForeignExchange
|
||||
from alpha_vantage.timeseries import TimeSeries
|
||||
from loguru import logger
|
||||
|
||||
|
||||
def fetch_yahoo_finance_data(tickers: List[str]) -> Dict[str, Any]:
    """
    Fetch the latest closing prices for the given Yahoo Finance tickers.

    Args:
        tickers: Yahoo ticker symbols (e.g. "^GSPC", "GC=F").

    Returns:
        Dict mapping a human-readable name to the latest close, or to
        "N/A" for every requested ticker when the download fails.
    """
    # Display names keyed by ticker; used by BOTH the success and the
    # failure paths so downstream .get("S&P 500")-style lookups always
    # find the same keys.  (Bug fix: the original error path keyed the
    # fallback dict by raw ticker symbols instead of display names.)
    name_by_ticker = {
        "^GSPC": "S&P 500",
        "^DJI": "Dow Jones",
        "^IXIC": "NASDAQ",
        "GC=F": "Gold Price",
        "CL=F": "Oil Price",
        "^TNX": "10-Year Treasury Yield",
    }
    try:
        closes = yf.download(tickers, period="1d")["Close"]
        return {
            name: closes[ticker].iloc[-1]
            for ticker, name in name_by_ticker.items()
            if ticker in tickers
        }
    except Exception as e:
        logger.error(f"Error fetching Yahoo Finance data: {str(e)}")
        return {
            name: "N/A"
            for ticker, name in name_by_ticker.items()
            if ticker in tickers
        }
|
||||
|
||||
|
||||
def fetch_polygon_ticker_data(
    api_key: str, ticker: str
) -> Dict[str, Any]:
    """
    Fetch the previous-day closing price for *ticker* from Polygon.

    Returns:
        Single-entry dict mapping the ticker to its close, or to None
        when the HTTP request fails.
    """
    endpoint = (
        f"https://api.polygon.io/v2/aggs/ticker/{ticker}/prev"
        f"?apiKey={api_key}"
    )
    try:
        resp = requests.get(endpoint)
        resp.raise_for_status()
        payload = resp.json()
    except requests.RequestException as e:
        logger.error(
            f"Error fetching Polygon data for {ticker}: {str(e)}"
        )
        return {ticker: None}
    # "c" is the close field of the first (previous-day) aggregate bar.
    return {ticker: payload["results"][0]["c"]}
|
||||
|
||||
|
||||
def fetch_polygon_forex_data(
    api_key: str, from_currency: str, to_currency: str
) -> Dict[str, Any]:
    """
    Fetch the previous-day close for a currency pair from Polygon.

    Returns:
        {"<FROM> to <TO>": close} on success, value None on failure.
    """
    pair_label = f"{from_currency} to {to_currency}"
    endpoint = f"https://api.polygon.io/v2/aggs/ticker/C:{from_currency}{to_currency}/prev?apiKey={api_key}"
    try:
        resp = requests.get(endpoint)
        resp.raise_for_status()
        payload = resp.json()
    except requests.RequestException as e:
        logger.error(
            f"Error fetching Polygon forex data for {from_currency}/{to_currency}: {str(e)}"
        )
        return {pair_label: None}
    return {pair_label: payload["results"][0]["c"]}
|
||||
|
||||
|
||||
def fetch_polygon_economic_data(
    api_key: str, indicator: str
) -> Dict[str, Any]:
    """
    Fetch the most recent daily reading for a Polygon economic-indicator
    ticker (e.g. "I:CPI") over a trailing 30-day window.

    Returns:
        {indicator: latest value} on success, value None on failure.
    """
    today = datetime.now()
    start = (today - timedelta(days=30)).strftime("%Y-%m-%d")
    end = today.strftime("%Y-%m-%d")
    endpoint = (
        f"https://api.polygon.io/v2/aggs/ticker/{indicator}"
        f"/range/1/day/{start}/{end}?apiKey={api_key}"
    )
    try:
        resp = requests.get(endpoint)
        resp.raise_for_status()
        payload = resp.json()
        # Last bar in the range is the most recent reading.
        return {indicator: payload["results"][-1]["c"]}
    except requests.RequestException as e:
        logger.error(
            f"Error fetching Polygon economic data for {indicator}: {str(e)}"
        )
        return {indicator: None}
|
||||
|
||||
|
||||
def fetch_polygon_data(api_key: str) -> Dict[str, Any]:
    """
    Collect previous-day stock closes, forex closes and economic
    indicator readings from Polygon.

    Returns:
        Flat dict of symbol/pair/indicator -> value.  Empty dict when no
        API key is configured; individual request failures surface as
        None values from the per-item helpers.
    """
    if not api_key:
        logger.warning(
            "Polygon API key not found. Skipping Polygon data."
        )
        return {}

    # Define data to fetch: ETF proxies for the major indices and
    # commodities, a few USD forex pairs, and Polygon indicator tickers
    # (the dict values are display names, kept for reference only).
    stock_tickers = ["SPY", "DIA", "QQQ", "GLD", "USO", "TLT"]
    forex_pairs = [("USD", "EUR"), ("USD", "GBP"), ("USD", "JPY")]
    economic_indicators = {
        "I:CPI": "Consumer Price Index",
        "I:GDPUSD": "US GDP",
        "I:UNRATE": "US Unemployment Rate",
        "I:INDPRO": "Industrial Production Index",
        "I:HOUST": "Housing Starts",
        "I:RSXFS": "Retail Sales",
        "I:CPIUCSL": "Inflation Rate",
        "I:FEDFUNDS": "Federal Funds Rate",
        "I:GFDEBTN": "US National Debt",
        "I:REALGDP": "Real GDP",
    }

    result_dict = {}

    # Fetch stock data
    for symbol in stock_tickers:
        result_dict.update(fetch_polygon_ticker_data(api_key, symbol))

    # Fetch forex data
    for base, quote in forex_pairs:
        result_dict.update(
            fetch_polygon_forex_data(api_key, base, quote)
        )

    # Fetch economic indicator data (keyed by the "I:..." codes)
    for code in economic_indicators:
        result_dict.update(
            fetch_polygon_economic_data(api_key, code)
        )

    return result_dict
|
||||
|
||||
|
||||
def fetch_exchange_rates() -> Dict[str, Any]:
    """
    Fetch current USD->EUR/GBP/JPY rates from open.er-api.com.

    Returns:
        Dict of pair label -> rate; every value is "N/A" when the
        request fails or the payload lacks the expected "rates" mapping.
    """
    # Single fallback shared by both failure modes (the original
    # repeated this literal three times).
    fallback = {
        "USD to EUR": "N/A",
        "USD to GBP": "N/A",
        "USD to JPY": "N/A",
    }
    exchange_url = "https://open.er-api.com/v6/latest/USD"
    try:
        response = requests.get(exchange_url)
        response.raise_for_status()
        rates = response.json().get("rates")
    except requests.RequestException as e:
        logger.error(f"Error fetching exchange rate data: {str(e)}")
        return fallback
    if not rates:
        logger.error("Exchange rate data structure unexpected")
        return fallback
    return {
        "USD to EUR": rates.get("EUR", "N/A"),
        "USD to GBP": rates.get("GBP", "N/A"),
        "USD to JPY": rates.get("JPY", "N/A"),
    }
|
||||
|
||||
|
||||
def fetch_world_bank_data(
    indicator: Tuple[str, str], country: str = "US"
) -> Dict[str, Any]:
    """
    Fetch the latest observation of a World Bank indicator.

    Args:
        indicator: (display name, World Bank indicator code) pair.
        country: ISO country code to query. New parameter with a
            backward-compatible default; the original URL hit
            /v2/indicator/{code}, which returns indicator *metadata*
            (no country observations), so it could never yield values.

    Returns:
        {display name: latest value or "N/A"}.
    """
    indicator_name, indicator_code = indicator
    wb_url = (
        f"http://api.worldbank.org/v2/country/{country}"
        f"/indicator/{indicator_code}?date=2021:2022&format=json"
    )
    try:
        response = requests.get(wb_url)
        response.raise_for_status()
        data = response.json()
        # Payload shape: [paging-metadata, [observation, ...]].
        # data[1] can be None on errors, so truthiness (not len())
        # guards the lookup — the original len(data[1]) raised an
        # uncaught TypeError in that case.
        if (
            isinstance(data, list)
            and len(data) > 1
            and data[1]
        ):
            return {indicator_name: data[1][0].get("value", "N/A")}
        logger.error(
            f"Unexpected data structure for {indicator_name}"
        )
        return {indicator_name: "N/A"}
    except requests.RequestException as e:
        logger.error(
            f"Error fetching {indicator_name} data: {str(e)}"
        )
        return {indicator_name: "N/A"}
|
||||
|
||||
|
||||
def fetch_alpha_vantage_data(api_key: str) -> Dict[str, Any]:
    """
    Fetch a small sample of Alpha Vantage data: MSFT daily closes,
    the USD/EUR exchange rate, and the BTC/USD daily close.

    Args:
        api_key: Alpha Vantage API key; when falsy, the fetch is
            skipped and an empty dict is returned.

    Returns:
        Dict with whichever of the three entries could be fetched.
    """
    if not api_key:
        logger.warning(
            "Alpha Vantage API key not found. Skipping Alpha Vantage data."
        )
        return {}

    ts = TimeSeries(key=api_key, output_format="json")
    fx = ForeignExchange(key=api_key)
    cc = CryptoCurrencies(key=api_key)

    result = {}

    # Each source is fetched under its own try/except so one failing
    # endpoint no longer aborts the remaining lookups (the original
    # wrapped all three calls in a single try block).
    try:
        data, _ = ts.get_daily("MSFT")
        # NOTE(review): get_daily returns a date-keyed mapping; "4. close"
        # here mirrors the original lookup — confirm the intended shape.
        result["MSFT Daily Close"] = data["4. close"]
    except Exception as e:
        logger.error(f"Error fetching Alpha Vantage data: {str(e)}")

    try:
        data, _ = fx.get_currency_exchange_rate(
            from_currency="USD", to_currency="EUR"
        )
        result["USD to EUR (Alpha Vantage)"] = data[
            "5. Exchange Rate"
        ]
    except Exception as e:
        logger.error(f"Error fetching Alpha Vantage data: {str(e)}")

    try:
        data, _ = cc.get_digital_currency_daily(
            symbol="BTC", market="USD"
        )
        result["BTC to USD"] = data["4b. close (USD)"]
    except Exception as e:
        logger.error(f"Error fetching Alpha Vantage data: {str(e)}")

    return result
|
||||
|
||||
|
||||
def fetch_macro_economic_data() -> Tuple[str, Dict[str, Any]]:
    """
    Fetches comprehensive macro-economic data from various APIs using multithreading.

    Returns:
        Tuple[str, Dict[str, Any]]: A tuple containing:
        - A formatted string with the macro-economic data
        - A dictionary with the raw macro-economic data
    """
    logger.info("Starting to fetch comprehensive macro-economic data")

    result_dict: Dict[str, Any] = {}

    # Define data fetching tasks: (callable, args-tuple) pairs; each
    # callable returns a flat dict that is merged into result_dict below.
    tasks = [
        (
            fetch_yahoo_finance_data,
            (["^GSPC", "^DJI", "^IXIC", "GC=F", "CL=F", "^TNX"],),
        ),
        (fetch_polygon_data, (os.environ.get("POLYGON_API_KEY"),)),
        (fetch_exchange_rates, ()),
        (
            fetch_alpha_vantage_data,
            (os.environ.get("ALPHA_VANTAGE_API_KEY"),),
        ),
    ]

    # Execute tasks concurrently — all four fetchers are network-bound,
    # so threads overlap their wait time.
    with concurrent.futures.ThreadPoolExecutor(
        max_workers=20
    ) as executor:
        future_to_task = {
            executor.submit(task, *args): task.__name__
            for task, args in tasks
        }
        for future in concurrent.futures.as_completed(future_to_task):
            task_name = future_to_task[future]
            try:
                data = future.result()
                # NOTE(review): later-completing tasks overwrite duplicate
                # keys (e.g. "USD to EUR" is produced by more than one
                # fetcher) — completion order decides which value wins.
                result_dict.update(data)
                logger.success(
                    f"Successfully fetched data from {task_name}"
                )
            except Exception as e:
                # A failed fetcher is logged and skipped; the report
                # below degrades to None/"N/A" via .get() lookups.
                logger.error(
                    f"{task_name} generated an exception: {str(e)}"
                )

    # Create the formatted string output

    # Update the output_string in fetch_macro_economic_data function
    output_string = f"""
Macro-economic Data (as of {datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
-----------------------------------------------------------
Stock Market Indices:
S&P 500 (SPY): ${result_dict.get('SPY')}
Dow Jones (DIA): ${result_dict.get('DIA')}
NASDAQ (QQQ): ${result_dict.get('QQQ')}

Commodities:
Gold (GLD): ${result_dict.get('GLD')}
Oil (USO): ${result_dict.get('USO')}

Bonds:
20+ Year Treasury Bond (TLT): ${result_dict.get('TLT')}

Forex:
USD to EUR: {result_dict.get('USD to EUR')}
USD to GBP: {result_dict.get('USD to GBP')}
USD to JPY: {result_dict.get('USD to JPY')}

Economic Indicators:
Consumer Price Index: {result_dict.get('I:CPI')}
US GDP: ${result_dict.get('I:GDPUSD')} billion
US Unemployment Rate: {result_dict.get('I:UNRATE')}%
Industrial Production Index: {result_dict.get('I:INDPRO')}
Housing Starts: {result_dict.get('I:HOUST')} thousand
Retail Sales: ${result_dict.get('I:RSXFS')} billion
Inflation Rate: {result_dict.get('I:CPIUCSL')}%
Federal Funds Rate: {result_dict.get('I:FEDFUNDS')}%
US National Debt: ${result_dict.get('I:GFDEBTN')} billion
Real GDP: ${result_dict.get('I:REALGDP')} billion

Other Market Data:
S&P 500 (Yahoo): {result_dict.get('S&P 500', 'N/A')}
Dow Jones (Yahoo): {result_dict.get('Dow Jones', 'N/A')}
NASDAQ (Yahoo): {result_dict.get('NASDAQ', 'N/A')}
Gold Price (Yahoo): ${result_dict.get('Gold Price', 'N/A')}
Oil Price (Yahoo): ${result_dict.get('Oil Price', 'N/A')}
10-Year Treasury Yield (Yahoo): {result_dict.get('10-Year Treasury Yield', 'N/A')}%
MSFT Daily Close: {result_dict.get('MSFT Daily Close', 'N/A')}
BTC to USD: {result_dict.get('BTC to USD', 'N/A')}

Exchange Rates (Other Sources):
USD to EUR (Open Exchange Rates): {result_dict.get('USD to EUR', 'N/A')}
USD to GBP (Open Exchange Rates): {result_dict.get('USD to GBP', 'N/A')}
USD to JPY (Open Exchange Rates): {result_dict.get('USD to JPY', 'N/A')}
USD to EUR (Alpha Vantage): {result_dict.get('USD to EUR (Alpha Vantage)', 'N/A')}
"""

    logger.info("Finished fetching comprehensive macro-economic data")
    return output_string, result_dict
|
||||
|
||||
|
||||
# Example usage
if __name__ == "__main__":
    # Persist logs to disk, rotating before a file grows past 500 MB.
    logger.add("macro_economic_data.log", rotation="500 MB")

    try:
        summary_text, raw_data = fetch_macro_economic_data()
        print(summary_text)
        print("Dictionary output:", raw_data)
    except Exception as e:
        logger.exception(f"An error occurred: {str(e)}")
|
@ -1,158 +0,0 @@
|
||||
import requests
|
||||
import pandas as pd
|
||||
from datetime import datetime
|
||||
import os
|
||||
from typing import Dict, Tuple, Any
|
||||
import logging
|
||||
|
||||
# Set up logging
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# You'll need to set these environment variables with your actual API keys
|
||||
ALPHA_VANTAGE_API_KEY = os.getenv("ALPHA_VANTAGE_API_KEY")
|
||||
WORLD_BANK_API_KEY = os.getenv("WORLD_BANK_API_KEY")
|
||||
FRED_API_KEY = os.getenv("FRED_API_KEY")
|
||||
|
||||
|
||||
def fetch_real_economic_data(
    country: str, start_date: datetime, end_date: datetime
) -> Tuple[str, Dict[str, Any]]:
    """
    Fetch real economic indicators for *country* between two dates.

    Combines World Bank, FRED and Alpha Vantage series into a single
    DataFrame, then returns a human-readable summary plus a dict with
    the latest row ("real_time_data") and the full history.

    Args:
        country: ISO country code understood by the World Bank API.
        start_date: Inclusive start of the window.
        end_date: Inclusive end of the window.

    Returns:
        Tuple[str, Dict[str, Any]]: (summary text, data dictionary).
    """
    data = {}

    def get_alpha_vantage_data(indicator: str) -> pd.Series:
        # Monthly closing prices from Alpha Vantage, sorted by date;
        # returns an empty Series on any failure.
        try:
            url = f"https://www.alphavantage.co/query?function={indicator}&interval=monthly&apikey={ALPHA_VANTAGE_API_KEY}"
            response = requests.get(url)
            response.raise_for_status()
            df = pd.DataFrame(
                response.json()["Monthly Time Series"]
            ).T
            df.index = pd.to_datetime(df.index)
            df = df.sort_index()
            return df["4. close"].astype(float)
        except Exception as e:
            logger.error(
                f"Error fetching Alpha Vantage data: {str(e)}"
            )
            return pd.Series()

    def get_world_bank_data(indicator: str) -> pd.Series:
        # Annual observations for the closed-over `country` and window.
        try:
            url = f"http://api.worldbank.org/v2/country/{country}/indicator/{indicator}?format=json&date={start_date.year}:{end_date.year}&per_page=1000"
            response = requests.get(url)
            response.raise_for_status()
            # Payload is [paging-metadata, observations]; element [1]
            # shadows the outer `data` dict locally (intentional here).
            data = response.json()[1]
            df = pd.DataFrame(data)
            df["date"] = pd.to_datetime(df["date"], format="%Y")
            df = df.set_index("date").sort_index()
            return df["value"].astype(float)
        except Exception as e:
            logger.error(f"Error fetching World Bank data: {str(e)}")
            return pd.Series()

    def get_fred_data(series_id: str) -> pd.Series:
        # Full observation history for a FRED series, indexed by date.
        # NOTE(review): FRED encodes missing values as "." — astype(float)
        # would raise and the series falls back to empty; confirm.
        try:
            url = f"https://api.stlouisfed.org/fred/series/observations?series_id={series_id}&api_key={FRED_API_KEY}&file_type=json"
            response = requests.get(url)
            response.raise_for_status()
            df = pd.DataFrame(response.json()["observations"])
            df["date"] = pd.to_datetime(df["date"])
            df = df.set_index("date").sort_index()
            return df["value"].astype(float)
        except Exception as e:
            logger.error(f"Error fetching FRED data: {str(e)}")
            return pd.Series()

    # Fetch data from different sources
    data["GDP_growth_rate"] = get_world_bank_data("NY.GDP.MKTP.KD.ZG")
    data["unemployment_rate"] = get_world_bank_data("SL.UEM.TOTL.ZS")
    data["inflation_rate"] = get_world_bank_data("FP.CPI.TOTL.ZG")
    data["debt_to_GDP_ratio"] = get_world_bank_data(
        "GC.DOD.TOTL.GD.ZS"
    )
    data["current_account_balance"] = get_world_bank_data(
        "BN.CAB.XOKA.CD"
    )
    data["yield_curve_slope"] = get_fred_data("T10Y2Y")
    data["stock_market_index"] = get_alpha_vantage_data(
        "TIME_SERIES_MONTHLY"
    )
    data["consumer_confidence_index"] = get_fred_data(
        "CSCICP03USM665S"
    )
    data["business_confidence_index"] = get_fred_data(
        "BSCICP03USM665S"
    )

    # Combine all data into a single DataFrame; series are aligned on
    # their date indices, then trimmed to the requested window.
    df = pd.DataFrame(data)
    df = df.loc[start_date:end_date]

    if df.empty:
        logger.warning(
            "No data retrieved for the specified date range and country."
        )
        return "No data available", {
            "country": country,
            "real_time_data": {},
            "historical_data": {},
        }

    # Prepare the dictionary output: last row = most recent snapshot.
    output_dict = {
        "country": country,
        "real_time_data": df.iloc[-1].to_dict(),
        "historical_data": df.to_dict(),
    }

    # Create summary string from the latest snapshot; NaN entries are
    # reported as unavailable rather than formatted.
    summary = f"Economic Data Summary for {country} (as of {end_date.strftime('%Y-%m-%d')}):\n"
    for key, value in output_dict["real_time_data"].items():
        if pd.notna(value):
            summary += (
                f"{key.replace('_', ' ').title()}: {value:.2f}\n"
            )
        else:
            summary += f"{key.replace('_', ' ').title()}: Data not available\n"

    return summary, output_dict
|
||||
|
||||
|
||||
# Example usage
if __name__ == "__main__":
    country = "US"  # ISO country code
    start_date = datetime(2020, 1, 1)
    end_date = datetime.now()

    summary, data = fetch_real_economic_data(
        country, start_date, end_date
    )
    print(summary)
    print("\nOutput Dictionary (truncated):")
    print(f"Country: {data['country']}")
    print("Real-time data:", data["real_time_data"])
    print("Historical data: {First day, Last day}")
    if data["historical_data"]:
        # NOTE(review): first/last day are derived from the *first*
        # series only; assumes all series share one date index — confirm.
        first_day = min(
            next(iter(data["historical_data"].values())).keys()
        )
        last_day = max(
            next(iter(data["historical_data"].values())).keys()
        )
        print(
            f"  {first_day}:",
            {
                k: v[first_day] if first_day in v else "N/A"
                for k, v in data["historical_data"].items()
            },
        )
        print(
            f"  {last_day}:",
            {
                k: v[last_day] if last_day in v else "N/A"
                for k, v in data["historical_data"].items()
            },
        )
    else:
        print("  No historical data available.")
|
@ -1,82 +0,0 @@
|
||||
from swarms.prompts.prompt import Prompt
|
||||
import subprocess
|
||||
|
||||
|
||||
# Tools
|
||||
def terminal(
    code: str,
):
    """
    Run code in the terminal.

    Args:
        code (str): The shell command to run.

    Returns:
        str: The captured standard output of the command ("" when the
        command produced none; stderr is not captured in the result).
    """
    # SECURITY: shell=True executes arbitrary shell syntax — this is an
    # intentional agent tool, but never route untrusted input here.
    out = subprocess.run(
        code, shell=True, capture_output=True, text=True
    ).stdout
    # text=True already yields str, so no str() conversion is needed.
    return out
|
||||
|
||||
|
||||
def browser(query: str):
    """
    Search the query in the browser with the `browser` tool.

    Args:
        query (str): The query to search in the browser.

    Returns:
        str: A confirmation message (the page opens asynchronously).
    """
    import webbrowser
    from urllib.parse import quote_plus

    # Percent-encode the query so spaces and special characters survive
    # the URL (the original interpolated the raw query, producing
    # malformed search URLs for multi-word queries).
    url = f"https://www.google.com/search?q={quote_plus(query)}"
    webbrowser.open(url)
    return f"Searching for {query} in the browser."
|
||||
|
||||
|
||||
def create_file(file_path: str, content: str):
    """
    Create a file using the file editor tool.

    Args:
        file_path (str): Destination path of the new file.
        content (str): Text to write into the file.

    Returns:
        str: Confirmation message reported back to the agent.
    """
    handle = open(file_path, "w")
    try:
        handle.write(content)
    finally:
        handle.close()
    return f"File {file_path} created successfully."
|
||||
|
||||
|
||||
def file_editor(file_path: str, mode: str, content: str):
    """
    Edit a file using the file editor tool.

    Args:
        file_path (str): The path to the file.
        mode (str): File-open mode (e.g. "w" to overwrite, "a" to append).
        content (str): The content to write to the file.

    Returns:
        str: Confirmation message reported back to the agent.
    """
    handle = open(file_path, mode)
    try:
        handle.write(content)
    finally:
        handle.close()
    return f"File {file_path} edited successfully."
|
||||
|
||||
|
||||
# Build the prompt object; tools can be passed at construction time or
# attached afterwards via add_tools (done below).
prompt = Prompt(
    content="This is my first prompt!",
    name="My First Prompt",
    description="A simple example prompt.",
    # tools=[file_editor, create_file, terminal]
)

# Register the local tool functions defined above with the prompt.
prompt.add_tools(tools=[file_editor, create_file, terminal])
print(prompt.content)
|
@ -1,49 +0,0 @@
|
||||
import os
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
from swarms.prompts.prompt_generator_optimizer import (
|
||||
prompt_generator_sys_prompt,
|
||||
)
|
||||
from dotenv import load_dotenv
|
||||
from swarms.agents.prompt_generator_agent import PromptGeneratorAgent
|
||||
|
||||
# Load environment variables (expects OPENAI_API_KEY in .env).
load_dotenv()

# Get the OpenAI API key from the environment variable
api_key = os.getenv("OPENAI_API_KEY")

# Create an instance of the OpenAIChat class — low temperature for
# more deterministic prompt rewriting.
model = OpenAIChat(
    openai_api_key=api_key,
    model_name="gpt-4o-mini",
    temperature=0.1,
    max_tokens=2000,
)

# Initialize the agent that performs the prompt optimization.
agent = Agent(
    agent_name="Prompt-Optimizer",
    system_prompt=prompt_generator_sys_prompt.get_prompt(),
    llm=model,
    max_loops=1,
    autosave=True,
    dashboard=False,
    verbose=True,
    dynamic_temperature_enabled=True,
    saved_state_path="optimizer_agent.json",
    user_name="swarms_corp",
    retry_attempts=1,
    context_length=200000,
    return_step_meta=False,
    # output_type="json",
    output_type="string",
)

# Main Class — wraps the agent in the prompt-generator workflow.
prompt_generator = PromptGeneratorAgent(agent)

# Run the agent
prompt_generator.run(
    "Generate an amazing prompt for analyzing healthcare insurance documents"
)
|
@ -0,0 +1,190 @@
|
||||
agents:
|
||||
- agent_name: "Miami-Real-Estate-Compliance-Agent"
|
||||
system_prompt: |
|
||||
You are a specialized legal analysis agent focused on Miami real estate law compliance. Your task is to provide expert guidance on Florida and Miami-Dade County real estate regulations, ensuring full compliance with all relevant laws. You should be well-versed in:
|
||||
- Florida real estate statutes and Miami-Dade County ordinances
|
||||
- Zoning regulations and land use restrictions
|
||||
- Building codes and permits
|
||||
- Property disclosure requirements
|
||||
- HOA and condo association regulations
|
||||
- Title insurance and closing requirements
|
||||
- Environmental regulations affecting real estate
|
||||
|
||||
Provide detailed explanations of compliance requirements, potential legal issues, and necessary documentation for real estate transactions in Miami.
|
||||
max_loops: 1
|
||||
autosave: true
|
||||
dashboard: false
|
||||
verbose: true
|
||||
dynamic_temperature_enabled: true
|
||||
saved_state_path: "miami_real_estate_compliance_agent.json"
|
||||
user_name: "miami_real_estate_pro"
|
||||
retry_attempts: 1
|
||||
context_length: 250000
|
||||
return_step_meta: false
|
||||
output_type: "str"
|
||||
task: "What are the essential compliance requirements for a residential property sale in Miami Beach?"
|
||||
|
||||
- agent_name: "Miami-Condo-Law-Specialist-Agent"
|
||||
system_prompt: |
|
||||
You are a specialized legal agent focused on Miami condominium law. Your expertise covers:
|
||||
- Florida Condominium Act requirements
|
||||
- Condo association bylaws and regulations
|
||||
- Assessment and maintenance fee structures
|
||||
- Special assessment procedures
|
||||
- Unit owner rights and responsibilities
|
||||
- Common element regulations
|
||||
- Condo conversion requirements
|
||||
- Association dispute resolution
|
||||
- Building safety recertification requirements
|
||||
- Post-Surfside collapse regulations
|
||||
|
||||
Provide comprehensive guidance on condominium-specific legal issues in Miami-Dade County.
|
||||
max_loops: 1
|
||||
autosave: true
|
||||
dashboard: false
|
||||
verbose: true
|
||||
dynamic_temperature_enabled: true
|
||||
saved_state_path: "miami_condo_law_agent.json"
|
||||
user_name: "condo_law_specialist"
|
||||
retry_attempts: 2
|
||||
context_length: 200000
|
||||
output_type: "str"
|
||||
task: "What are the current legal requirements for condo safety inspections in Miami-Dade County?"
|
||||
|
||||
- agent_name: "International-Real-Estate-Agent"
|
||||
system_prompt: |
|
||||
You are a specialized agent focused on international real estate transactions in Miami. Your expertise includes:
|
||||
- Foreign investment regulations
|
||||
- FinCEN requirements and reporting
|
||||
- FIRPTA compliance
|
||||
- International tax considerations
|
||||
- Immigration law intersection with real estate
|
||||
- Foreign corporate structuring
|
||||
- Currency transfer regulations
|
||||
- International buyer due diligence
|
||||
- EB-5 visa program requirements
|
||||
|
||||
Provide guidance on legal requirements and optimal structures for international real estate transactions.
|
||||
max_loops: 1
|
||||
autosave: true
|
||||
dashboard: false
|
||||
verbose: true
|
||||
saved_state_path: "international_real_estate_agent.json"
|
||||
user_name: "international_specialist"
|
||||
retry_attempts: 2
|
||||
context_length: 200000
|
||||
output_type: "str"
|
||||
task: "What are the current legal requirements for foreign buyers purchasing Miami luxury condos?"
|
||||
|
||||
- agent_name: "Commercial-Real-Estate-Agent"
|
||||
system_prompt: |
|
||||
You are a specialized agent focused on commercial real estate law in Miami. Your expertise covers:
|
||||
- Commercial zoning regulations
|
||||
- Commercial lease requirements
|
||||
- Mixed-use development regulations
|
||||
- Commercial property due diligence
|
||||
- Environmental compliance
|
||||
- ADA compliance requirements
|
||||
- Commercial financing regulations
|
||||
- Property tax assessment appeals
|
||||
- Development impact fees
|
||||
- Commercial construction regulations
|
||||
|
||||
Provide guidance on commercial real estate legal requirements and optimization strategies.
|
||||
max_loops: 1
|
||||
autosave: true
|
||||
dashboard: false
|
||||
verbose: true
|
||||
saved_state_path: "commercial_real_estate_agent.json"
|
||||
user_name: "commercial_specialist"
|
||||
retry_attempts: 2
|
||||
context_length: 200000
|
||||
output_type: "str"
|
||||
task: "What are the key legal considerations for developing a mixed-use property in Miami's Brickell area?"
|
||||
|
||||
- agent_name: "Title-And-Closing-Agent"
|
||||
system_prompt: |
|
||||
You are a specialized agent focused on real estate title and closing procedures in Miami. Your expertise includes:
|
||||
- Title search requirements
|
||||
- Title insurance regulations
|
||||
- Closing document preparation
|
||||
- Settlement statement requirements
|
||||
- Escrow management
|
||||
- Recording requirements
|
||||
- Lien search procedures
|
||||
- Municipal lien searches
|
||||
- Documentary stamp calculations
|
||||
- Closing disclosure compliance
|
||||
|
||||
Provide guidance on title and closing requirements for Miami real estate transactions.
|
||||
max_loops: 1
|
||||
autosave: true
|
||||
dashboard: false
|
||||
verbose: true
|
||||
saved_state_path: "title_closing_agent.json"
|
||||
user_name: "title_specialist"
|
||||
retry_attempts: 2
|
||||
context_length: 200000
|
||||
output_type: "str"
|
||||
task: "What are the required steps for conducting a thorough title search in Miami-Dade County?"
|
||||
|
||||
- agent_name: "Real-Estate-Litigation-Agent"
|
||||
system_prompt: |
|
||||
You are a specialized agent focused on real estate litigation in Miami. Your expertise covers:
|
||||
- Property dispute resolution
|
||||
- Foreclosure procedures
|
||||
- Construction defect litigation
|
||||
- Title dispute resolution
|
||||
- Landlord-tenant disputes
|
||||
- Contract enforcement
|
||||
- Quiet title actions
|
||||
- Partition actions
|
||||
- Easement disputes
|
||||
- Adverse possession claims
|
||||
|
||||
Provide guidance on litigation strategies and dispute resolution in Miami real estate.
|
||||
max_loops: 1
|
||||
autosave: true
|
||||
dashboard: false
|
||||
verbose: true
|
||||
saved_state_path: "litigation_agent.json"
|
||||
user_name: "litigation_specialist"
|
||||
retry_attempts: 2
|
||||
context_length: 200000
|
||||
output_type: "str"
|
||||
task: "What are the current legal procedures for handling construction defect claims in Miami?"
|
||||
|
||||
- agent_name: "Land-Use-And-Zoning-Agent"
|
||||
system_prompt: |
|
||||
You are a specialized agent focused on land use and zoning law in Miami. Your expertise includes:
|
||||
- Zoning code interpretation
|
||||
- Variance requests
|
||||
- Special use permits
|
||||
- Historic preservation requirements
|
||||
- Environmental protection zones
|
||||
- Density and height restrictions
|
||||
- Parking requirements
|
||||
- Development review procedures
|
||||
- Impact fee assessments
|
||||
- Comprehensive plan compliance
|
||||
|
||||
Provide guidance on land use and zoning requirements for Miami real estate development.
|
||||
max_loops: 1
|
||||
autosave: true
|
||||
dashboard: false
|
||||
verbose: true
|
||||
saved_state_path: "zoning_agent.json"
|
||||
user_name: "zoning_specialist"
|
||||
retry_attempts: 2
|
||||
context_length: 200000
|
||||
output_type: "str"
|
||||
task: "What are the current zoning requirements for mixed-use development in Miami's Design District?"
|
||||
|
||||
swarm_architecture:
|
||||
name: "Miami-Real-Estate-Legal-Swarm"
|
||||
description: "A comprehensive swarm for Miami real estate legal analysis"
|
||||
max_loops: 1
|
||||
swarm_type: "auto"
|
||||
task: "Provide comprehensive legal analysis for a mixed-use development project in Miami, including compliance, zoning, international investment, and closing requirements"
|
||||
autosave: true
|
||||
return_json: false
|
|
@ -1,162 +0,0 @@
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
from swarms.structs.swarm_router import SwarmRouter
|
||||
|
||||
load_dotenv()
|
||||
|
||||
# Get the OpenAI API key from the environment variable
|
||||
api_key = os.getenv("GROQ_API_KEY")
|
||||
|
||||
# Model
|
||||
model = OpenAIChat(
|
||||
openai_api_base="https://api.groq.com/openai/v1",
|
||||
openai_api_key=api_key,
|
||||
model_name="llama-3.1-70b-versatile",
|
||||
temperature=0.1,
|
||||
)
|
||||
# Define specialized system prompts for each agent
|
||||
DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. Your expertise includes:
|
||||
1. Extracting key financial metrics (revenue, EBITDA, growth rates, etc.) from financial statements and reports
|
||||
2. Identifying and extracting important contract terms from legal documents
|
||||
3. Pulling out relevant market data from industry reports and analyses
|
||||
4. Extracting operational KPIs from management presentations and internal reports
|
||||
5. Identifying and extracting key personnel information from organizational charts and bios
|
||||
Provide accurate, structured data extracted from various document types to support investment analysis."""
|
||||
|
||||
SUMMARIZER_PROMPT = """You are an expert private equity agent specializing in summarizing complex documents. Your core competencies include:
|
||||
1. Distilling lengthy financial reports into concise executive summaries
|
||||
2. Summarizing legal documents, highlighting key terms and potential risks
|
||||
3. Condensing industry reports to capture essential market trends and competitive dynamics
|
||||
4. Summarizing management presentations to highlight key strategic initiatives and projections
|
||||
5. Creating brief overviews of technical documents, emphasizing critical points for non-technical stakeholders
|
||||
Deliver clear, concise summaries that capture the essence of various documents while highlighting information crucial for investment decisions."""
|
||||
|
||||
FINANCIAL_ANALYST_PROMPT = """You are a specialized private equity agent focused on financial analysis. Your key responsibilities include:
|
||||
1. Analyzing historical financial statements to identify trends and potential issues
|
||||
2. Evaluating the quality of earnings and potential adjustments to EBITDA
|
||||
3. Assessing working capital requirements and cash flow dynamics
|
||||
4. Analyzing capital structure and debt capacity
|
||||
5. Evaluating financial projections and underlying assumptions
|
||||
Provide thorough, insightful financial analysis to inform investment decisions and valuation."""
|
||||
|
||||
MARKET_ANALYST_PROMPT = """You are a highly skilled private equity agent specializing in market analysis. Your expertise covers:
|
||||
1. Analyzing industry trends, growth drivers, and potential disruptors
|
||||
2. Evaluating competitive landscape and market positioning
|
||||
3. Assessing market size, segmentation, and growth potential
|
||||
4. Analyzing customer dynamics, including concentration and loyalty
|
||||
5. Identifying potential regulatory or macroeconomic impacts on the market
|
||||
Deliver comprehensive market analysis to assess the attractiveness and risks of potential investments."""
|
||||
|
||||
OPERATIONAL_ANALYST_PROMPT = """You are an expert private equity agent focused on operational analysis. Your core competencies include:
|
||||
1. Evaluating operational efficiency and identifying improvement opportunities
|
||||
2. Analyzing supply chain and procurement processes
|
||||
3. Assessing sales and marketing effectiveness
|
||||
4. Evaluating IT systems and digital capabilities
|
||||
5. Identifying potential synergies in merger or add-on acquisition scenarios
|
||||
Provide detailed operational analysis to uncover value creation opportunities and potential risks."""
|
||||
|
||||
# Initialize specialized agents
|
||||
data_extractor_agent = Agent(
|
||||
agent_name="Data-Extractor",
|
||||
system_prompt=DATA_EXTRACTOR_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="data_extractor_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
summarizer_agent = Agent(
|
||||
agent_name="Document-Summarizer",
|
||||
system_prompt=SUMMARIZER_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="summarizer_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
financial_analyst_agent = Agent(
|
||||
agent_name="Financial-Analyst",
|
||||
system_prompt=FINANCIAL_ANALYST_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="financial_analyst_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
market_analyst_agent = Agent(
|
||||
agent_name="Market-Analyst",
|
||||
system_prompt=MARKET_ANALYST_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="market_analyst_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
operational_analyst_agent = Agent(
|
||||
agent_name="Operational-Analyst",
|
||||
system_prompt=OPERATIONAL_ANALYST_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="operational_analyst_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
# Initialize the SwarmRouter
|
||||
router = SwarmRouter(
|
||||
name="pe-document-analysis-swarm",
|
||||
description="Analyze documents for private equity due diligence and investment decision-making",
|
||||
max_loops=1,
|
||||
agents=[
|
||||
data_extractor_agent,
|
||||
summarizer_agent,
|
||||
# financial_analyst_agent,
|
||||
# market_analyst_agent,
|
||||
# operational_analyst_agent,
|
||||
],
|
||||
swarm_type="auto", # or "SequentialWorkflow" or "ConcurrentWorkflow" or
|
||||
# auto_generate_prompts=True,
|
||||
)
|
||||
|
||||
# Example usage
|
||||
if __name__ == "__main__":
|
||||
# Run a comprehensive private equity document analysis task
|
||||
result = router.run(
|
||||
"Where is the best place to find template term sheets for series A startups. Provide links and references"
|
||||
)
|
||||
print(result)
|
||||
|
||||
# Retrieve and print logs
|
||||
for log in router.get_logs():
|
||||
print(f"{log.timestamp} - {log.level}: {log.message}")
|
@ -1,161 +0,0 @@
|
||||
import os
|
||||
from dotenv import load_dotenv
|
||||
from swarms import Agent
|
||||
from swarm_models import OpenAIChat
|
||||
from swarms.structs.swarm_router import SwarmRouter
|
||||
|
||||
load_dotenv()
|
||||
|
||||
# Get the OpenAI API key from the environment variable
|
||||
api_key = os.getenv("GROQ_API_KEY")
|
||||
|
||||
# Model
|
||||
model = OpenAIChat(
|
||||
openai_api_base="https://api.groq.com/openai/v1",
|
||||
openai_api_key=api_key,
|
||||
model_name="llama-3.1-70b-versatile",
|
||||
temperature=0.1,
|
||||
)
|
||||
# Define specialized system prompts for each agent
|
||||
DATA_EXTRACTOR_PROMPT = """You are a highly specialized private equity agent focused on data extraction from various documents. Your expertise includes:
|
||||
1. Extracting key financial metrics (revenue, EBITDA, growth rates, etc.) from financial statements and reports
|
||||
2. Identifying and extracting important contract terms from legal documents
|
||||
3. Pulling out relevant market data from industry reports and analyses
|
||||
4. Extracting operational KPIs from management presentations and internal reports
|
||||
5. Identifying and extracting key personnel information from organizational charts and bios
|
||||
Provide accurate, structured data extracted from various document types to support investment analysis."""
|
||||
|
||||
SUMMARIZER_PROMPT = """You are an expert private equity agent specializing in summarizing complex documents. Your core competencies include:
|
||||
1. Distilling lengthy financial reports into concise executive summaries
|
||||
2. Summarizing legal documents, highlighting key terms and potential risks
|
||||
3. Condensing industry reports to capture essential market trends and competitive dynamics
|
||||
4. Summarizing management presentations to highlight key strategic initiatives and projections
|
||||
5. Creating brief overviews of technical documents, emphasizing critical points for non-technical stakeholders
|
||||
Deliver clear, concise summaries that capture the essence of various documents while highlighting information crucial for investment decisions."""
|
||||
|
||||
FINANCIAL_ANALYST_PROMPT = """You are a specialized private equity agent focused on financial analysis. Your key responsibilities include:
|
||||
1. Analyzing historical financial statements to identify trends and potential issues
|
||||
2. Evaluating the quality of earnings and potential adjustments to EBITDA
|
||||
3. Assessing working capital requirements and cash flow dynamics
|
||||
4. Analyzing capital structure and debt capacity
|
||||
5. Evaluating financial projections and underlying assumptions
|
||||
Provide thorough, insightful financial analysis to inform investment decisions and valuation."""
|
||||
|
||||
MARKET_ANALYST_PROMPT = """You are a highly skilled private equity agent specializing in market analysis. Your expertise covers:
|
||||
1. Analyzing industry trends, growth drivers, and potential disruptors
|
||||
2. Evaluating competitive landscape and market positioning
|
||||
3. Assessing market size, segmentation, and growth potential
|
||||
4. Analyzing customer dynamics, including concentration and loyalty
|
||||
5. Identifying potential regulatory or macroeconomic impacts on the market
|
||||
Deliver comprehensive market analysis to assess the attractiveness and risks of potential investments."""
|
||||
|
||||
OPERATIONAL_ANALYST_PROMPT = """You are an expert private equity agent focused on operational analysis. Your core competencies include:
|
||||
1. Evaluating operational efficiency and identifying improvement opportunities
|
||||
2. Analyzing supply chain and procurement processes
|
||||
3. Assessing sales and marketing effectiveness
|
||||
4. Evaluating IT systems and digital capabilities
|
||||
5. Identifying potential synergies in merger or add-on acquisition scenarios
|
||||
Provide detailed operational analysis to uncover value creation opportunities and potential risks."""
|
||||
|
||||
# Initialize specialized agents
|
||||
data_extractor_agent = Agent(
|
||||
agent_name="Data-Extractor",
|
||||
system_prompt=DATA_EXTRACTOR_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="data_extractor_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
summarizer_agent = Agent(
|
||||
agent_name="Document-Summarizer",
|
||||
system_prompt=SUMMARIZER_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="summarizer_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
financial_analyst_agent = Agent(
|
||||
agent_name="Financial-Analyst",
|
||||
system_prompt=FINANCIAL_ANALYST_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="financial_analyst_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
market_analyst_agent = Agent(
|
||||
agent_name="Market-Analyst",
|
||||
system_prompt=MARKET_ANALYST_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="market_analyst_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
operational_analyst_agent = Agent(
|
||||
agent_name="Operational-Analyst",
|
||||
system_prompt=OPERATIONAL_ANALYST_PROMPT,
|
||||
llm=model,
|
||||
max_loops=1,
|
||||
autosave=True,
|
||||
verbose=True,
|
||||
dynamic_temperature_enabled=True,
|
||||
saved_state_path="operational_analyst_agent.json",
|
||||
user_name="pe_firm",
|
||||
retry_attempts=1,
|
||||
context_length=200000,
|
||||
output_type="string",
|
||||
)
|
||||
|
||||
# Initialize the SwarmRouter
|
||||
router = SwarmRouter(
|
||||
name="pe-document-analysis-swarm",
|
||||
description="Analyze documents for private equity due diligence and investment decision-making",
|
||||
max_loops=1,
|
||||
agents=[
|
||||
data_extractor_agent,
|
||||
summarizer_agent,
|
||||
financial_analyst_agent,
|
||||
market_analyst_agent,
|
||||
operational_analyst_agent,
|
||||
],
|
||||
swarm_type="ConcurrentWorkflow", # or "SequentialWorkflow" or "ConcurrentWorkflow" or
|
||||
)
|
||||
|
||||
# Example usage
|
||||
if __name__ == "__main__":
|
||||
# Run a comprehensive private equity document analysis task
|
||||
result = router.run(
|
||||
"Where is the best place to find template term sheets for series A startups. Provide links and references"
|
||||
)
|
||||
print(result)
|
||||
|
||||
# Retrieve and print logs
|
||||
for log in router.get_logs():
|
||||
print(f"{log.timestamp} - {log.level}: {log.message}")
|
Loading…
Reference in new issue