parent 7312393a71
commit b3e3f68ee8
@@ -0,0 +1,111 @@
from swarms import Agent, ConcurrentWorkflow
from swarms_tools import coin_gecko_coin_api

# Create specialized agents for Solana, Bitcoin, Ethereum, Cardano, and Polkadot analysis using CoinGecko API

market_analyst_solana = Agent(
    agent_name="Market-Trend-Analyst-Solana",
    system_prompt="""You are a market trend analyst specializing in Solana (SOL).
Analyze SOL price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
Focus on:
- Technical indicators and chart patterns for Solana
- Volume analysis and market depth for SOL
- Short-term and medium-term trend identification
- Support and resistance levels

Always use the CoinGecko API tool to fetch up-to-date Solana market data for your analysis.
Provide actionable insights based on this data.""",
    model_name="claude-sonnet-4-20250514",
    max_loops=1,
    temperature=0.2,
    tools=[coin_gecko_coin_api],
)

market_analyst_bitcoin = Agent(
    agent_name="Market-Trend-Analyst-Bitcoin",
    system_prompt="""You are a market trend analyst specializing in Bitcoin (BTC).
Analyze BTC price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
Focus on:
- Technical indicators and chart patterns for Bitcoin
- Volume analysis and market depth for BTC
- Short-term and medium-term trend identification
- Support and resistance levels

Always use the CoinGecko API tool to fetch up-to-date Bitcoin market data for your analysis.
Provide actionable insights based on this data.""",
    model_name="claude-sonnet-4-20250514",
    max_loops=1,
    temperature=0.2,
    tools=[coin_gecko_coin_api],
)

market_analyst_ethereum = Agent(
    agent_name="Market-Trend-Analyst-Ethereum",
    system_prompt="""You are a market trend analyst specializing in Ethereum (ETH).
Analyze ETH price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
Focus on:
- Technical indicators and chart patterns for Ethereum
- Volume analysis and market depth for ETH
- Short-term and medium-term trend identification
- Support and resistance levels

Always use the CoinGecko API tool to fetch up-to-date Ethereum market data for your analysis.
Provide actionable insights based on this data.""",
    model_name="claude-sonnet-4-20250514",
    max_loops=1,
    temperature=0.2,
    tools=[coin_gecko_coin_api],
)

market_analyst_cardano = Agent(
    agent_name="Market-Trend-Analyst-Cardano",
    system_prompt="""You are a market trend analyst specializing in Cardano (ADA).
Analyze ADA price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
Focus on:
- Technical indicators and chart patterns for Cardano
- Volume analysis and market depth for ADA
- Short-term and medium-term trend identification
- Support and resistance levels

Always use the CoinGecko API tool to fetch up-to-date Cardano market data for your analysis.
Provide actionable insights based on this data.""",
    model_name="claude-sonnet-4-20250514",
    max_loops=1,
    temperature=0.2,
    tools=[coin_gecko_coin_api],
)

market_analyst_polkadot = Agent(
    agent_name="Market-Trend-Analyst-Polkadot",
    system_prompt="""You are a market trend analyst specializing in Polkadot (DOT).
Analyze DOT price movements, volume patterns, and market sentiment using real-time data from the CoinGecko API.
Focus on:
- Technical indicators and chart patterns for Polkadot
- Volume analysis and market depth for DOT
- Short-term and medium-term trend identification
- Support and resistance levels

Always use the CoinGecko API tool to fetch up-to-date Polkadot market data for your analysis.
Provide actionable insights based on this data.""",
    model_name="claude-sonnet-4-20250514",
    max_loops=1,
    temperature=0.2,
    tools=[coin_gecko_coin_api],
)

# Create concurrent workflow
crypto_analysis_swarm = ConcurrentWorkflow(
    agents=[
        market_analyst_solana,
        market_analyst_bitcoin,
        market_analyst_ethereum,
        market_analyst_cardano,
        market_analyst_polkadot,
    ],
    max_loops=1,
)

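# Dispatch the same task to all five analysts concurrently; each agent analyzes its own coin.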
crypto_analysis_swarm.run(
    "Analyze your assigned coin and create a comprehensive analysis of it."
)
@@ -0,0 +1,267 @@
import time
from typing import Dict, List

from swarms import Agent
from swarms.utils.litellm_tokenizer import count_tokens


class LongFormGenerator:
    """
    A class for generating long-form content using the swarms Agent framework.

    This class provides methods for creating comprehensive, detailed content
    with support for continuation and sectioned generation.
    """

    def __init__(self, model: str = "claude-sonnet-4-20250514"):
        """
        Initialize the LongFormGenerator with specified model.

        Args:
            model (str): The model to use for content generation
        """
        self.model = model

    def estimate_tokens(self, text: str) -> int:
        """
        Estimate token count for text.

        Args:
            text (str): The text to estimate tokens for

        Returns:
            int: Estimated token count
        """
        return count_tokens(text=text, model=self.model)

    def create_expansion_prompt(
        self, topic: str, requirements: Dict
    ) -> str:
        """
        Create optimized prompt for long-form content.

        Args:
            topic (str): The main topic to generate content about
            requirements (Dict): Requirements for content generation

        Returns:
            str: Formatted prompt for content generation
        """
        structure_requirements = []
        if "sections" in requirements:
            for i, section in enumerate(requirements["sections"]):
                structure_requirements.append(
                    f"{i+1}. {section['title']} - {section.get('description', 'Provide comprehensive analysis')}"
                )

        length_guidance = (
            f"Target length: {requirements.get('min_words', 2000)}-{requirements.get('max_words', 4000)} words"
            if "min_words" in requirements
            else ""
        )

        prompt = f"""Create a comprehensive, detailed analysis of: {topic}
REQUIREMENTS:
- This is a professional-level document requiring thorough treatment
- Each section must be substantive with detailed explanations
- Include specific examples, case studies, and technical details where relevant
- Provide multiple perspectives and comprehensive coverage
- {length_guidance}
STRUCTURE:
{chr(10).join(structure_requirements)}
QUALITY STANDARDS:
- Demonstrate deep expertise and understanding
- Include relevant technical specifications and details
- Provide actionable insights and practical applications
- Use professional language appropriate for expert audience
- Ensure logical flow and comprehensive coverage of all aspects
Begin your comprehensive analysis:"""

        return prompt

    def generate_with_continuation(
        self, topic: str, requirements: Dict, max_attempts: int = 3
    ) -> str:
        """
        Generate long-form content with continuation if needed.

        Args:
            topic (str): The main topic to generate content about
            requirements (Dict): Requirements for content generation
            max_attempts (int): Maximum number of continuation attempts

        Returns:
            str: Generated long-form content
        """
        initial_prompt = self.create_expansion_prompt(
            topic, requirements
        )

        # Create agent for initial generation
        agent = Agent(
            agent_name="LongForm Content Generator",
            system_prompt=initial_prompt,
            model_name=self.model,
            max_loops=1,
            temperature=0.7,
            max_tokens=4000,
        )

        # Generate initial response
        content = agent.run(topic)
        target_words = requirements.get("min_words", 2000)

        # Check if continuation is needed
        word_count = len(content.split())
        continuation_count = 0

        while (
            word_count < target_words
            and continuation_count < max_attempts
        ):
            continuation_prompt = f"""Continue and expand the previous analysis. The current response is {word_count} words, but we need approximately {target_words} words total for comprehensive coverage.
Please continue with additional detailed analysis, examples, and insights. Focus on areas that could benefit from deeper exploration or additional perspectives. Maintain the same professional tone and analytical depth.
Continue the analysis:"""

            # Create continuation agent
            continuation_agent = Agent(
                agent_name="Content Continuation Agent",
                system_prompt=continuation_prompt,
                model_name=self.model,
                max_loops=1,
                temperature=0.7,
                max_tokens=4000,
            )

            # Generate continuation
            continuation_content = continuation_agent.run(
                f"Continue the analysis on: {topic}"
            )
            content += "\n\n" + continuation_content
            word_count = len(content.split())
            continuation_count += 1

            # Rate limiting
            time.sleep(1)

        return content

    def generate_sectioned_content(
        self,
        topic: str,
        sections: List[Dict],
        combine_sections: bool = True,
    ) -> Dict:
        """
        Generate content section by section for maximum length.

        Args:
            topic (str): The main topic to generate content about
            sections (List[Dict]): List of section definitions
            combine_sections (bool): Whether to combine all sections into one document

        Returns:
            Dict: Dictionary containing individual sections and optionally combined content
        """
        results = {}
        combined_content = ""

        for section in sections:
            section_prompt = f"""Write a comprehensive, detailed section on: {section['title']}
Context: This is part of a larger analysis on {topic}
Requirements for this section:
- Provide {section.get('target_words', 500)}-{section.get('max_words', 800)} words of detailed content
- {section.get('description', 'Provide thorough analysis with examples and insights')}
- Include specific examples, technical details, and practical applications
- Use professional language suitable for expert audience
- Ensure comprehensive coverage of all relevant aspects
Write the complete section:"""

            # Create agent for this section
            section_agent = Agent(
                agent_name=f"Section Generator - {section['title']}",
                system_prompt=section_prompt,
                model_name=self.model,
                max_loops=1,
                temperature=0.7,
                max_tokens=3000,
            )

            # Generate section content
            section_content = section_agent.run(
                f"Generate section: {section['title']} for topic: {topic}"
            )
            results[section["title"]] = section_content

            if combine_sections:
                combined_content += (
                    f"\n\n## {section['title']}\n\n{section_content}"
                )

            # Rate limiting between sections
            time.sleep(1)

        if combine_sections:
            results["combined"] = combined_content.strip()

        return results


# Example usage
if __name__ == "__main__":
    # Initialize the generator
    generator = LongFormGenerator()

    # Example topic and requirements
    topic = "Artificial Intelligence in Healthcare"
    requirements = {
        "min_words": 2500,
        "max_words": 4000,
        "sections": [
            {
                "title": "Current Applications",
                "description": "Analyze current AI applications in healthcare",
                "target_words": 600,
                "max_words": 800,
            },
            {
                "title": "Future Prospects",
                "description": "Discuss future developments and potential",
                "target_words": 500,
                "max_words": 700,
            },
        ],
    }

    # Generate comprehensive content
    content = generator.generate_with_continuation(
        topic, requirements
    )
    print("Generated Content:")
    print(content)
    print(f"\nWord count: {len(content.split())}")

    # Generate sectioned content
    sections = [
        {
            "title": "AI in Medical Imaging",
            "description": "Comprehensive analysis of AI applications in medical imaging",
            "target_words": 500,
            "max_words": 700,
        },
        {
            "title": "AI in Drug Discovery",
            "description": "Detailed examination of AI in pharmaceutical research",
            "target_words": 600,
            "max_words": 800,
        },
    ]

    sectioned_results = generator.generate_sectioned_content(
        topic, sections
    )
    print("\nSectioned Content:")
    for section_title, section_content in sectioned_results.items():
        if section_title != "combined":
            print(f"\n--- {section_title} ---")
            print(section_content[:200] + "...")
@@ -0,0 +1,29 @@
from swarms import Agent


def generate_comprehensive_content(topic, sections):
    prompt = f"""You are tasked with creating a comprehensive, detailed analysis of {topic}.
This should be a thorough, professional-level document suitable for expert review.

Structure your response with the following sections, ensuring each is substantive and detailed:
{chr(10).join([f"{i+1}. {section} - Provide extensive detail with examples and analysis" for i, section in enumerate(sections)])}

For each section:
- Include multiple subsections where appropriate
- Provide specific examples and case studies
- Offer detailed explanations of complex concepts
- Include relevant technical details and specifications
- Discuss implications and considerations thoroughly

Aim for comprehensive coverage that demonstrates deep expertise. This is a professional document that should be thorough and substantive throughout."""

    agent = Agent(
        agent_name="Comprehensive Content Generator",
        system_prompt=prompt,
        model_name="claude-sonnet-4-20250514",
        max_loops=1,
        temperature=0.5,
        max_tokens=4000,
    )

    return agent.run(topic)
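
# A minimal usage sketch (illustrative only; the topic and section names below are
# placeholder values, not part of the original file):
if __name__ == "__main__":
    report = generate_comprehensive_content(
        "Artificial Intelligence in Healthcare",
        ["Current Applications", "Future Prospects"],
    )
    print(report)
    print(f"\nWord count: {len(report.split())}")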