@@ -5,13 +5,12 @@ import os
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from datetime import datetime
 from pathlib import Path
-from typing import Any, Dict, List, Tuple
+from typing import Any, Dict, List
 
 import aiohttp
 from dotenv import load_dotenv
 from rich.console import Console
 from rich.panel import Panel
 from rich.text import Text
 from rich.tree import Tree
 
 from swarms.agents.reasoning_duo import ReasoningDuo
@@ -135,7 +134,8 @@ async def _async_exa_search(
 
     # Filter out None keys AND None values from kwargs
     safe_kwargs = {
-        str(k): v for k, v in kwargs.items()
+        str(k): v
+        for k, v in kwargs.items()
         if k is not None and v is not None and str(k) != "None"
     }
 
@@ -151,7 +151,12 @@ async def _async_exa_search(
 
     # Only add safe_kwargs if they don't conflict with existing keys
    for key, value in safe_kwargs.items():
-        if key not in payload and key not in ["query", "useAutoprompt", "numResults", "contents"]:
+        if key not in payload and key not in [
+            "query",
+            "useAutoprompt",
+            "numResults",
+            "contents",
+        ]:
             payload[key] = value
 
     try:
@@ -422,8 +427,12 @@ class DeepResearchSwarm:
             queries = self.get_queries(query)
 
             if not queries:
-                error_msg = "No queries generated. Please check your input."
-                self.conversation.add(role="System", content=error_msg)
+                error_msg = (
+                    "No queries generated. Please check your input."
+                )
+                self.conversation.add(
+                    role="System", content=error_msg
+                )
                 return history_output_formatter(
                     self.conversation, type=self.output_type
                 )
@@ -448,7 +457,9 @@ class DeepResearchSwarm:
 
                 except Exception as e:
                     # Handle any errors in the thread
-                    error_msg = f"Error processing query '{q}': {str(e)}"
+                    error_msg = (
+                        f"Error processing query '{q}': {str(e)}"
+                    )
                     console.print(f"[bold red]{error_msg}[/bold red]")
                     self.conversation.add(
                         role="System",
@@ -466,7 +477,9 @@ class DeepResearchSwarm:
                     content=final_summary,
                 )
             except Exception as e:
-                error_msg = f"Error generating final summary: {str(e)}"
+                error_msg = (
+                    f"Error generating final summary: {str(e)}"
+                )
                 console.print(f"[bold red]{error_msg}[/bold red]")
                 self.conversation.add(
                     role="System",
@@ -482,10 +495,13 @@ class DeepResearchSwarm:
             if self.output_type.lower() == "json":
                 try:
                     import json
+
                     if isinstance(result, str):
                         # Try to parse and reformat for pretty printing
                         parsed = json.loads(result)
-                        return json.dumps(parsed, indent=2, ensure_ascii=False)
+                        return json.dumps(
+                            parsed, indent=2, ensure_ascii=False
+                        )
                 except (json.JSONDecodeError, TypeError):
                     # If parsing fails, return as-is
                     pass
@@ -495,7 +511,11 @@ class DeepResearchSwarm:
         except Exception as e:
             error_msg = f"Critical error in step execution: {str(e)}"
             console.print(f"[bold red]{error_msg}[/bold red]")
-            return {"error": error_msg} if self.output_type.lower() == "json" else error_msg
+            return (
+                {"error": error_msg}
+                if self.output_type.lower() == "json"
+                else error_msg
+            )
 
     def run(self, task: str):
         return self.step(task)
@@ -515,7 +535,9 @@ class DeepResearchSwarm:
             future = self.executor.submit(self.step, task)
             futures.append((task, future))
 
-    def parse_and_display_results(self, json_result: str, export_markdown: bool = True):
+    def parse_and_display_results(
+        self, json_result: str, export_markdown: bool = True
+    ):
         """
         Parse JSON results and display in rich format with optional markdown export.
 
@@ -529,18 +551,25 @@ class DeepResearchSwarm:
 
             # Create rich display
             console.print("\n" + "=" * 100, style="cyan")
-            console.print("🔬 DEEP RESEARCH RESULTS", style="bold cyan", justify="center")
+            console.print(
+                "🔬 DEEP RESEARCH RESULTS",
+                style="bold cyan",
+                justify="center",
+            )
             console.print("=" * 100, style="cyan")
 
             # Create conversation tree
             tree = Tree("🗣️ Research Conversation", style="bold blue")
-            markdown_content = ["# Deep Research Results\n", f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n"]
+            markdown_content = [
+                "# Deep Research Results\n",
+                f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n",
+            ]
 
             for i, entry in enumerate(data, 1):
                 if isinstance(entry, dict):
-                    role = entry.get('role', 'Unknown')
-                    content = entry.get('content', '')
-                    timestamp = entry.get('timestamp', '')
+                    role = entry.get("role", "Unknown")
+                    content = entry.get("content", "")
+                    timestamp = entry.get("timestamp", "")
 
                     # Get role info for display
                     role_info = self._get_role_display_info(role)
@@ -548,26 +577,52 @@ class DeepResearchSwarm:
                     # Create tree branch
                     branch_text = f"{role_info['emoji']} {role}"
                     if timestamp:
-                        time_part = timestamp.split()[-1] if ' ' in timestamp else timestamp[-8:]
+                        time_part = (
+                            timestamp.split()[-1]
+                            if " " in timestamp
+                            else timestamp[-8:]
+                        )
                         branch_text += f" ({time_part})"
 
-                    branch = tree.add(branch_text, style=role_info['style'])
+                    branch = tree.add(
+                        branch_text, style=role_info["style"]
+                    )
 
                     # Add content preview to tree
-                    content_preview = content[:150] + "..." if len(content) > 150 else content
-                    content_preview = content_preview.replace('\n', ' ')
+                    content_preview = (
+                        content[:150] + "..."
+                        if len(content) > 150
+                        else content
+                    )
+                    content_preview = content_preview.replace(
+                        "\n", " "
+                    )
                     branch.add(content_preview, style="dim")
 
                     # Add to markdown
                     markdown_content.append(f"\n## {i}. {role}")
                     if timestamp:
-                        markdown_content.append(f"**Timestamp:** {timestamp}")
+                        markdown_content.append(
+                            f"**Timestamp:** {timestamp}"
+                        )
                     markdown_content.append(f"\n{content}\n")
 
                     # Display full content for important entries
-                    if role.lower() in ['reasoning-agent-01'] and len(content) > 300:
-                        console.print(f"\n📋 {role} Full Response:", style="bold green")
-                        console.print(Panel(content, border_style="green", title=f"{role} Analysis"))
+                    if (
+                        role.lower() in ["reasoning-agent-01"]
+                        and len(content) > 300
+                    ):
+                        console.print(
+                            f"\n📋 {role} Full Response:",
+                            style="bold green",
+                        )
+                        console.print(
+                            Panel(
+                                content,
+                                border_style="green",
+                                title=f"{role} Analysis",
+                            )
+                        )
 
             # Display the tree
             console.print(tree)
@@ -580,20 +635,29 @@ class DeepResearchSwarm:
 
                 # Generate filename with timestamp
                 timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-                filename = results_dir / f"research_results_{timestamp}.md"
+                filename = (
+                    results_dir / f"research_results_{timestamp}.md"
+                )
 
                 # Write markdown file
-                with open(filename, 'w', encoding='utf-8') as f:
-                    f.write('\n'.join(markdown_content))
+                with open(filename, "w", encoding="utf-8") as f:
+                    f.write("\n".join(markdown_content))
 
-                console.print(f"\n💾 Results exported to: {filename}", style="bold green")
+                console.print(
+                    f"\n💾 Results exported to: {filename}",
+                    style="bold green",
+                )
 
-            console.print("\n✅ Research analysis complete!", style="bold cyan")
+            console.print(
+                "\n✅ Research analysis complete!", style="bold cyan"
+            )
 
         except json.JSONDecodeError as e:
             console.print(f"❌ Error parsing JSON: {e}", style="red")
         except Exception as e:
-            console.print(f"❌ Error displaying results: {e}", style="red")
+            console.print(
+                f"❌ Error displaying results: {e}", style="red"
+            )
 
     def _get_role_display_info(self, role: str) -> Dict[str, str]:
         """Get display information for different conversation roles."""
@@ -605,7 +669,9 @@ class DeepResearchSwarm:
         }
 
         role_lower = role.lower()
-        return role_map.get(role_lower, {"emoji": "🤖", "style": "white"})
+        return role_map.get(
+            role_lower, {"emoji": "🤖", "style": "white"}
+        )
 
 
 # Example usage