From 954c2b520ede754ce8ec432a2bf127f1f96b3eba Mon Sep 17 00:00:00 2001 From: Kye Gomez Date: Sun, 29 Jun 2025 22:54:59 -0700 Subject: [PATCH] model providers in swarms examples --- docs/mkdocs.yml | 56 +++---- docs/swarms/examples/model_providers.md | 171 +++++++++++++++++++++ examples/multi_modal/multimodal_example.py | 10 +- stream_example.py | 7 +- swarms/structs/agent.py | 14 +- swarms/tools/base_tool.py | 4 +- swarms/utils/formatter.py | 2 +- 7 files changed, 215 insertions(+), 49 deletions(-) create mode 100644 docs/swarms/examples/model_providers.md diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index dbbb8924..5e8b06e9 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -52,29 +52,17 @@ extra: link: https://x.com/swarms_corp - icon: fontawesome/brands/github link: https://github.com/kyegomez/swarms - - icon: fontawesome/brands/twitter - link: https://x.com/swarms_corp - icon: fontawesome/brands/discord link: https://discord.gg/jM3Z6M9uMq + - icon: fontawesome/brands/youtube + link: https://www.youtube.com/@kyegomez3242 + - icon: fontawesome/brands/linkedin + link: https://www.linkedin.com/company/swarms-corp/ analytics: provider: google property: G-MPE9C65596 - # alternate: - # - name: English - # link: / - # lang: en - # - name: 简体中文 - # link: /zh/ - # lang: zh - # - name: 日本語 - # link: /ja/ - # lang: ja - # - name: 한국어 - # link: /ko/ - # lang: ko - theme: name: material custom_dir: overrides @@ -290,20 +278,7 @@ nav: - Swarm Ecosystem: "swarms/concept/swarm_ecosystem.md" - Swarms Products: "swarms/products.md" - - Contributors: - - Swarms Framework Architecture: "swarms/concept/framework_architecture.md" - - Bounty Program: "corporate/bounty_program.md" - - Contributing to Documentation: "contributors/docs.md" - - Contributing Tools/Custom Plugins for Swarms: "contributors/tools.md" - - Contributing: - - Contributing: "swarms/contributing.md" - - Tests: "swarms/framework/test.md" - - Code Cleanliness: "swarms/framework/code_cleanliness.md" - - Philosophy: "swarms/concept/philosophy.md" - - Changelog: - - Swarms 5.6.8: "swarms/changelog/5_6_8.md" - - Swarms 5.8.1: "swarms/changelog/5_8_1.md" - - Swarms 5.9.2: "swarms/changelog/changelog_new.md" + - Examples: - Overview: "examples/index.md" @@ -320,7 +295,8 @@ nav: - Agent with Multiple Images: "swarms/examples/multiple_images.md" - Agents with Vision and Tool Usage: "swarms/examples/vision_tools.md" - Gradio Chat Interface: "swarms/ui/main.md" - - Various Model Providers: + - Model Providers: + - Overview: "swarms/examples/model_providers.md" - OpenAI: "swarms/examples/openai_example.md" - Anthropic: "swarms/examples/claude.md" - Groq: "swarms/examples/groq.md" @@ -424,8 +400,24 @@ nav: - Overview: "swarms_rs/overview.md" - Agents: "swarms_rs/agents.md" - - Resources: + + - Contributors: - Overview: "governance/main.md" + - Swarms Framework Architecture: "swarms/concept/framework_architecture.md" + - Bounty Program: "corporate/bounty_program.md" + - Contributing to Documentation: "contributors/docs.md" + - Contributing Tools/Custom Plugins for Swarms: "contributors/tools.md" + - Contributing: + - Contributing: "swarms/contributing.md" + - Tests: "swarms/framework/test.md" + - Code Cleanliness: "swarms/framework/code_cleanliness.md" + - Philosophy: "swarms/concept/philosophy.md" + - Changelog: + - Swarms 5.6.8: "swarms/changelog/5_6_8.md" + - Swarms 5.8.1: "swarms/changelog/5_8_1.md" + - Swarms 5.9.2: "swarms/changelog/changelog_new.md" + + # - Tokenomics: "web3/token.md" diff --git a/docs/swarms/examples/model_providers.md 
b/docs/swarms/examples/model_providers.md
new file mode 100644
index 00000000..9b739bab
--- /dev/null
+++ b/docs/swarms/examples/model_providers.md
@@ -0,0 +1,171 @@
+# Model Providers Overview
+
+Swarms supports a vast array of model providers, giving you the flexibility to choose the best model for your specific use case. Whether you need high-performance inference, cost-effective solutions, or specialized capabilities, Swarms has you covered.
+
+## Supported Model Providers
+
+| Provider | Description | Documentation |
+|----------|-------------|---------------|
+| **OpenAI** | Industry-leading language models including GPT-4, GPT-4o, and GPT-4o-mini. Perfect for general-purpose tasks, creative writing, and complex reasoning. | [OpenAI Integration](openai_example.md) |
+| **Anthropic/Claude** | Advanced AI models known for their safety, helpfulness, and reasoning capabilities. Claude models excel at analysis, coding, and creative tasks. | [Claude Integration](claude.md) |
+| **Groq** | Ultra-fast inference platform offering real-time AI responses. Ideal for applications requiring low latency and high throughput. | [Groq Integration](groq.md) |
+| **Cohere** | Enterprise-grade language models with strong performance on business applications, text generation, and semantic search. | [Cohere Integration](cohere.md) |
+| **DeepSeek** | Advanced reasoning models including the DeepSeek Reasoner (R1). Excellent for complex problem-solving and analytical tasks. | [DeepSeek Integration](deepseek.md) |
+| **Ollama** | Local model deployment platform allowing you to run open-source models on your own infrastructure. No API keys required. | [Ollama Integration](ollama.md) |
+| **OpenRouter** | Unified API gateway providing access to hundreds of models from various providers through a single interface. | [OpenRouter Integration](openrouter.md) |
+| **XAI** | xAI's Grok models offering unique capabilities for research, analysis, and creative tasks with advanced reasoning abilities. | [XAI Integration](xai.md) |
+| **vLLM** | High-performance inference library for serving large language models with optimized memory usage and throughput. | [vLLM Integration](vllm_integration.md) |
+| **Llama4** | Meta's latest open-source language models including Llama-4-Maverick and Llama-4-Scout variants with expert routing capabilities. | [Llama4 Integration](llama4.md) |
+
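+In most cases, switching providers comes down to the `model_name` string you pass to `Agent`; hosted models are usually addressed by a bare model identifier or a `provider/model` style prefix. The identifiers below are illustrative only, so confirm the exact strings in the integration guides linked above.
+
+```python
+from swarms import Agent
+
+# Illustrative model_name strings; check each provider guide for the exact identifiers
+openai_agent = Agent(agent_name="OpenAI-Agent", model_name="gpt-4o-mini")
+claude_agent = Agent(agent_name="Claude-Agent", model_name="claude-3-sonnet-20240229")
+groq_agent = Agent(agent_name="Groq-Agent", model_name="groq/llama-3.1-8b-instant")
+local_agent = Agent(agent_name="Local-Agent", model_name="ollama/llama2")
+```
+
+Local backends such as Ollama and vLLM follow the same pattern, so the rest of your agent code does not need to change when you switch providers.
+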
+## Quick Start
+
+All model providers follow a consistent pattern in Swarms. Here's the basic template:
+
+```python
+from swarms import Agent
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize agent with your chosen model
+agent = Agent(
+    agent_name="Your-Agent-Name",
+    model_name="gpt-4o-mini",  # Model identifier varies by provider
+    system_prompt="Your system prompt here",
+    agent_description="Description of what your agent does.",
+)
+
+# Run your agent
+response = agent.run("Your query here")
+```
+
+## Model Selection Guide
+
+### For High-Performance Applications
+
+- **OpenAI GPT-4o**: Best overall performance and reasoning
+
+- **Anthropic Claude**: Excellent safety and analysis capabilities
+
+- **DeepSeek R1**: Advanced reasoning and problem-solving
+
+### For Cost-Effective Solutions
+
+- **OpenAI GPT-4o-mini**: Great performance at lower cost
+
+- **Ollama**: Free local deployment
+
+- **OpenRouter**: Access to cost-effective models
+
+### For Real-Time Applications
+
+- **Groq**: Ultra-fast inference
+
+- **vLLM**: Optimized for high throughput
+
+### For Specialized Tasks
+
+- **Llama4**: Expert routing for complex workflows
+
+- **XAI Grok**: Advanced research capabilities
+
+- **Cohere**: Strong business applications
+
+## Environment Setup
+
+Most providers require API keys. Add them to your `.env` file:
+
+```bash
+# OpenAI
+OPENAI_API_KEY=your_openai_key
+
+# Anthropic
+ANTHROPIC_API_KEY=your_anthropic_key
+
+# Groq
+GROQ_API_KEY=your_groq_key
+
+# Cohere
+COHERE_API_KEY=your_cohere_key
+
+# DeepSeek
+DEEPSEEK_API_KEY=your_deepseek_key
+
+# OpenRouter
+OPENROUTER_API_KEY=your_openrouter_key
+
+# XAI
+XAI_API_KEY=your_xai_key
+```
+
+!!! note "No API Key Required"
+    Ollama and vLLM can be run locally without API keys, making them perfect for development and testing.
+
+## Advanced Features
+
+### Multi-Model Workflows
+
+Swarms allows you to create workflows that use different models for different tasks:
+
+```python
+from swarms import Agent, ConcurrentWorkflow
+
+# Research agent using Claude for analysis
+research_agent = Agent(
+    agent_name="Research-Agent",
+    model_name="claude-3-sonnet-20240229",
+    system_prompt="You are a research expert."
+)
+
+# Creative agent using GPT-4o for content generation
+creative_agent = Agent(
+    agent_name="Creative-Agent",
+    model_name="gpt-4o",
+    system_prompt="You are a creative content expert."
+)
+
+# Workflow combining both agents
+workflow = ConcurrentWorkflow(
+    name="Research-Creative-Workflow",
+    agents=[research_agent, creative_agent]
+)
+
+# Run both agents on the same task concurrently
+results = workflow.run(
+    "Research renewable energy trends and draft a short blog post."
+)
+```
+
+### Model Routing
+
+Automatically route tasks to the most appropriate model:
+
+```python
+from swarms import Agent, ModelRouter
+
+# Define model preferences for different task types
+model_router = ModelRouter(
+    models={
+        "analysis": "claude-3-sonnet-20240229",
+        "creative": "gpt-4o",
+        "fast": "gpt-4o-mini",
+        "local": "ollama/llama2"
+    }
+)
+
+# Agent will automatically choose the best model
+agent = Agent(
+    agent_name="Smart-Agent",
+    llm=model_router,
+    system_prompt="You are a versatile assistant."
+)
+```
+
+## Getting Help
+
+- **Documentation**: Each provider has detailed documentation with examples
+
+- **Community**: Join the Swarms community for support and best practices
+
+- **Issues**: Report bugs and request features on GitHub
+
+- **Discussions**: Share your use cases and learn from others
+
+!!! success "Ready to Get Started?"
+    Choose a model provider from the table above and follow the detailed integration guide. Each provider offers unique capabilities that can enhance your Swarms applications.
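+
+## Appendix: Falling Back to a Local Model
+
+When you mix hosted and local providers, a useful pattern is to retry a failed hosted call against a local model (for example, after a rate limit or a network error). The sketch below is a minimal illustration, assuming an Ollama instance is running locally and that the model identifiers shown are valid for your environment:
+
+```python
+from swarms import Agent
+
+# Hosted model used as the primary provider (assumes OPENAI_API_KEY is set)
+primary_agent = Agent(
+    agent_name="Primary-Agent",
+    model_name="gpt-4o-mini",
+    system_prompt="You are a helpful assistant.",
+)
+
+# Local model served by Ollama, used only if the hosted call fails
+fallback_agent = Agent(
+    agent_name="Fallback-Agent",
+    model_name="ollama/llama2",
+    system_prompt="You are a helpful assistant.",
+)
+
+def run_with_fallback(task: str) -> str:
+    """Try the hosted provider first, then fall back to the local model."""
+    try:
+        return primary_agent.run(task)
+    except Exception as error:
+        print(f"Primary provider failed ({error}); retrying with the local model.")
+        return fallback_agent.run(task)
+
+result = run_with_fallback("Summarize why multi-provider support matters for agent workflows.")
+```
+
+Because both agents expose the same `run` interface, the fallback logic stays independent of whichever providers you choose.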
diff --git a/examples/multi_modal/multimodal_example.py b/examples/multi_modal/multimodal_example.py index 29060e96..19a2c996 100644 --- a/examples/multi_modal/multimodal_example.py +++ b/examples/multi_modal/multimodal_example.py @@ -1,18 +1,16 @@ -import json import logging from swarms.structs import Agent from swarms.prompts.logistics import ( Quality_Control_Agent_Prompt, ) -from swarms import BaseTool # Set up debug logging logging.basicConfig(level=logging.DEBUG) # Image for analysis -# factory_image="image.png" # normal image of a factory +# factory_image="image.png" # normal image of a factory -factory_image = "image2.png" # image of a burning factory +factory_image = "image2.png" # image of a burning factory def security_analysis(danger_level: str) -> str: @@ -52,13 +50,11 @@ def security_analysis(danger_level: str) -> str: 🚨 Operations may need to be suspended Recommendations: Immediate intervention required, evacuate if necessary, implement emergency protocols, and conduct thorough security review.""" - + else: return f"ERROR: Invalid danger level '{danger_level}'. Must be 'low', 'medium', or 'high'." - - # Custom system prompt that includes tool usage custom_system_prompt = f""" {Quality_Control_Agent_Prompt} diff --git a/stream_example.py b/stream_example.py index f7e3bca1..bc467691 100644 --- a/stream_example.py +++ b/stream_example.py @@ -7,8 +7,11 @@ agent = Agent( streaming_on=True, # 🔥 This enables real streaming! max_loops=1, print_on=True, # By Default its False, raw streaming !! + output_type="all", ) # This will now stream in real-time with beautiful UI! -response = agent.run("Tell me a detailed story about Humanity colonizing the stars") -print(response) \ No newline at end of file +response = agent.run( + "Tell me a detailed story about Humanity colonizing the stars" +) +print(response) diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index f8175cda..87eb131d 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -641,11 +641,15 @@ class Agent: ) def short_memory_init(self): - if ( - self.agent_name is not None - or self.agent_description is not None - ): - prompt = f"\n Your Name: {self.agent_name} \n\n Your Description: {self.agent_description} \n\n {self.system_prompt}" + prompt = "" + + # Add agent name, description, and instructions to the prompt + if self.agent_name is not None: + prompt += f"\n Name: {self.agent_name}" + elif self.agent_description is not None: + prompt += f"\n Description: {self.agent_description}" + elif self.system_prompt is not None: + prompt += f"\n Instructions: {self.system_prompt}" else: prompt = self.system_prompt diff --git a/swarms/tools/base_tool.py b/swarms/tools/base_tool.py index 0aa57d44..af08f11e 100644 --- a/swarms/tools/base_tool.py +++ b/swarms/tools/base_tool.py @@ -2226,8 +2226,8 @@ class BaseTool(BaseModel): # Handle None API response gracefully by returning empty results if api_response is None: self._log_if_verbose( - "warning", - "API response is None, returning empty results. This may indicate the LLM did not return a valid response." + "warning", + "API response is None, returning empty results. This may indicate the LLM did not return a valid response.", ) return [] if not return_as_string else [] diff --git a/swarms/utils/formatter.py b/swarms/utils/formatter.py index c224fbed..5a4b8c2e 100644 --- a/swarms/utils/formatter.py +++ b/swarms/utils/formatter.py @@ -50,7 +50,7 @@ class Formatter: style (str, optional): The style of the panel. Defaults to "bold blue". 
""" random_color = choose_random_color() - + panel = Panel( content, title=title, style=f"bold {random_color}" )