diff --git a/Dockerfile b/Dockerfile
index 4a33f503..385ec004 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,25 +1,25 @@
 # ==================================
-# Use an official Python runtime as a parent image
+# Use an official Python runtime as a parent image
 FROM python:3.9-slim
-RUN apt-get update && apt-get -y install libgl1-mesa-dev libglib2.0-0 build-essential; apt-get clean
-RUN pip install opencv-contrib-python-headless
+#RUN apt-get update && apt-get -y install libgl1-mesa-dev libglib2.0-0 build-essential; apt-get clean
+#RUN pip install opencv-contrib-python-headless
 # Set environment variables
 ENV PYTHONDONTWRITEBYTECODE 1
 ENV PYTHONUNBUFFERED 1
 # Set the working directory in the container
-WORKDIR /usr/src/swarm_cloud
+WORKDIR /usr/src/swarms
 # Install Python dependencies
-# COPY requirements.txt and pyproject.toml if you're using poetry for dependency management
+# COPY requirements.txt and pyproject.toml if you're using poetry for dependency management
 COPY requirements.txt .
 RUN pip install --upgrade pip
 RUN pip install --no-cache-dir -r requirements.txt
-# Install the 'swarms' package, assuming it's available on PyPI
+# Install the 'swarms' package, assuming it's available on PyPI
 RUN pip install swarms
 # Copy the rest of the application
@@ -27,18 +27,18 @@ COPY . .
 # Add entrypoint script if needed
 # COPY ./entrypoint.sh .
-# RUN chmod +x /usr/src/swarm_cloud/entrypoint.sh
+# RUN chmod +x /usr/src/swarm_cloud/entrypoint.sh
-# Expose port if your application has a web interface
+# Expose port if your application has a web interface
 # EXPOSE 5000
-# # Define environment variable for the swarm to work
+# # Define environment variable for the swarm to work
 # ENV SWARM_API_KEY=your_swarm_api_key_here
-# # Add Docker CMD or ENTRYPOINT script to run the application
+# # Add Docker CMD or ENTRYPOINT script to run the application
 # CMD python your_swarm_startup_script.py
-# Or use the entrypoint script if you have one
-# ENTRYPOINT ["/usr/src/swarm_cloud/entrypoint.sh"]
+# Or use the entrypoint script if you have one
+# ENTRYPOINT ["/usr/src/swarm_cloud/entrypoint.sh"]
-# If you're using `CMD` to execute a Python script, make sure it's executable
+# If you're using `CMD` to execute a Python script, make sure it's executable
 # RUN chmod +x your_swarm_startup_script.py
diff --git a/README.md b/README.md
index aaec0ba8..f6266ae7 100644
--- a/README.md
+++ b/README.md
@@ -42,14 +42,9 @@ Features:
 ```python
 import os
-from dotenv import load_dotenv
-
 # Import the OpenAIChat model and the Agent struct
 from swarms import Agent, OpenAIChat
-# Load the environment variables
-load_dotenv()
-
 # Get the API key from the environment
 api_key = os.environ.get("OPENAI_API_KEY")
@@ -138,13 +133,8 @@ The `Worker` is a simple all-in-one agent equipped with an LLM, tools, and RAG f
 # Importing necessary modules
 import os
-from dotenv import load_dotenv
-
 from swarms import OpenAIChat, Worker, tool
-# Loading environment variables from .env file
-load_dotenv()
-
 # Retrieving the OpenAI API key from environment variables
 api_key = os.getenv("OPENAI_API_KEY")
@@ -279,12 +269,8 @@ Sequential Workflow enables you to sequentially execute tasks with `Agent` and t
 ```python
 import os
-from dotenv import load_dotenv
-
 from swarms import Agent, OpenAIChat, SequentialWorkflow
-load_dotenv()
-
 # Load
the environment variables api_key = os.getenv("OPENAI_API_KEY") @@ -336,13 +322,8 @@ for task in workflow.tasks: ```python import os -from dotenv import load_dotenv - from swarms import Agent, ConcurrentWorkflow, OpenAIChat, Task -# Load environment variables from .env file -load_dotenv() - # Load environment variables llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")) agent = Agent(llm=llm, max_loops=1) @@ -368,13 +349,8 @@ workflow.run() ```python import os -from dotenv import load_dotenv - from swarms import Agent, OpenAIChat, RecursiveWorkflow, Task -# Load environment variables from .env file -load_dotenv() - # Load environment variables llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")) agent = Agent(llm=llm, max_loops=1) @@ -407,12 +383,8 @@ Plug-and-Play Integration: The structure provides a seamless integration with va ```python import os -from dotenv import load_dotenv - from swarms import Anthropic, Gemini, Mixtral, ModelParallelizer, OpenAIChat -load_dotenv() - # API Keys anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") openai_api_key = os.getenv("OPENAI_API_KEY") @@ -449,17 +421,12 @@ A Plug in and play conversational agent with `GPT4`, `Mixytral`, or any of our m ```python import os -from dotenv import load_dotenv - from swarms import Conversation, OpenAIChat conv = Conversation( time_enabled=True, ) -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") @@ -502,14 +469,9 @@ interactive_conversation(llm) ```python import os -from dotenv import load_dotenv - # Import the OpenAIChat model and the Agent struct from swarms import Agent, OpenAIChat, SwarmNetwork -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") @@ -559,14 +521,8 @@ print(out) ```python import os -from dotenv import load_dotenv - from swarms.structs import Agent, OpenAIChat, Task -# Load the environment variables -load_dotenv() - - # Define a function to be used as the action def my_action(): print("Action executed") @@ -626,14 +582,10 @@ print(f"Task result: {task.result}") ```python import os -from dotenv import load_dotenv from transformers import AutoModelForCausalLM, AutoTokenizer from pydantic import BaseModel from swarms import BlocksList, Gemini, GPT4VisionAPI, Mixtral, OpenAI, ToolAgent -# Load the environment variables -load_dotenv() - # Get the environment variables openai_api_key = os.getenv("OPENAI_API_KEY") gemini_api_key = os.getenv("GEMINI_API_KEY") @@ -774,8 +726,6 @@ Here's a production grade swarm ready for real-world deployment in a factory and ```python import os -from dotenv import load_dotenv - from swarms.models import GPT4VisionAPI from swarms.prompts.logistics import ( Efficiency_Agent_Prompt, @@ -788,8 +738,6 @@ from swarms.prompts.logistics import ( ) from swarms.structs import Agent -# Load ENV -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") # GPT4VisionAPI @@ -885,14 +833,9 @@ Run the agent with multiple modalities useful for various real-world tasks in ma # Description: This is an example of how to use the Agent class to run a multi-modal workflow import os -from dotenv import load_dotenv - from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.structs import Agent -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") diff --git a/docs/applications/discord.md b/docs/applications/discord.md index 
dd7de16c..a90d64b0 100644
--- a/docs/applications/discord.md
+++ b/docs/applications/discord.md
@@ -12,7 +12,7 @@ This code provides a structure for a Discord bot with advanced features such as
 1. Ensure that the necessary libraries are installed:
 ```bash
-pip install discord.py python-dotenv dalle3 invoke openai
+pip install discord.py dalle3 invoke openai
 ```
 2. Create a `.env` file in the same directory as your bot script and add the following:
diff --git a/docs/examples/revgpt.md b/docs/examples/revgpt.md
index 69107b40..10f5155e 100644
--- a/docs/examples/revgpt.md
+++ b/docs/examples/revgpt.md
@@ -35,23 +35,16 @@ The abstraction provided in `revgpt.py` is designed to simplify your interaction
 1. **Import the Necessary Modules:**
 ```python
-from dotenv import load_dotenv
 from revgpt import AbstractChatGPT
 ```
-2. **Load Environment Variables:**
-
-```python
-load_dotenv()
-```
-
-3. **Initialize the ChatGPT Abstract Class:**
+2. **Initialize the ChatGPT Abstract Class:**
 ```python
 chat = AbstractChatGPT(api_key=os.getenv("ACCESS_TOKEN"), **config)
 ```
-4. **Start Interacting with ChatGPT:**
+3. **Start Interacting with ChatGPT:**
 ```python
 response = chat.ask("Hello, ChatGPT!")
diff --git a/docs/swarms/agents/omni_agent.md b/docs/swarms/agents/omni_agent.md
index 888e824f..52bb462c 100644
--- a/docs/swarms/agents/omni_agent.md
+++ b/docs/swarms/agents/omni_agent.md
@@ -41,14 +41,9 @@ Initialize the `OmniModalAgent` and communicate with it:
 ```python
 import os
-from dotenv import load_dotenv
-
 from swarms.agents.omni_modal_agent import OmniModalAgent, OpenAIChat
 from swarms.models import OpenAIChat
-# Load the environment variables
-load_dotenv()
-
 # Get the API key from the environment
 api_key = os.environ.get("OPENAI_API_KEY")
diff --git a/docs/swarms/memory/azure_openai.md b/docs/swarms/memory/azure_openai.md
index 01b169b7..e4029fb2 100644
--- a/docs/swarms/memory/azure_openai.md
+++ b/docs/swarms/memory/azure_openai.md
@@ -10,7 +10,7 @@ Before we begin, it's essential to have the following prerequisites in place:
 1. **Python**: You'll need to have Python installed on your system. This guide assumes you're using Python 3.6 or later.
 2. **Azure Subscription**: You'll need an active Azure subscription to access Azure OpenAI services.
 3. **Azure OpenAI Resource**: Create an Azure OpenAI resource in your Azure subscription.
-4. **Python Packages**: Install the required Python packages, including `python-dotenv` and `swarms`.
+4. **Python Packages**: Install the required Python packages, including `swarms`.
 ## Setting up the Environment:
 To kick things off, we'll set up our development environment and install the necessary dependencies.
@@ -27,10 +27,10 @@ python -m venv myenv
 source myenv/bin/activate # On Windows, use `myenv\Scripts\activate`
 ```
-3. **Install Required Packages**: Install the `python-dotenv` and `swarms` packages using pip.
+3. **Install Required Packages**: Install the `swarms` package using pip.
 ```
-pip install python-dotenv swarms
+pip install swarms
 ```
 4. **Create a `.env` File**: In the root directory of your project, create a new file called `.env`. This file will store your Azure OpenAI credentials and configuration settings.
@@ -50,12 +50,8 @@ Now that we've set up our environment, let's dive into the code that connects to
 ```python
 import os
-from dotenv import load_dotenv
 from swarms import AzureOpenAI
-# Load the environment variables
-load_dotenv()
-
 # Create an instance of the AzureOpenAI class
 model = AzureOpenAI(
 azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
@@ -68,11 +64,9 @@ model = AzureOpenAI(
 ## Let's break down this code:
-1. **Import Statements**: We import the necessary modules, including `os` for interacting with the operating system, `load_dotenv` from `python-dotenv` to load environment variables, and `AzureOpenAI` from `swarms` to interact with the Azure OpenAI service.
-
-2. **Load Environment Variables**: We use `load_dotenv()` to load the environment variables stored in the `.env` file we created earlier.
+1. **Import Statements**: We import the necessary modules, including `os` for interacting with the operating system and `AzureOpenAI` from `swarms` to interact with the Azure OpenAI service.
-3. **Create AzureOpenAI Instance**: We create an instance of the `AzureOpenAI` class by passing in the required configuration parameters:
+2. **Create AzureOpenAI Instance**: We create an instance of the `AzureOpenAI` class by passing in the required configuration parameters:
 - `azure_endpoint`: The endpoint URL for your Azure OpenAI resource.
 - `deployment_name`: The name of the deployment you want to use.
 - `openai_api_version`: The version of the OpenAI API you want to use.
diff --git a/docs/swarms/models/gemini.md b/docs/swarms/models/gemini.md
index d5b1b44a..7edbb8c5 100644
--- a/docs/swarms/models/gemini.md
+++ b/docs/swarms/models/gemini.md
@@ -15,7 +15,6 @@ Before using Gemini, ensure that you have the required dependencies installed. Y
 ```bash
 pip install swarms
 pip install google-generativeai
-pip install python-dotenv
 ```
 ### Class: Gemini
diff --git a/playground/agents/multi_modal_auto_agent_example.py b/playground/agents/multi_modal_auto_agent_example.py
index 65f8fa2b..a65889b8 100644
--- a/playground/agents/multi_modal_auto_agent_example.py
+++ b/playground/agents/multi_modal_auto_agent_example.py
@@ -1,13 +1,8 @@
 # Description: This is an example of how to use the Agent class to run a multi-modal workflow
 import os
-from dotenv import load_dotenv
-
 from swarms import Agent, GPT4VisionAPI
-# Load the environment variables
-load_dotenv()
-
 # Get the API key from the environment
 api_key = os.environ.get("OPENAI_API_KEY")
diff --git a/playground/agents/perimeter_defense_agent.py b/playground/agents/perimeter_defense_agent.py
index d235fa22..c2c3116c 100644
--- a/playground/agents/perimeter_defense_agent.py
+++ b/playground/agents/perimeter_defense_agent.py
@@ -1,13 +1,9 @@
 import os
-from dotenv import load_dotenv
-
 import swarms.prompts.security_team as stsp
 from swarms.models import GPT4VisionAPI
 from swarms.structs import Agent
-# Load environment variables and initialize the Vision API
-load_dotenv()
 api_key = os.getenv("OPENAI_API_KEY")
 llm = GPT4VisionAPI(openai_api_key=api_key)
diff --git a/playground/agents/simple_agent_example.py b/playground/agents/simple_agent_example.py
index b79b8f59..c3ebd981 100644
--- a/playground/agents/simple_agent_example.py
+++ b/playground/agents/simple_agent_example.py
@@ -1,7 +1,5 @@
 import os
-from dotenv import load_dotenv
-
 from swarms import (
 Conversation,
 OpenAIChat,
@@ -15,9 +13,6 @@ conv = Conversation(
 time_enabled=True,
 )
-# Load the environment variables
-load_dotenv()
-
 # Get the API key from the environment
 api_key =
os.environ.get("OPENAI_API_KEY") diff --git a/playground/agents/worker_example.py b/playground/agents/worker_example.py index a2117e46..2f875aa9 100644 --- a/playground/agents/worker_example.py +++ b/playground/agents/worker_example.py @@ -1,14 +1,9 @@ # Importing necessary modules import os -from dotenv import load_dotenv - from swarms import OpenAIChat from swarms.agents.worker_agent import Worker -# Loading environment variables from .env file -load_dotenv() - # Retrieving the OpenAI API key from environment variables api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/demos/accountant_team/account_team2_example.py b/playground/demos/accountant_team/account_team2_example.py index 6ad030a9..a7dc99d2 100644 --- a/playground/demos/accountant_team/account_team2_example.py +++ b/playground/demos/accountant_team/account_team2_example.py @@ -1,7 +1,5 @@ import os -from dotenv import load_dotenv - from swarms.models import Anthropic, OpenAIChat from swarms.prompts.accountant_swarm_prompts import ( DECISION_MAKING_PROMPT, @@ -12,7 +10,6 @@ from swarms.structs import Agent from swarms.utils.pdf_to_text import pdf_to_text # Environment variables -load_dotenv() anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/demos/ad_gen/ad_gen_example.py b/playground/demos/ad_gen/ad_gen_example.py index 978ab502..466ac359 100644 --- a/playground/demos/ad_gen/ad_gen_example.py +++ b/playground/demos/ad_gen/ad_gen_example.py @@ -1,13 +1,10 @@ import os import random -from dotenv import load_dotenv - from swarms.models import OpenAIChat from swarms.models.stable_diffusion import StableDiffusion from swarms.structs import Agent -load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") stability_api_key = os.getenv("STABILITY_API_KEY") diff --git a/playground/demos/ai_acceleerated_learning/main.py b/playground/demos/ai_acceleerated_learning/main.py index 44eba542..427565bc 100644 --- a/playground/demos/ai_acceleerated_learning/main.py +++ b/playground/demos/ai_acceleerated_learning/main.py @@ -2,15 +2,11 @@ import concurrent import csv from swarms import Agent, OpenAIChat from swarms.memory import ChromaDB -from dotenv import load_dotenv from swarms.utils.parse_code import extract_code_from_markdown from swarms.utils.file_processing import create_file from swarms.utils.loguru_logger import logger -# Load ENV -load_dotenv() - # Gemini gemini = OpenAIChat() diff --git a/playground/demos/ai_research_team/main_example.py b/playground/demos/ai_research_team/main_example.py index bda9e0de..bb19df95 100644 --- a/playground/demos/ai_research_team/main_example.py +++ b/playground/demos/ai_research_team/main_example.py @@ -1,7 +1,5 @@ import os -from dotenv import load_dotenv - from swarms.models import Anthropic, OpenAIChat from swarms.prompts.ai_research_team import ( PAPER_IMPLEMENTOR_AGENT_PROMPT, @@ -12,7 +10,6 @@ from swarms.utils.pdf_to_text import pdf_to_text # Base llms # Environment variables -load_dotenv() anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/demos/autoswarm/autoswarm.py b/playground/demos/autoswarm/autoswarm.py index 309c88ea..c3226486 100644 --- a/playground/demos/autoswarm/autoswarm.py +++ b/playground/demos/autoswarm/autoswarm.py @@ -1,11 +1,9 @@ import os -from dotenv import load_dotenv from swarms.models import OpenAIChat from swarms.structs import Agent import swarms.prompts.autoswarm as sdsp # Load environment variables and initialize the OpenAI 
Chat model -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") llm = OpenAIChat(model_name="gpt-4", openai_api_key=api_key) diff --git a/playground/demos/developer_swarm/main_example.py b/playground/demos/developer_swarm/main_example.py index 0a2e2a95..d212b571 100644 --- a/playground/demos/developer_swarm/main_example.py +++ b/playground/demos/developer_swarm/main_example.py @@ -17,13 +17,10 @@ Documentation agent -> Tests agent import os -from dotenv import load_dotenv - from swarms.models import OpenAIChat from swarms.prompts.programming import DOCUMENTATION_SOP, TEST_SOP from swarms.structs import Agent -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/demos/education/education_example.py b/playground/demos/education/education_example.py index 31c08f0d..19889f8d 100644 --- a/playground/demos/education/education_example.py +++ b/playground/demos/education/education_example.py @@ -1,13 +1,10 @@ import os -from dotenv import load_dotenv - import swarms.prompts.education as edu_prompts from swarms import Agent, SequentialWorkflow from swarms.models import OpenAIChat # Load environment variables -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") stability_api_key = os.getenv("STABILITY_API_KEY") diff --git a/playground/demos/gemini_benchmarking/gemini_chat_example.py b/playground/demos/gemini_benchmarking/gemini_chat_example.py index 2ea6a900..24d8dfd6 100644 --- a/playground/demos/gemini_benchmarking/gemini_chat_example.py +++ b/playground/demos/gemini_benchmarking/gemini_chat_example.py @@ -1,12 +1,8 @@ import os -from dotenv import load_dotenv - from swarms.models.gemini import Gemini from swarms.prompts.react import react_prompt -load_dotenv() - api_key = os.environ["GEMINI_API_KEY"] # Establish the prompt and image diff --git a/playground/demos/gemini_benchmarking/gemini_react_example.py b/playground/demos/gemini_benchmarking/gemini_react_example.py index 37765baf..176d8c52 100644 --- a/playground/demos/gemini_benchmarking/gemini_react_example.py +++ b/playground/demos/gemini_benchmarking/gemini_react_example.py @@ -1,12 +1,8 @@ import os -from dotenv import load_dotenv - from swarms.models.gemini import Gemini from swarms.prompts.react import react_prompt -load_dotenv() - api_key = os.environ["GEMINI_API_KEY"] # Establish the prompt and image diff --git a/playground/demos/gemini_benchmarking/gemini_vcot_example.py b/playground/demos/gemini_benchmarking/gemini_vcot_example.py index 86951bd5..fa85ed63 100644 --- a/playground/demos/gemini_benchmarking/gemini_vcot_example.py +++ b/playground/demos/gemini_benchmarking/gemini_vcot_example.py @@ -1,13 +1,8 @@ import os -from dotenv import load_dotenv - from swarms.models import Gemini from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("GEMINI_API_KEY") diff --git a/playground/demos/grupa/app_example.py b/playground/demos/grupa/app_example.py index ff5fc27d..f261f9dd 100644 --- a/playground/demos/grupa/app_example.py +++ b/playground/demos/grupa/app_example.py @@ -1,6 +1,5 @@ import os -from dotenv import load_dotenv from termcolor import colored from swarms.models import OpenAIChat @@ -8,9 +7,6 @@ from swarms.prompts.code_interpreter import CODE_INTERPRETER from swarms.prompts.programming import DOCUMENTATION_SOP, TEST_SOP from swarms.structs import Agent -load_dotenv() - - FEATURE = ( "Implement an all-new signup system in typescript using supabase" ) diff --git 
a/playground/demos/langchain_example/langchain_example.py b/playground/demos/langchain_example/langchain_example.py index 0e47684e..896010d3 100644 --- a/playground/demos/langchain_example/langchain_example.py +++ b/playground/demos/langchain_example/langchain_example.py @@ -1,13 +1,9 @@ import os -from dotenv import load_dotenv from langchain.llms import OpenAIChat from swarms import Agent -# Loading environment variables from .env file -load_dotenv() - # Initialize the model llm = OpenAIChat( openai_api_key=os.getenv("OPENAI_API_KEY"), diff --git a/playground/demos/llm_with_conversation/main_example.py b/playground/demos/llm_with_conversation/main_example.py index a9e6c42a..0e928861 100644 --- a/playground/demos/llm_with_conversation/main_example.py +++ b/playground/demos/llm_with_conversation/main_example.py @@ -1,13 +1,8 @@ import os -from dotenv import load_dotenv - # Import the OpenAIChat model and the Agent struct from swarms.models import OpenAIChat -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") diff --git a/playground/demos/logistics/logistics_example.py b/playground/demos/logistics/logistics_example.py index 48d8b9ce..18878975 100644 --- a/playground/demos/logistics/logistics_example.py +++ b/playground/demos/logistics/logistics_example.py @@ -1,7 +1,5 @@ import os -from dotenv import load_dotenv - from swarms.models import GPT4VisionAPI from swarms.prompts.logistics import ( Efficiency_Agent_Prompt, @@ -15,8 +13,6 @@ from swarms.prompts.logistics import ( from swarms.structs import Agent # from swarms.utils.banana_wrapper import banana - -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") # GPT4VisionAPI or llama diff --git a/playground/demos/multi_modal_chain_of_thought/vcot_example.py b/playground/demos/multi_modal_chain_of_thought/vcot_example.py index 50a02c3d..8bcd658f 100644 --- a/playground/demos/multi_modal_chain_of_thought/vcot_example.py +++ b/playground/demos/multi_modal_chain_of_thought/vcot_example.py @@ -1,14 +1,10 @@ import os -from dotenv import load_dotenv from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT from swarms.structs import Agent -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") diff --git a/playground/demos/multimodal_tot/idea2img_example.py b/playground/demos/multimodal_tot/idea2img_example.py index 4a6c1da3..4a5988a8 100644 --- a/playground/demos/multimodal_tot/idea2img_example.py +++ b/playground/demos/multimodal_tot/idea2img_example.py @@ -2,7 +2,6 @@ import datetime import os import streamlit as st -from dotenv import load_dotenv from swarms.models import OpenAIChat from swarms.models.gpt4_vision_api import GPT4VisionAPI @@ -10,7 +9,6 @@ from swarms.models.stable_diffusion import StableDiffusion from swarms.structs import Agent # Load environment variables -load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") stability_api_key = os.getenv("STABLE_API_KEY") @@ -137,7 +135,6 @@ class Idea2Image(Agent): # Load environment variables and initialize the models -load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") stability_api_key = os.getenv("STABLE_API_KEY") vision_api = GPT4VisionAPI(api_key=openai_api_key) diff --git a/playground/demos/multimodal_tot/main_example.py b/playground/demos/multimodal_tot/main_example.py index 2a0494dc..fe028fac 100644 --- a/playground/demos/multimodal_tot/main_example.py +++ 
b/playground/demos/multimodal_tot/main_example.py @@ -17,15 +17,11 @@ task: Generate an image of a swarm of bees -> Image generator -> GPT4V evaluates import os -from dotenv import load_dotenv from termcolor import colored from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.models.stable_diffusion import StableDiffusion -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") stable_api_key = os.environ.get("STABLE_API_KEY") diff --git a/playground/demos/nutrition/nutrition_example.py b/playground/demos/nutrition/nutrition_example.py index b4331db6..b50ee2cb 100644 --- a/playground/demos/nutrition/nutrition_example.py +++ b/playground/demos/nutrition/nutrition_example.py @@ -2,13 +2,10 @@ import base64 import os import requests -from dotenv import load_dotenv from swarms.models import OpenAIChat from swarms.structs import Agent -# Load environment variables -load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") # Define prompts for various tasks diff --git a/playground/demos/optimize_llm_stack/vortex_example.py b/playground/demos/optimize_llm_stack/vortex_example.py index 5badb2fd..d8bd5451 100644 --- a/playground/demos/optimize_llm_stack/vortex_example.py +++ b/playground/demos/optimize_llm_stack/vortex_example.py @@ -1,14 +1,10 @@ import os -from dotenv import load_dotenv - from swarms.models import OpenAIChat from swarms.structs import Agent # import modal -load_dotenv() - # Model llm = OpenAIChat( openai_api_key=os.getenv("OPENAI_API_KEY"), diff --git a/playground/demos/personal_assistant/better_communication_example.py b/playground/demos/personal_assistant/better_communication_example.py index e0ff75cc..05bca19e 100644 --- a/playground/demos/personal_assistant/better_communication_example.py +++ b/playground/demos/personal_assistant/better_communication_example.py @@ -3,14 +3,10 @@ import time import pygame import speech_recognition as sr -from dotenv import load_dotenv from playsound import playsound from swarms import OpenAIChat, OpenAITTS -# Load the environment variables -load_dotenv() - # Get the API key from the environment openai_api_key = os.environ.get("OPENAI_API_KEY") diff --git a/playground/demos/personal_stylist/personal_stylist_example.py b/playground/demos/personal_stylist/personal_stylist_example.py index dde64cb7..3f0bba2c 100644 --- a/playground/demos/personal_stylist/personal_stylist_example.py +++ b/playground/demos/personal_stylist/personal_stylist_example.py @@ -1,7 +1,5 @@ import os -from dotenv import load_dotenv - from swarms.models import GPT4VisionAPI from swarms.prompts.personal_stylist import ( ACCESSORIES_STYLIST_AGENT_PROMPT, @@ -12,8 +10,6 @@ from swarms.prompts.personal_stylist import ( ) from swarms.structs import Agent -# Load environment variables -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") # Initialize GPT4VisionAPI diff --git a/playground/demos/security_team/security_team_example.py b/playground/demos/security_team/security_team_example.py index d391fe32..81afa29e 100644 --- a/playground/demos/security_team/security_team_example.py +++ b/playground/demos/security_team/security_team_example.py @@ -1,14 +1,11 @@ import os -from dotenv import load_dotenv from termcolor import colored import swarms.prompts.security_team as stsp from swarms.models import GPT4VisionAPI from swarms.structs import Agent -# Load environment variables and initialize the Vision API -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") llm = 
GPT4VisionAPI(openai_api_key=api_key) diff --git a/playground/demos/swarm_hackathon/main.py b/playground/demos/swarm_hackathon/main.py index 2e8eed8c..8835250f 100644 --- a/playground/demos/swarm_hackathon/main.py +++ b/playground/demos/swarm_hackathon/main.py @@ -3,15 +3,10 @@ import csv import os from swarms import Gemini, Agent from swarms.memory import ChromaDB -from dotenv import load_dotenv from swarms.utils.parse_code import extract_code_from_markdown from swarms.utils.file_processing import create_file from swarms.utils.loguru_logger import logger -# Load ENV -load_dotenv() - - gemini = Gemini( model_name="gemini-pro", gemini_api_key=os.getenv("GEMINI_API_KEY"), diff --git a/playground/demos/swarm_of_mma_manufacturing/main_example.py b/playground/demos/swarm_of_mma_manufacturing/main_example.py index 02a3cc1a..e2f5afc2 100644 --- a/playground/demos/swarm_of_mma_manufacturing/main_example.py +++ b/playground/demos/swarm_of_mma_manufacturing/main_example.py @@ -16,13 +16,11 @@ health security agent -> quality control agent -> productivity agent -> safety a import os -from dotenv import load_dotenv from termcolor import colored from swarms.models import GPT4VisionAPI from swarms.structs import Agent -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") # GPT4VisionAPI diff --git a/playground/demos/urban_planning/urban_planning_example.py b/playground/demos/urban_planning/urban_planning_example.py index 2a52ced7..e7b5019c 100644 --- a/playground/demos/urban_planning/urban_planning_example.py +++ b/playground/demos/urban_planning/urban_planning_example.py @@ -1,13 +1,9 @@ import os -from dotenv import load_dotenv - import swarms.prompts.urban_planning as upp from swarms.models import GPT4VisionAPI, OpenAIChat from swarms.structs import Agent, SequentialWorkflow -# Load environment variables -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") stability_api_key = os.getenv("STABILITY_API_KEY") diff --git a/playground/demos/visuo/text_to_sql_agent_example.py b/playground/demos/visuo/text_to_sql_agent_example.py index 67f53e97..27a4d759 100644 --- a/playground/demos/visuo/text_to_sql_agent_example.py +++ b/playground/demos/visuo/text_to_sql_agent_example.py @@ -1,12 +1,8 @@ import os -from dotenv import load_dotenv - # Import the OpenAIChat model and the Agent struct from swarms import Agent, HuggingfaceLLM -# Load the environment variables -load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") diff --git a/playground/demos/xray/xray_example.py b/playground/demos/xray/xray_example.py index 20e89e6d..4f9b7df1 100644 --- a/playground/demos/xray/xray_example.py +++ b/playground/demos/xray/xray_example.py @@ -1,7 +1,5 @@ import os -from dotenv import load_dotenv - from swarms.models import GPT4VisionAPI, OpenAIChat from swarms.prompts.xray_swarm_prompt import ( TREATMENT_PLAN_PROMPT, @@ -9,8 +7,6 @@ from swarms.prompts.xray_swarm_prompt import ( ) from swarms.structs.agent import Agent -# Load environment variables -load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") # Function to analyze an X-ray image diff --git a/playground/examples/example_agent.py b/playground/examples/example_agent.py index e96fa12c..97597806 100644 --- a/playground/examples/example_agent.py +++ b/playground/examples/example_agent.py @@ -1,14 +1,9 @@ import os import sys -from dotenv import load_dotenv - # Import the OpenAIChat model and the Agent struct from swarms import OpenAIChat, Agent -# Load the environment variables -load_dotenv() - # Get the API key from the 
environment api_key = os.environ.get("OPENAI_API_KEY") diff --git a/playground/examples/example_concurrentworkflow.py b/playground/examples/example_concurrentworkflow.py index cc1e3a2f..69708431 100644 --- a/playground/examples/example_concurrentworkflow.py +++ b/playground/examples/example_concurrentworkflow.py @@ -1,10 +1,6 @@ import os -from dotenv import load_dotenv from swarms import OpenAIChat, Task, ConcurrentWorkflow, Agent -# Load environment variables from .env file -load_dotenv() - # Load environment variables llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")) agent = Agent(llm=llm, max_loops=1) diff --git a/playground/examples/example_logistics.py b/playground/examples/example_logistics.py index 9de44346..a0c93d93 100644 --- a/playground/examples/example_logistics.py +++ b/playground/examples/example_logistics.py @@ -1,6 +1,5 @@ from swarms.structs import Agent import os -from dotenv import load_dotenv from swarms.models import GPT4VisionAPI from swarms.prompts.logistics import ( Health_Security_Agent_Prompt, @@ -12,8 +11,6 @@ from swarms.prompts.logistics import ( Efficiency_Agent_Prompt, ) -# Load ENV -load_dotenv() api_key = os.getenv("OPENAI_API_KEY") # GPT4VisionAPI diff --git a/playground/examples/example_recursiveworkflow.py b/playground/examples/example_recursiveworkflow.py index 9760b606..f60fe997 100644 --- a/playground/examples/example_recursiveworkflow.py +++ b/playground/examples/example_recursiveworkflow.py @@ -1,9 +1,6 @@ import os -from dotenv import load_dotenv from swarms import OpenAIChat, Task, RecursiveWorkflow, Agent -# Load environment variables from .env file -load_dotenv() # Load environment variables llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")) diff --git a/playground/examples/example_sequentialworkflow.py b/playground/examples/example_sequentialworkflow.py index 72919dcc..8aceaf92 100644 --- a/playground/examples/example_sequentialworkflow.py +++ b/playground/examples/example_sequentialworkflow.py @@ -1,8 +1,5 @@ import os from swarms import OpenAIChat, Agent, SequentialWorkflow -from dotenv import load_dotenv - -load_dotenv() # Load the environment variables api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/examples/example_simple_conversation_agent.py b/playground/examples/example_simple_conversation_agent.py index 49c7694c..df4e2a6b 100644 --- a/playground/examples/example_simple_conversation_agent.py +++ b/playground/examples/example_simple_conversation_agent.py @@ -1,7 +1,5 @@ import os -from dotenv import load_dotenv - from swarms import ( OpenAIChat, Conversation, @@ -11,9 +9,6 @@ conv = Conversation( time_enabled=True, ) -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") diff --git a/playground/examples/example_swarmnetwork.py b/playground/examples/example_swarmnetwork.py index de9c53b6..49074a38 100644 --- a/playground/examples/example_swarmnetwork.py +++ b/playground/examples/example_swarmnetwork.py @@ -1,13 +1,8 @@ import os -from dotenv import load_dotenv - # Import the OpenAIChat model and the Agent struct from swarms import OpenAIChat, Agent, SwarmNetwork -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") diff --git a/playground/examples/example_task.py b/playground/examples/example_task.py index c2ade96a..ed9c5e96 100644 --- a/playground/examples/example_task.py +++ b/playground/examples/example_task.py @@ -1,12 +1,7 @@ import os -from 
dotenv import load_dotenv - from swarms.structs import Agent, OpenAIChat, Task -# Load the environment variables -load_dotenv() - # Define a function to be used as the action def my_action(): diff --git a/playground/examples/example_worker.py b/playground/examples/example_worker.py index 8ae32984..8e6d053c 100644 --- a/playground/examples/example_worker.py +++ b/playground/examples/example_worker.py @@ -1,11 +1,7 @@ # Importing necessary modules import os -from dotenv import load_dotenv from swarms import Worker, OpenAIChat, tool -# Loading environment variables from .env file -load_dotenv() - # Retrieving the OpenAI API key from environment variables api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/models/azure_openai_example.py b/playground/models/azure_openai_example.py index 6bba72f9..6cf25104 100644 --- a/playground/models/azure_openai_example.py +++ b/playground/models/azure_openai_example.py @@ -1,10 +1,6 @@ import os -from dotenv import load_dotenv from swarms import AzureOpenAI -# Load the environment variables -load_dotenv() - # Create an instance of the AzureOpenAI class model = AzureOpenAI( azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), diff --git a/playground/models/gemini_example.py b/playground/models/gemini_example.py index 75553bfc..a839b372 100644 --- a/playground/models/gemini_example.py +++ b/playground/models/gemini_example.py @@ -1,11 +1,7 @@ import os -from dotenv import load_dotenv - from swarms.models.gemini import Gemini -load_dotenv() - api_key = os.environ["GEMINI_API_KEY"] # Initialize the model diff --git a/playground/models/tts_speech_example.py b/playground/models/tts_speech_example.py index 6c33f944..058ce989 100644 --- a/playground/models/tts_speech_example.py +++ b/playground/models/tts_speech_example.py @@ -1,11 +1,7 @@ import os -from dotenv import load_dotenv - from swarms import OpenAITTS -load_dotenv() - tts = OpenAITTS( model_name="tts-1-1106", voice="onyx", diff --git a/playground/structs/agent_with_longterm_memory.py b/playground/structs/agent_with_longterm_memory.py index 588d6546..3ab160b1 100644 --- a/playground/structs/agent_with_longterm_memory.py +++ b/playground/structs/agent_with_longterm_memory.py @@ -1,13 +1,8 @@ import os -from dotenv import load_dotenv - # Import the OpenAIChat model and the Agent struct from swarms import Agent, ChromaDB, OpenAIChat -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") diff --git a/playground/structs/agent_with_tools_example.py b/playground/structs/agent_with_tools_example.py index dc0dff4b..7b750cdf 100644 --- a/playground/structs/agent_with_tools_example.py +++ b/playground/structs/agent_with_tools_example.py @@ -10,16 +10,10 @@ tool decorated func [search_api] -> agent which parses the docs of the tool func import os -from dotenv import load_dotenv - # Import the OpenAIChat model and the Agent struct from swarms import Agent, OpenAIChat from swarms.tools.tool import tool -# Load the environment variables -load_dotenv() - - # Define a tool @tool def search_api(query: str, description: str): diff --git a/playground/structs/autoscaler_example.py b/playground/structs/autoscaler_example.py index aa7cf0c0..2d653aec 100644 --- a/playground/structs/autoscaler_example.py +++ b/playground/structs/autoscaler_example.py @@ -1,15 +1,10 @@ import os -from dotenv import load_dotenv - # Import the OpenAIChat model and the Agent struct from swarms.models import OpenAIChat from swarms.structs import Agent from 
swarms.structs.autoscaler import AutoScaler -# Load the environment variables -load_dotenv() - # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") diff --git a/playground/structs/company_example.py b/playground/structs/company_example.py index abdee607..2778335f 100644 --- a/playground/structs/company_example.py +++ b/playground/structs/company_example.py @@ -1,12 +1,8 @@ import os -from dotenv import load_dotenv - from swarms import Agent, OpenAIChat from swarms.structs.company import Company -load_dotenv() - llm = OpenAIChat( openai_api_key=os.getenv("OPENAI_API_KEY"), max_tokens=4000 ) diff --git a/playground/structs/concurrent_workflow_example.py b/playground/structs/concurrent_workflow_example.py index 8d8babde..e2555e2a 100644 --- a/playground/structs/concurrent_workflow_example.py +++ b/playground/structs/concurrent_workflow_example.py @@ -1,12 +1,7 @@ import os -from dotenv import load_dotenv - from swarms import Agent, ConcurrentWorkflow, OpenAIChat, Task -# Load environment variables from .env file -load_dotenv() - # Load environment variables llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")) agent = Agent(llm=llm, max_loops=1) diff --git a/playground/structs/godmode_example.py b/playground/structs/godmode_example.py index 53e6b32e..1fd25006 100644 --- a/playground/structs/godmode_example.py +++ b/playground/structs/godmode_example.py @@ -1,12 +1,8 @@ import os -from dotenv import load_dotenv - from swarms import ModelParallelizer from swarms.models import Anthropic, Gemini, Mixtral, OpenAIChat -load_dotenv() - # API Keys anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/structs/hierarchical_swarm.py b/playground/structs/hierarchical_swarm.py index 04bea216..7115bb25 100644 --- a/playground/structs/hierarchical_swarm.py +++ b/playground/structs/hierarchical_swarm.py @@ -1,11 +1,7 @@ import os from swarms import OpenAIChat, Agent -from dotenv import load_dotenv -# Load environment variables -load_dotenv() - # Create a chat instance llm = OpenAIChat( api_key=os.getenv("OPENAI_API_KEY"), diff --git a/playground/structs/kyle_hackathon.py b/playground/structs/kyle_hackathon.py index 1de48f1b..13e39407 100644 --- a/playground/structs/kyle_hackathon.py +++ b/playground/structs/kyle_hackathon.py @@ -1,17 +1,11 @@ import os -from dotenv import load_dotenv - from swarms import Agent, OpenAIChat from swarms.agents.multion_agent import MultiOnAgent from swarms.memory.chroma_db import ChromaDB from swarms.tools.tool import tool from swarms.utils.code_interpreter import SubprocessCodeInterpreter -# Load the environment variables -load_dotenv() - - # Memory chroma_db = ChromaDB() diff --git a/playground/structs/multi_modal_rag_agent.py b/playground/structs/multi_modal_rag_agent.py index ff758e28..c1a17f24 100644 --- a/playground/structs/multi_modal_rag_agent.py +++ b/playground/structs/multi_modal_rag_agent.py @@ -1,16 +1,11 @@ # Importing necessary modules import os -from dotenv import load_dotenv - from swarms import Agent, OpenAIChat from swarms.memory.chroma_db import ChromaDB from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT from swarms.tools.tool import tool -# Loading environment variables from .env file -load_dotenv() - # Getting the Gemini API key from environment variables gemini_api_key = os.getenv("GEMINI_API_KEY") openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/playground/structs/multi_process_workflow.py b/playground/structs/multi_process_workflow.py 
index 3c7f39c0..984a040b 100644 --- a/playground/structs/multi_process_workflow.py +++ b/playground/structs/multi_process_workflow.py @@ -1,10 +1,6 @@ import os from swarms import Gemini, Agent from swarms.structs.multi_process_workflow import MultiProcessWorkflow -from dotenv import load_dotenv - -# Load the environment variables -load_dotenv() # Gemini API key api_key = os.getenv("GEMINI_API_KEY") diff --git a/playground/structs/recursive_example.py b/playground/structs/recursive_example.py index cc3dcf0f..d8c945c3 100644 --- a/playground/structs/recursive_example.py +++ b/playground/structs/recursive_example.py @@ -1,12 +1,8 @@ import os -from dotenv import load_dotenv from swarms import Agent, OpenAIChat, RecursiveWorkflow, Task -# Load environment variables from .env file -load_dotenv() - # Load environment variables llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")) agent = Agent(llm=llm, max_loops=1) diff --git a/playground/structs/task_example.py b/playground/structs/task_example.py index c2ade96a..ed9c5e96 100644 --- a/playground/structs/task_example.py +++ b/playground/structs/task_example.py @@ -1,12 +1,7 @@ import os -from dotenv import load_dotenv - from swarms.structs import Agent, OpenAIChat, Task -# Load the environment variables -load_dotenv() - # Define a function to be used as the action def my_action(): diff --git a/playground/tools/agent_with_tools_example.py b/playground/tools/agent_with_tools_example.py index 35b61703..663df16d 100644 --- a/playground/tools/agent_with_tools_example.py +++ b/playground/tools/agent_with_tools_example.py @@ -1,18 +1,12 @@ import os -from dotenv import load_dotenv - from swarms import Agent, OpenAIChat from swarms.tools.tool import tool -load_dotenv() - api_key = os.environ.get("OPENAI_API_KEY") - llm = OpenAIChat(api_key=api_key) - @tool def search_api(query: str) -> str: """Search API diff --git a/scripts/auto_tests_docs/auto_docs.py b/scripts/auto_tests_docs/auto_docs.py index 570793c8..a1e25459 100644 --- a/scripts/auto_tests_docs/auto_docs.py +++ b/scripts/auto_tests_docs/auto_docs.py @@ -3,8 +3,6 @@ import inspect import os import threading -from dotenv import load_dotenv - from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP from swarms import OpenAIChat from swarms.structs.majority_voting import MajorityVoting @@ -12,12 +10,6 @@ from swarms.structs.stackoverflow_swarm import StackOverflowSwarm from swarms.structs.task_queue_base import TaskQueueBase from swarms.structs.tool_json_schema import JSON -########## - - -#################### -load_dotenv() - api_key = os.getenv("OPENAI_API_KEY") model = OpenAIChat( diff --git a/scripts/auto_tests_docs/auto_docs_functions.py b/scripts/auto_tests_docs/auto_docs_functions.py index 37bf376d..787e3adf 100644 --- a/scripts/auto_tests_docs/auto_docs_functions.py +++ b/scripts/auto_tests_docs/auto_docs_functions.py @@ -3,13 +3,9 @@ import os import sys import threading -from dotenv import load_dotenv - from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP from swarms import OpenAIChat -load_dotenv() - api_key = os.getenv("OPENAI_API_KEY") model = OpenAIChat( diff --git a/scripts/auto_tests_docs/auto_docs_omni.py b/scripts/auto_tests_docs/auto_docs_omni.py index 7fd3cde6..7a123ae4 100644 --- a/scripts/auto_tests_docs/auto_docs_omni.py +++ b/scripts/auto_tests_docs/auto_docs_omni.py @@ -2,18 +2,9 @@ import inspect import os import threading -from dotenv import load_dotenv - from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP from swarms import 
OpenAIChat -########### - - -############### - -load_dotenv() - api_key = os.getenv("OPENAI_API_KEY") model = OpenAIChat( diff --git a/scripts/auto_tests_docs/auto_tests.py b/scripts/auto_tests_docs/auto_tests.py index c9d7c95e..4b931a1d 100644 --- a/scripts/auto_tests_docs/auto_tests.py +++ b/scripts/auto_tests_docs/auto_tests.py @@ -3,19 +3,13 @@ import os import re import threading -######## -from dotenv import load_dotenv - from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT from swarms import OpenAIChat -######### from swarms.memory.dict_internal_memory import DictInternalMemory from swarms.memory.dict_shared_memory import DictSharedMemory from swarms.memory.lanchain_chroma import LangchainChromaVectorMemory -load_dotenv() - api_key = os.getenv("OPENAI_API_KEY") model = OpenAIChat( diff --git a/scripts/auto_tests_docs/auto_tests_functions.py b/scripts/auto_tests_docs/auto_tests_functions.py index 4fa2fafd..eed0b749 100644 --- a/scripts/auto_tests_docs/auto_tests_functions.py +++ b/scripts/auto_tests_docs/auto_tests_functions.py @@ -3,14 +3,10 @@ import os import sys import threading -from dotenv import load_dotenv - from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT from swarms import OpenAIChat from swarms.utils.parse_code import extract_code_from_markdown -load_dotenv() - api_key = os.getenv("OPENAI_API_KEY") model = OpenAIChat( diff --git a/scripts/auto_tests_docs/docs.py b/scripts/auto_tests_docs/docs.py index 01df9d71..babc6da9 100644 --- a/scripts/auto_tests_docs/docs.py +++ b/scripts/auto_tests_docs/docs.py @@ -148,7 +148,7 @@ def TEST_WRITER_SOP_PROMPT( 8. **Environment Variables and Secret Handling**: - Store secrets and configurations in environment variables. - - Use libraries like `python-decouple` or `python-dotenv` to load environment variables. + - Use libraries like `python-decouple` to load environment variables. - For tests, mock or set environment variables temporarily within the test environment. 9. 
**Grouping and Marking Tests**: diff --git a/swarm_network_example.py b/swarm_network_example.py index f073719c..36768805 100644 --- a/swarm_network_example.py +++ b/swarm_network_example.py @@ -8,10 +8,6 @@ from swarms import ( TogetherLLM, ) from swarms.memory import ChromaDB -from dotenv import load_dotenv - -# load the environment variables -load_dotenv() # Initialize the ChromaDB memory = ChromaDB() diff --git a/swarms/memory/chroma_db.py b/swarms/memory/chroma_db.py index 033be6f6..e6cc99cd 100644 --- a/swarms/memory/chroma_db.py +++ b/swarms/memory/chroma_db.py @@ -5,16 +5,11 @@ from typing import Callable, List, Optional import chromadb import numpy as np -from dotenv import load_dotenv from swarms.utils.data_to_text import data_to_text from swarms.utils.markdown_message import display_markdown_message from swarms.memory.base_vectordb import AbstractVectorDatabase -# Load environment variables -load_dotenv() - - # Results storage using local ChromaDB class ChromaDB(AbstractVectorDatabase): """ diff --git a/swarms/models/dalle3.py b/swarms/models/dalle3.py index 0e02c3d6..ccadd8e6 100644 --- a/swarms/models/dalle3.py +++ b/swarms/models/dalle3.py @@ -10,14 +10,11 @@ import backoff import openai import requests from cachetools import TTLCache -from dotenv import load_dotenv from openai import OpenAI from PIL import Image from pydantic import field_validator from termcolor import colored -load_dotenv() - # Configure Logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) diff --git a/swarms/models/gemini.py b/swarms/models/gemini.py index 276cd05d..90f6f38b 100644 --- a/swarms/models/gemini.py +++ b/swarms/models/gemini.py @@ -2,7 +2,6 @@ import os import subprocess as sp from pathlib import Path -from dotenv import load_dotenv from PIL import Image from swarms.models.base_multimodal_model import BaseMultiModalModel @@ -17,9 +16,6 @@ except ImportError as error: sp.run(["pip", "install", "--upgrade", "google-generativeai"]) -load_dotenv() - - # Helpers def get_gemini_api_key_env(): """Get the Gemini API key from the environment diff --git a/swarms/models/gpt4_vision_api.py b/swarms/models/gpt4_vision_api.py index 5966a0b6..00781572 100644 --- a/swarms/models/gpt4_vision_api.py +++ b/swarms/models/gpt4_vision_api.py @@ -6,7 +6,6 @@ from typing import Optional import aiohttp import requests -from dotenv import load_dotenv from termcolor import colored from swarms.models.base_multimodal_model import BaseMultiModalModel @@ -21,7 +20,6 @@ except ImportError: raise ImportError # Load environment variables -load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/swarms/models/openai_tts.py b/swarms/models/openai_tts.py index ed19a8d3..84671df0 100644 --- a/swarms/models/openai_tts.py +++ b/swarms/models/openai_tts.py @@ -3,7 +3,6 @@ import subprocess import sys import requests -from dotenv import load_dotenv from swarms.models.base_llm import AbstractLLM @@ -15,11 +14,6 @@ except ImportError as error: [sys.executable, "-m", "pip", "install", "pyaudio"] ) - -# Load .env file -load_dotenv() - - # OpenAI API Key env def openai_api_key_env(): openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/swarms/models/stable_diffusion.py b/swarms/models/stable_diffusion.py index 9ae45604..19a7708d 100644 --- a/swarms/models/stable_diffusion.py +++ b/swarms/models/stable_diffusion.py @@ -5,9 +5,6 @@ import uuid from typing import List import requests -from dotenv import load_dotenv - -load_dotenv() stable_api_key = os.environ.get("STABLE_API_KEY") diff 
--git a/swarms/models/together.py b/swarms/models/together.py
index 37d9d0e5..ea4e6259 100644
--- a/swarms/models/together.py
+++ b/swarms/models/together.py
@@ -3,14 +3,9 @@ import os
 from typing import Optional
 import requests
-from dotenv import load_dotenv
 from swarms.models.base_llm import AbstractLLM
-# Load environment variables
-load_dotenv()
-
-
 def together_api_key_env():
 """Get the API key from the environment."""
 return os.getenv("TOGETHER_API_KEY")
diff --git a/swarms/prompts/autoswarm.py b/swarms/prompts/autoswarm.py
index 0d76d020..f8a0aef0 100644
--- a/swarms/prompts/autoswarm.py
+++ b/swarms/prompts/autoswarm.py
@@ -48,13 +48,11 @@ copy/paste to vscode and run it without issue. Here are some tips to consider:
 Output Format: A complete Python script that is ready for copy/paste to GitHub and demo execution. It should be formatted with complete logic, proper indentation, clear variable names, and comments. Here is an example of a a working swarm script that you can use as a rough template for the logic:
 import os
-from dotenv import load_dotenv
 from swarms.models import OpenAIChat
 from swarms.structs import Agent
 import swarms.prompts.swarm_daddy as sdsp
 # Load environment variables and initialize the OpenAI Chat model
-load_dotenv()
 api_key = os.getenv("OPENAI_API_KEY")
 llm = OpenAIChat(model_name = "gpt-4", openai_api_key=api_key)
diff --git a/swarms/prompts/programming.py b/swarms/prompts/programming.py
index 05732607..ba75eeb9 100644
--- a/swarms/prompts/programming.py
+++ b/swarms/prompts/programming.py
@@ -39,7 +39,7 @@ and thorough, use the guide below to create the tests, make the tests as thoroug
 8. **Environment Variables and Secret Handling**:
 - Store secrets and configurations in environment variables.
- - Use libraries like `python-decouple` or `python-dotenv` to load environment variables.
+ - Use libraries like `python-decouple` to load environment variables.
 - For tests, mock or set environment variables temporarily within the test environment.
 9. **Grouping and Marking Tests**:
diff --git a/swarms/prompts/tests.py b/swarms/prompts/tests.py
index 8dac9337..8b3bcc88 100644
--- a/swarms/prompts/tests.py
+++ b/swarms/prompts/tests.py
@@ -42,7 +42,7 @@ def TEST_WRITER_SOP_PROMPT(
 8. **Environment Variables and Secret Handling**:
 - Store secrets and configurations in environment variables.
- - Use libraries like `python-decouple` or `python-dotenv` to load environment variables.
+ - Use libraries like `python-decouple` to load environment variables.
 - For tests, mock or set environment variables temporarily within the test environment.
 9.
**Grouping and Marking Tests**: diff --git a/swarms/structs/autoscaler.py b/swarms/structs/autoscaler.py index 4996b7d5..9553bdd5 100644 --- a/swarms/structs/autoscaler.py +++ b/swarms/structs/autoscaler.py @@ -66,13 +66,10 @@ class AutoScaler(BaseStructure): Examples: >>> import os - >>> from dotenv import load_dotenv >>> # Import the OpenAIChat model and the Agent struct >>> from swarms.models import OpenAIChat >>> from swarms.structs import Agent >>> from swarms.structs.autoscaler import AutoScaler - >>> # Load the environment variables - >>> load_dotenv() >>> # Get the API key from the environment >>> api_key = os.environ.get("OPENAI_API_KEY") >>> # Initialize the language model diff --git a/swarms/telemetry/sentry_active.py b/swarms/telemetry/sentry_active.py index 184a405b..1a915261 100644 --- a/swarms/telemetry/sentry_active.py +++ b/swarms/telemetry/sentry_active.py @@ -1,9 +1,6 @@ import os -from dotenv import load_dotenv import sentry_sdk -load_dotenv() - os.environ["USE_TELEMETRY"] = "True" use_telementry = os.getenv("USE_TELEMETRY") diff --git a/tests/memory/test_pq_db.py b/tests/memory/test_pq_db.py index 5e44f0ba..1e66ce0d 100644 --- a/tests/memory/test_pq_db.py +++ b/tests/memory/test_pq_db.py @@ -1,12 +1,8 @@ import os from unittest.mock import patch -from dotenv import load_dotenv - from swarms.memory.pg import PostgresDB -load_dotenv() - PSG_CONNECTION_STRING = os.getenv("PSG_CONNECTION_STRING") diff --git a/tests/models/test_cohere.py b/tests/models/test_cohere.py index 8a1147d3..e87b8265 100644 --- a/tests/models/test_cohere.py +++ b/tests/models/test_cohere.py @@ -2,12 +2,9 @@ import os from unittest.mock import Mock, patch import pytest -from dotenv import load_dotenv from swarms.models.cohere_chat import BaseCohere, Cohere -# Load the environment variables -load_dotenv() api_key = os.getenv("COHERE_API_KEY") diff --git a/tests/models/test_elevenlab.py b/tests/models/test_elevenlab.py index da41ca53..3a17cd24 100644 --- a/tests/models/test_elevenlab.py +++ b/tests/models/test_elevenlab.py @@ -2,15 +2,12 @@ import os from unittest.mock import mock_open, patch import pytest -from dotenv import load_dotenv from swarms.models.eleven_labs import ( ElevenLabsModel, ElevenLabsText2SpeechTool, ) -load_dotenv() - # Define some test data SAMPLE_TEXT = "Hello, this is a test." 
API_KEY = os.environ.get("ELEVEN_API_KEY") diff --git a/tests/models/test_gpt4_vision_api.py b/tests/models/test_gpt4_vision_api.py index ac797280..6e023de2 100644 --- a/tests/models/test_gpt4_vision_api.py +++ b/tests/models/test_gpt4_vision_api.py @@ -4,13 +4,10 @@ from unittest.mock import AsyncMock, Mock, mock_open, patch import pytest from aiohttp import ClientResponseError -from dotenv import load_dotenv from requests.exceptions import RequestException from swarms.models.gpt4_vision_api import GPT4VisionAPI -load_dotenv() - custom_api_key = os.environ.get("OPENAI_API_KEY") img = "images/swarms.jpeg" diff --git a/tests/structs/test_agent.py b/tests/structs/test_agent.py index 5be7f31a..b1e4378a 100644 --- a/tests/structs/test_agent.py +++ b/tests/structs/test_agent.py @@ -4,14 +4,11 @@ from unittest import mock from unittest.mock import MagicMock, patch import pytest -from dotenv import load_dotenv from swarms.models import OpenAIChat from swarms.structs.agent import Agent, stop_when_repeats from swarms.utils.logger import logger -load_dotenv() - openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/tests/structs/test_autoscaler.py b/tests/structs/test_autoscaler.py index 2e5585bf..8fa40620 100644 --- a/tests/structs/test_autoscaler.py +++ b/tests/structs/test_autoscaler.py @@ -2,14 +2,11 @@ import os from unittest.mock import MagicMock, patch import pytest -from dotenv import load_dotenv from swarms.models import OpenAIChat from swarms.structs import Agent from swarms.structs.autoscaler import AutoScaler -load_dotenv() - api_key = os.environ.get("OPENAI_API_KEY") llm = OpenAIChat( temperature=0.5, diff --git a/tests/structs/test_base_workflow.py b/tests/structs/test_base_workflow.py index ccb7a563..561ff15a 100644 --- a/tests/structs/test_base_workflow.py +++ b/tests/structs/test_base_workflow.py @@ -2,13 +2,10 @@ import json import os import pytest -from dotenv import load_dotenv from swarms.models import OpenAIChat from swarms.structs import BaseWorkflow -load_dotenv() - api_key = os.environ.get("OPENAI_API_KEY") diff --git a/tests/structs/test_task.py b/tests/structs/test_task.py index de0352af..f804e79b 100644 --- a/tests/structs/test_task.py +++ b/tests/structs/test_task.py @@ -3,7 +3,6 @@ from datetime import timedelta from unittest.mock import Mock import pytest -from dotenv import load_dotenv from swarms.models.gpt4_vision_api import GPT4VisionAPI from swarms.prompts.multi_modal_autonomous_instruction_prompt import ( @@ -12,8 +11,6 @@ from swarms.prompts.multi_modal_autonomous_instruction_prompt import ( from swarms.structs.agent import Agent from swarms.structs.task import Task -load_dotenv() - @pytest.fixture def llm(): diff --git a/tests/test_upload_tests_to_issues.py b/tests/test_upload_tests_to_issues.py index 0857c58a..d09be24c 100644 --- a/tests/test_upload_tests_to_issues.py +++ b/tests/test_upload_tests_to_issues.py @@ -2,9 +2,7 @@ import os import subprocess import requests -from dotenv import load_dotenv -load_dotenv # Constants GITHUB_USERNAME = os.getenv("GITHUB_USERNAME") diff --git a/tool_agent_with_llm.py b/tool_agent_with_llm.py index 3582be21..cd18432a 100644 --- a/tool_agent_with_llm.py +++ b/tool_agent_with_llm.py @@ -1,14 +1,10 @@ import os -from dotenv import load_dotenv from pydantic import BaseModel, Field from swarms import OpenAIChat, ToolAgent from swarms.utils.json_utils import base_model_to_json -# Load the environment variables -load_dotenv() - # Initialize the OpenAIChat class chat = OpenAIChat( api_key=os.getenv("OPENAI_API"),