diff --git a/.env.example b/.env.example index d1efe4e8..b6d3810b 100644 --- a/.env.example +++ b/.env.example @@ -1,4 +1,5 @@ OPENAI_API_KEY="" +OPENAI_ORG_ID="" GOOGLE_API_KEY="" ANTHROPIC_API_KEY="" AI21_API_KEY="your_api_key_here" diff --git a/playground/agents/multi_modal_auto_agent_example.py b/playground/agents/multi_modal_auto_agent_example.py index d32a6221..4d311001 100644 --- a/playground/agents/multi_modal_auto_agent_example.py +++ b/playground/agents/multi_modal_auto_agent_example.py @@ -9,10 +9,12 @@ load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model llm = GPT4VisionAPI( openai_api_key=api_key, + openai_org_id=org_id, max_tokens=500, ) diff --git a/playground/agents/simple_agent_example.py b/playground/agents/simple_agent_example.py index 5d9d57ed..a4073a36 100644 --- a/playground/agents/simple_agent_example.py +++ b/playground/agents/simple_agent_example.py @@ -21,9 +21,10 @@ load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model -llm = OpenAIChat(openai_api_key=api_key) +llm = OpenAIChat(openai_api_key=api_key, openai_org_id=org_id, max_tokens=500,) # Run the language model in a loop diff --git a/playground/agents/worker_example.py b/playground/agents/worker_example.py index 9e215e83..53c8a1bd 100644 --- a/playground/agents/worker_example.py +++ b/playground/agents/worker_example.py @@ -9,6 +9,7 @@ load_dotenv() # Retrieving the OpenAI API key from environment variables api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Creating a Worker instance worker = Worker( @@ -17,7 +18,7 @@ worker = Worker( human_in_the_loop=False, tools=[], temperature=0.5, - llm=OpenAIChat(openai_api_key=api_key), + llm=OpenAIChat(openai_api_key=api_key, openai_org_id=org_id, max_tokens=500,), verbose=True, ) diff 
--git a/playground/demos/accountant_team/account_team2_example.py b/playground/demos/accountant_team/account_team2_example.py index 1b9d3659..5047b86c 100644 --- a/playground/demos/accountant_team/account_team2_example.py +++ b/playground/demos/accountant_team/account_team2_example.py @@ -13,11 +13,13 @@ from swarms.utils.pdf_to_text import pdf_to_text load_dotenv() anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") openai_api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Base llms llm1 = OpenAIChat( openai_api_key=openai_api_key, + openai_org_id=org_id, max_tokens=5000, ) diff --git a/playground/demos/ad_gen/ad_gen_example.py b/playground/demos/ad_gen/ad_gen_example.py index b665b63a..9cb2ca4a 100644 --- a/playground/demos/ad_gen/ad_gen_example.py +++ b/playground/demos/ad_gen/ad_gen_example.py @@ -7,11 +7,12 @@ from swarms.models.stable_diffusion import StableDiffusion load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") stability_api_key = os.getenv("STABILITY_API_KEY") # Initialize the language model and image generation model llm = OpenAIChat( - openai_api_key=openai_api_key, temperature=0.5, max_tokens=3000 + openai_api_key=openai_api_key, openai_org_id=org_id, temperature=0.5, max_tokens=3000 ) sd_api = StableDiffusion(api_key=stability_api_key) diff --git a/playground/demos/ai_research_team/main_example.py b/playground/demos/ai_research_team/main_example.py index bda9e0de..c561a4da 100644 --- a/playground/demos/ai_research_team/main_example.py +++ b/playground/demos/ai_research_team/main_example.py @@ -15,6 +15,7 @@ from swarms.utils.pdf_to_text import pdf_to_text load_dotenv() anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") openai_api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") PDF_PATH = "fasterffn.pdf" @@ -22,6 +23,7 @@ PDF_PATH = "fasterffn.pdf" # Base llms llm1 = OpenAIChat( openai_api_key=openai_api_key, + openai_org_id=org_id, ) llm2 = 
Anthropic( diff --git a/playground/demos/autotemp/autotemp_example.py b/playground/demos/autotemp/autotemp_example.py index baf8f091..0382473a 100644 --- a/playground/demos/autotemp/autotemp_example.py +++ b/playground/demos/autotemp/autotemp_example.py @@ -11,12 +11,14 @@ class AutoTemp: def __init__( self, api_key, + org_id, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6, ): self.api_key = api_key + self.org_id = org_id self.default_temp = default_temp self.alt_temps = ( alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] @@ -24,7 +26,7 @@ class AutoTemp: self.auto_select = auto_select self.max_workers = max_workers self.llm = OpenAIChat( - openai_api_key=self.api_key, temperature=self.default_temp + openai_api_key=self.api_key, openai_org_id=org_id, temperature=self.default_temp ) def evaluate_output(self, output, temperature): diff --git a/playground/demos/autotemp/blog_gen_example.py b/playground/demos/autotemp/blog_gen_example.py index e11a1521..a2096d71 100644 --- a/playground/demos/autotemp/blog_gen_example.py +++ b/playground/demos/autotemp/blog_gen_example.py @@ -9,11 +9,14 @@ class BlogGen: def __init__( self, api_key, + org_id, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2", ): # Add blog_topic as an argument self.openai_chat = OpenAIChat( - openai_api_key=api_key, temperature=0.8 + openai_api_key=api_key, + openai_org_id=org_id, + temperature=0.8 ) self.auto_temp = AutoTemp(api_key) self.temperature_range = temperature_range @@ -134,5 +137,6 @@ class BlogGen: if __name__ == "__main__": api_key = os.environ["OPENAI_API_KEY"] + org_id = os.environ["OPENAI_ORG_ID"] blog_generator = BlogGen(api_key) blog_generator.run_workflow() diff --git a/playground/demos/developer_swarm/main_example.py b/playground/demos/developer_swarm/main_example.py index 18c0a346..719e2686 100644 --- a/playground/demos/developer_swarm/main_example.py +++ b/playground/demos/developer_swarm/main_example.py @@ -24,6 +24,7 @@ from swarms.structs 
import Agent load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") TASK = """ @@ -32,7 +33,9 @@ CODE """ # Initialize the language model -llm = OpenAIChat(openai_api_key=api_key, max_tokens=5000) +llm = OpenAIChat(openai_api_key=api_key, + openai_org_id=org_id, + max_tokens=5000) # Documentation agent diff --git a/playground/demos/education/education_example.py b/playground/demos/education/education_example.py index 266cede9..fb2c66cd 100644 --- a/playground/demos/education/education_example.py +++ b/playground/demos/education/education_example.py @@ -7,11 +7,15 @@ import swarms.prompts.education as edu_prompts # Load environment variables load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") stability_api_key = os.getenv("STABILITY_API_KEY") # Initialize language model llm = OpenAIChat( - openai_api_key=api_key, temperature=0.5, max_tokens=3000 + openai_api_key=api_key, + openai_org_id=org_id, + temperature=0.5, + max_tokens=3000 ) # User preferences (can be dynamically set in a real application) diff --git a/playground/demos/grupa/app_example.py b/playground/demos/grupa/app_example.py index 3ab52e22..e8befb58 100644 --- a/playground/demos/grupa/app_example.py +++ b/playground/demos/grupa/app_example.py @@ -43,11 +43,13 @@ export default MainPanel; # Load the environment variables api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language agent llm = OpenAIChat( model_name="gpt-4", openai_api_key=api_key, + openai_org_id=org_id, temperature=0.5, max_tokens=4000, ) diff --git a/playground/demos/langchain_example/langchain_example.py b/playground/demos/langchain_example/langchain_example.py index 803e7857..f0b2d929 100644 --- a/playground/demos/langchain_example/langchain_example.py +++ b/playground/demos/langchain_example/langchain_example.py @@ -9,6 +9,7 @@ load_dotenv() # Initialize the model llm = OpenAIChat( 
openai_api_key=os.getenv("OPENAI_API_KEY"), + openai_org_id=os.getenv("OPENAI_ORG_ID"), max_tokens=1000, ) diff --git a/playground/demos/llm_with_conversation/main_example.py b/playground/demos/llm_with_conversation/main_example.py index a9e6c42a..91674afd 100644 --- a/playground/demos/llm_with_conversation/main_example.py +++ b/playground/demos/llm_with_conversation/main_example.py @@ -10,11 +10,13 @@ load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model llm = OpenAIChat( temperature=0.5, model_name="gpt-4", openai_api_key=api_key, + openai_org_id=org_id, max_tokens=1000, ) diff --git a/playground/demos/logistics/logistics_example.py b/playground/demos/logistics/logistics_example.py index 108ec702..efdb2021 100644 --- a/playground/demos/logistics/logistics_example.py +++ b/playground/demos/logistics/logistics_example.py @@ -16,10 +16,11 @@ from swarms.prompts.logistics import ( load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # GPT4VisionAPI or llama # @banana #- deploy to banana -llm = GPT4VisionAPI(openai_api_key=api_key) +llm = GPT4VisionAPI(openai_api_key=api_key, openai_org_id=org_id) # Image for analysis factory_image = "factory_image1.jpg" diff --git a/playground/demos/multi_modal_chain_of_thought/vcot_example.py b/playground/demos/multi_modal_chain_of_thought/vcot_example.py index 50a02c3d..ede30f96 100644 --- a/playground/demos/multi_modal_chain_of_thought/vcot_example.py +++ b/playground/demos/multi_modal_chain_of_thought/vcot_example.py @@ -11,10 +11,12 @@ load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model llm = GPT4VisionAPI( openai_api_key=api_key, + openai_org_id=org_id, max_tokens=500, ) diff --git a/playground/demos/multimodal_tot/idea2img_example.py
b/playground/demos/multimodal_tot/idea2img_example.py index 4a6c1da3..d6084e06 100644 --- a/playground/demos/multimodal_tot/idea2img_example.py +++ b/playground/demos/multimodal_tot/idea2img_example.py @@ -12,12 +12,13 @@ from swarms.structs import Agent # Load environment variables load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") stability_api_key = os.getenv("STABLE_API_KEY") # Initialize the models -vision_api = GPT4VisionAPI(api_key=openai_api_key) +vision_api = GPT4VisionAPI(api_key=openai_api_key, org_id=org_id) sd_api = StableDiffusion(api_key=stability_api_key) -gpt_api = OpenAIChat(openai_api_key=openai_api_key) +gpt_api = OpenAIChat(openai_api_key=openai_api_key, org_id=org_id) class Idea2Image(Agent): diff --git a/playground/demos/multimodal_tot/main_example.py b/playground/demos/multimodal_tot/main_example.py index 2d5ed653..c232f072 100644 --- a/playground/demos/multimodal_tot/main_example.py +++ b/playground/demos/multimodal_tot/main_example.py @@ -26,12 +26,14 @@ load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") stable_api_key = os.environ.get("STABLE_API_KEY") # Initialize the language model llm = GPT4VisionAPI( openai_api_key=api_key, + openai_org_id=org_id, max_tokens=500, ) diff --git a/playground/demos/nutrition/nutrition_example.py b/playground/demos/nutrition/nutrition_example.py index 428560e3..96745e85 100644 --- a/playground/demos/nutrition/nutrition_example.py +++ b/playground/demos/nutrition/nutrition_example.py @@ -8,6 +8,7 @@ from swarms.structs import Agent # Load environment variables load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Define prompts for various tasks MEAL_PLAN_PROMPT = ( @@ -33,6 +34,7 @@ def encode_image(image_path): # Initialize Language Model (LLM) llm = OpenAIChat( openai_api_key=openai_api_key, + openai_org_id=org_id, 
max_tokens=3000, ) @@ -43,6 +45,7 @@ def create_vision_agent(image_path): headers = { "Content-Type": "application/json", "Authorization": f"Bearer {openai_api_key}", + "OpenAI-Organization": org_id, } payload = { "model": "gpt-4-vision-preview", diff --git a/playground/demos/optimize_llm_stack/vortex_example.py b/playground/demos/optimize_llm_stack/vortex_example.py index 5badb2fd..ae6917b2 100644 --- a/playground/demos/optimize_llm_stack/vortex_example.py +++ b/playground/demos/optimize_llm_stack/vortex_example.py @@ -12,6 +12,7 @@ load_dotenv() # Model llm = OpenAIChat( openai_api_key=os.getenv("OPENAI_API_KEY"), + openai_org_id=os.getenv("OPENAI_ORG_ID"), model_name="gpt-4", max_tokens=1000, ) diff --git a/playground/demos/optimize_llm_stack/weaviate_example.py b/playground/demos/optimize_llm_stack/weaviate_example.py index ad594547..e2d0c061 100644 --- a/playground/demos/optimize_llm_stack/weaviate_example.py +++ b/playground/demos/optimize_llm_stack/weaviate_example.py @@ -8,7 +8,7 @@ weaviate_client = WeaviateDB( grpc_port="YOUR_gRPC_PORT", grpc_secure=True, auth_client_secret="YOUR_APIKEY", - additional_headers={"X-OpenAI-Api-Key": "YOUR_OPENAI_APIKEY"}, + additional_headers={"X-OpenAI-Api-Key": "YOUR_OPENAI_APIKEY", "OpenAI-Organization": "YOUR_OPENAI_ORG_ID"}, additional_config=None, # You can pass additional configuration here ) diff --git a/playground/demos/personal_assistant/better_communication_example.py b/playground/demos/personal_assistant/better_communication_example.py index c6e79eb7..39670270 100644 --- a/playground/demos/personal_assistant/better_communication_example.py +++ b/playground/demos/personal_assistant/better_communication_example.py @@ -13,10 +13,12 @@ load_dotenv() # Get the API key from the environment openai_api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model llm = OpenAIChat( openai_api_key=openai_api_key, + openai_org_id=org_id, ) # Initialize the
text-to-speech model @@ -24,6 +26,7 @@ tts = OpenAITTS( model_name="tts-1-1106", voice="onyx", openai_api_key=openai_api_key, + openai_org_id=org_id, saved_filepath="runs/tts_speech.wav", ) diff --git a/playground/demos/personal_stylist/personal_stylist_example.py b/playground/demos/personal_stylist/personal_stylist_example.py index b8641aa3..ac90bd24 100644 --- a/playground/demos/personal_stylist/personal_stylist_example.py +++ b/playground/demos/personal_stylist/personal_stylist_example.py @@ -13,9 +13,10 @@ from swarms.prompts.personal_stylist import ( # Load environment variables load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize GPT4VisionAPI -llm = GPT4VisionAPI(openai_api_key=api_key) +llm = GPT4VisionAPI(openai_api_key=api_key, openai_org_id=org_id) # User selfie and clothes images user_selfie = "user_image.jpg" diff --git a/playground/demos/positive_med/positive_med_example.py b/playground/demos/positive_med/positive_med_example.py index b92b9586..90e7c62b 100644 --- a/playground/demos/positive_med/positive_med_example.py +++ b/playground/demos/positive_med/positive_med_example.py @@ -33,7 +33,9 @@ from swarms.prompts.autobloggen import ( ) api_key = os.environ["OPENAI_API_KEY"] -llm = OpenAIChat(openai_api_key=api_key) +org_id = os.environ["OPENAI_ORG_ID"] +llm = OpenAIChat(openai_api_key=api_key, openai_org_id=org_id) + def get_review_prompt(article): diff --git a/playground/demos/security_team/security_team_example.py b/playground/demos/security_team/security_team_example.py index f00b0295..a525163b 100644 --- a/playground/demos/security_team/security_team_example.py +++ b/playground/demos/security_team/security_team_example.py @@ -7,8 +7,9 @@ import swarms.prompts.security_team as stsp # Load environment variables and initialize the Vision API load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") -llm = GPT4VisionAPI(openai_api_key=api_key) +llm =
GPT4VisionAPI(openai_api_key=api_key, openai_org_id=org_id) # Image for analysis img = "bank_robbery.jpg" diff --git a/playground/demos/swarm_of_mma_manufacturing/main_example.py b/playground/demos/swarm_of_mma_manufacturing/main_example.py index 05b0e8e5..81e521de 100644 --- a/playground/demos/swarm_of_mma_manufacturing/main_example.py +++ b/playground/demos/swarm_of_mma_manufacturing/main_example.py @@ -23,9 +23,12 @@ from swarms.structs import Agent load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # GPT4VisionAPI -llm = GPT4VisionAPI(openai_api_key=api_key, max_tokens=2000) +llm = GPT4VisionAPI(openai_api_key=api_key, + openai_org_id=org_id, + max_tokens=2000) assembly_line = ( "playground/demos/swarm_of_mma_manufacturing/assembly_line.jpg" diff --git a/playground/demos/urban_planning/urban_planning_example.py b/playground/demos/urban_planning/urban_planning_example.py index e85b4d31..1efbb0e7 100644 --- a/playground/demos/urban_planning/urban_planning_example.py +++ b/playground/demos/urban_planning/urban_planning_example.py @@ -7,15 +7,19 @@ import swarms.prompts.urban_planning as upp # Load environment variables load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") stability_api_key = os.getenv("STABILITY_API_KEY") # Initialize language model llm = OpenAIChat( - openai_api_key=api_key, temperature=0.5, max_tokens=3000 + openai_api_key=api_key, + openai_org_id=org_id, + temperature=0.5, + max_tokens=3000 ) # Initialize Vision model -vision_api = GPT4VisionAPI(api_key=api_key) +vision_api = GPT4VisionAPI(api_key=api_key, org_id=org_id) # Initialize agents for urban planning tasks architecture_analysis_agent = Agent( diff --git a/playground/demos/visuo/text_to_sql_agent_example.py b/playground/demos/visuo/text_to_sql_agent_example.py index 67f53e97..3b513f30 100644 --- a/playground/demos/visuo/text_to_sql_agent_example.py +++ b/playground/demos/visuo/text_to_sql_agent_example.py 
@@ -10,6 +10,7 @@ load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model llm = HuggingfaceLLM( diff --git a/playground/demos/xray/xray_example.py b/playground/demos/xray/xray_example.py index 20e89e6d..b4314a59 100644 --- a/playground/demos/xray/xray_example.py +++ b/playground/demos/xray/xray_example.py @@ -12,15 +12,18 @@ from swarms.structs.agent import Agent # Load environment variables load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Function to analyze an X-ray image multimodal_llm = GPT4VisionAPI( openai_api_key=openai_api_key, + openai_org_id=org_id, ) # Initialize Language Model (LLM) llm = OpenAIChat( openai_api_key=openai_api_key, + openai_org_id=org_id, max_tokens=3000, ) diff --git a/playground/diy/hierchical_example.py b/playground/diy/hierchical_example.py index 0734c4f6..60c72b9e 100644 --- a/playground/diy/hierchical_example.py +++ b/playground/diy/hierchical_example.py @@ -3,6 +3,7 @@ from swarms import HierarchicalSwarm swarm = HierarchicalSwarm( openai_api_key="key", + openai_org_id="org_id", model_type="openai", model_id="gpt-4", use_vectorstore=False, diff --git a/playground/models/bingchat_example.py b/playground/models/bingchat_example.py index 2af8472c..f78732b5 100644 --- a/playground/models/bingchat_example.py +++ b/playground/models/bingchat_example.py @@ -5,6 +5,7 @@ from swarms.models import OpenAIChat import os api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the EdgeGPTModel edgegpt = BingChat(cookies_path="./cookies.txt") @@ -20,6 +21,7 @@ def edgegpt(task: str = None): # This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC llm = OpenAIChat( openai_api_key=api_key, + openai_org_id=org_id, temperature=0.5, ) diff --git a/playground/models/dall3_example.py 
b/playground/models/dall3_example.py index 2ea2e10c..fd0aafa3 100644 --- a/playground/models/dall3_example.py +++ b/playground/models/dall3_example.py @@ -1,6 +1,6 @@ from swarms.models import Dalle3 -dalle3 = Dalle3(openai_api_key="") +dalle3 = Dalle3(openai_api_key="", openai_org_id="") task = "A painting of a dog" image_url = dalle3(task) print(image_url) diff --git a/playground/models/dalle3_concurrent_example.py b/playground/models/dalle3_concurrent_example.py index de7f9cbb..af0a2065 100644 --- a/playground/models/dalle3_concurrent_example.py +++ b/playground/models/dalle3_concurrent_example.py @@ -8,8 +8,9 @@ from swarms.models.dalle3 import Dalle3 import os api_key = os.environ["OPENAI_API_KEY"] +org_id = os.environ["OPENAI_ORG_ID"] -dalle3 = Dalle3(openai_api_key=api_key, n=1) +dalle3 = Dalle3(openai_api_key=api_key, openai_org_id=org_id, n=1) # task = "Swarm of robots working super industrial ambience concept art" diff --git a/playground/models/gpt4_v_example.py b/playground/models/gpt4_v_example.py index 822ec726..92e23213 100644 --- a/playground/models/gpt4_v_example.py +++ b/playground/models/gpt4_v_example.py @@ -1,7 +1,7 @@ from swarms.models.gpt4v import GPT4Vision -gpt4vision = GPT4Vision(openai_api_key="") +gpt4vision = GPT4Vision(openai_api_key="", openai_org_id="") img = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0d/VFPt_Solenoid_correct2.svg/640px-VFPt_Solenoid_correct2.svg.png" diff --git a/playground/models/openai_example.py b/playground/models/openai_example.py index aacab66f..86eafe84 100644 --- a/playground/models/openai_example.py +++ b/playground/models/openai_example.py @@ -1,6 +1,6 @@ from swarms.models.openai_chat import OpenAIChat -model = OpenAIChat() +model = OpenAIChat(openai_api_key="", openai_org_id="") out = model("Hello, how are you?") diff --git a/playground/models/openai_model_example.py b/playground/models/openai_model_example.py index 3b9cb967..db0fcbfe 100644 --- a/playground/models/openai_model_example.py
+++ b/playground/models/openai_model_example.py @@ -1,6 +1,7 @@ from swarms.models.openai_models import OpenAIChat -openai = OpenAIChat(openai_api_key="", verbose=False) +openai = OpenAIChat(openai_api_key="", openai_org_id="", verbose=False) + chat = openai("What are quantum fields?") print(chat) diff --git a/playground/models/tts_speech_example.py b/playground/models/tts_speech_example.py index be38912c..8e1428b5 100644 --- a/playground/models/tts_speech_example.py +++ b/playground/models/tts_speech_example.py @@ -8,6 +8,7 @@ tts = OpenAITTS( model_name="tts-1-1106", voice="onyx", openai_api_key=os.getenv("OPENAI_API_KEY"), + openai_org_id=os.getenv("OPENAI_ORG_ID"), ) out = tts.run_and_save("Dammmmmm those tacos were good") diff --git a/playground/structs/agent_with_longterm.py b/playground/structs/agent_with_longterm.py index e803d095..c340d738 100644 --- a/playground/structs/agent_with_longterm.py +++ b/playground/structs/agent_with_longterm.py @@ -10,6 +10,7 @@ load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initilaize the chromadb client @@ -23,6 +24,7 @@ llm = OpenAIChat( temperature=0.5, model_name="gpt-4", openai_api_key=api_key, + openai_org_id=org_id, max_tokens=1000, ) diff --git a/playground/structs/agent_with_tools_example.py b/playground/structs/agent_with_tools_example.py index 44dcd8f6..f10fe479 100644 --- a/playground/structs/agent_with_tools_example.py +++ b/playground/structs/agent_with_tools_example.py @@ -58,11 +58,13 @@ def rapid_api(query: str): # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model llm = OpenAIChat( temperature=0.5, openai_api_key=api_key, + openai_org_id=org_id, ) diff --git a/playground/structs/autoscaler_example.py b/playground/structs/autoscaler_example.py index 8b808db6..32780682 ---
a/playground/structs/autoscaler_example.py +++ b/playground/structs/autoscaler_example.py @@ -13,11 +13,13 @@ load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model llm = OpenAIChat( temperature=0.5, openai_api_key=api_key, + openai_org_id=org_id, ) diff --git a/playground/structs/company_example.py b/playground/structs/company_example.py index 72396c61..a20bee61 100644 --- a/playground/structs/company_example.py +++ b/playground/structs/company_example.py @@ -10,7 +10,9 @@ from swarms.structs.company import Company load_dotenv() llm = OpenAIChat( - openai_api_key=os.getenv("OPENAI_API_KEY"), max_tokens=4000 + openai_api_key=os.getenv("OPENAI_API_KEY"), + openai_org_id=os.getenv("OPENAI_ORG_ID"), + max_tokens=4000 ) ceo = Agent(llm=llm, ai_name="CEO") diff --git a/playground/structs/concurrent_workflow_example.py b/playground/structs/concurrent_workflow_example.py index 98531388..c80f29dc 100644 --- a/playground/structs/concurrent_workflow_example.py +++ b/playground/structs/concurrent_workflow_example.py @@ -6,7 +6,8 @@ from swarms import OpenAIChat, Task, ConcurrentWorkflow, Agent load_dotenv() # Load environment variables -llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")) +llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"), + openai_org_id=os.getenv("OPENAI_ORG_ID")) agent = Agent(llm=llm, max_loops=1) # Create a workflow diff --git a/playground/structs/dialogue_simulator_example.py b/playground/structs/dialogue_simulator_example.py index ee9241b6..05864739 100644 --- a/playground/structs/dialogue_simulator_example.py +++ b/playground/structs/dialogue_simulator_example.py @@ -2,8 +2,15 @@ from swarms.swarms import DialogueSimulator from swarms.workers.worker import Worker from swarms.models import OpenAIChat + +api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") + llm = OpenAIChat( - 
model_name="gpt-4", openai_api_key="api-key", temperature=0.5 + model_name="gpt-4", + openai_api_key=api_key, + openai_org_id=org_id, + temperature=0.5 ) worker1 = Worker( diff --git a/playground/structs/flow_example.py b/playground/structs/flow_example.py index 8ff45802..f7e1bded 100644 --- a/playground/structs/flow_example.py +++ b/playground/structs/flow_example.py @@ -1,12 +1,14 @@ from swarms.models import OpenAIChat from swarms.structs import Agent -api_key = "" +api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC llm = OpenAIChat( # model_name="gpt-4" openai_api_key=api_key, + openai_org_id=org_id, temperature=0.5, # max_tokens=100, ) diff --git a/playground/structs/godmode_example.py b/playground/structs/godmode_example.py index 46f71393..8ba4086a 100644 --- a/playground/structs/godmode_example.py +++ b/playground/structs/godmode_example.py @@ -10,10 +10,11 @@ load_dotenv() # API Keys anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") openai_api_key = os.getenv("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") gemini_api_key = os.getenv("GEMINI_API_KEY") # Initialize the models -llm = OpenAIChat(openai_api_key=openai_api_key) +llm = OpenAIChat(openai_api_key=openai_api_key, openai_org_id=org_id) anthropic = Anthropic(anthropic_api_key=anthropic_api_key) mixtral = Mixtral() gemini = Gemini(gemini_api_key=gemini_api_key) diff --git a/playground/structs/groupchat_example.py b/playground/structs/groupchat_example.py index b9ab5761..79edf02e 100644 --- a/playground/structs/groupchat_example.py +++ b/playground/structs/groupchat_example.py @@ -2,10 +2,12 @@ from swarms import OpenAI, Agent from swarms.structs.groupchat import GroupChatManager, GroupChat -api_key = "" +api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") llm = OpenAI( openai_api_key=api_key, +
openai_org_id=org_id, temperature=0.5, max_tokens=3000, ) diff --git a/playground/structs/gui_app_example.py b/playground/structs/gui_app_example.py index 751cb03a..e12431e4 100644 --- a/playground/structs/gui_app_example.py +++ b/playground/structs/gui_app_example.py @@ -5,7 +5,7 @@ from swarms import HierarchicalSwarm api_key = "sksdsds" # Initialize HierarchicalSwarm with your API key -swarm = HierarchicalSwarm(openai_api_key=api_key) +swarm = HierarchicalSwarm(openai_api_key=api_key, openai_org_id="org-id") # Define an objective objective = """ diff --git a/playground/structs/majority_voting.py b/playground/structs/majority_voting.py index 5eefb8ab..64dcfe75 100644 --- a/playground/structs/majority_voting.py +++ b/playground/structs/majority_voting.py @@ -1,7 +1,9 @@ from swarms import Agent, OpenAIChat, MajorityVoting +api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") # Initialize the llm -llm = OpenAIChat() +llm = OpenAIChat(openai_api_key=api_key, openai_org_id=org_id, max_tokens=150) # Initialize the agents agent1 = Agent(llm=llm, max_loops=1) diff --git a/playground/structs/multi_agent_debate_example.py b/playground/structs/multi_agent_debate_example.py index 6124a21c..ea7a6a02 100644 --- a/playground/structs/multi_agent_debate_example.py +++ b/playground/structs/multi_agent_debate_example.py @@ -5,7 +5,11 @@ from swarms.swarms.multi_agent_debate import ( from swarms.workers.worker import Worker from swarms.models import OpenAIChat -llm = OpenAIChat() +api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") + + +llm = OpenAIChat(openai_api_key=api_key, openai_org_id=org_id) worker1 = Worker( llm=llm, diff --git a/playground/structs/multi_modal_rag_agent.py b/playground/structs/multi_modal_rag_agent.py index b7944638..3456609d 100644 --- a/playground/structs/multi_modal_rag_agent.py +++ b/playground/structs/multi_modal_rag_agent.py @@ -12,9 +12,11 @@ load_dotenv() # Getting the Gemini API key from 
environment variables gemini_api_key = os.getenv("GEMINI_API_KEY") openai_api_key = os.getenv("OPENAI_API_KEY") +openai_org_id = os.getenv("OPENAI_ORG_ID") llm = OpenAIChat( openai_api_key=openai_api_key, + openai_org_id=openai_org_id, max_tokens=1000, temperature=0.2, ) diff --git a/playground/structs/orchestrate_example.py b/playground/structs/orchestrate_example.py index b0e17588..b897def3 100644 --- a/playground/structs/orchestrate_example.py +++ b/playground/structs/orchestrate_example.py @@ -1,7 +1,11 @@ from swarms import Worker, Orchestrator +api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") + node = Worker( - openai_api_key="", + openai_api_key=api_key, + openai_org_id=org_id, ai_name="Optimus Prime", ) diff --git a/playground/structs/orchestrator_example.py b/playground/structs/orchestrator_example.py index b0e17588..b897def3 100644 --- a/playground/structs/orchestrator_example.py +++ b/playground/structs/orchestrator_example.py @@ -1,7 +1,11 @@ from swarms import Worker, Orchestrator +api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") + node = Worker( - openai_api_key="", + openai_api_key=api_key, + openai_org_id=org_id, ai_name="Optimus Prime", ) diff --git a/playground/structs/recursive_example.py b/playground/structs/recursive_example.py index 9760b606..4b4ba437 100644 --- a/playground/structs/recursive_example.py +++ b/playground/structs/recursive_example.py @@ -6,7 +6,7 @@ from swarms import OpenAIChat, Task, RecursiveWorkflow, Agent load_dotenv() # Load environment variables -llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY")) +llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"), openai_org_id=os.getenv("OPENAI_ORG_ID")) agent = Agent(llm=llm, max_loops=1) # Create a workflow diff --git a/playground/structs/sequential_workflow_example.py b/playground/structs/sequential_workflow_example.py index 7fa110bc..4977a2a4 100644 --- a/playground/structs/sequential_workflow_example.py +++ 
b/playground/structs/sequential_workflow_example.py @@ -1,7 +1,12 @@ from swarms import OpenAIChat, Agent, Task, SequentialWorkflow +api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") + # Example usage llm = OpenAIChat( + openai_api_key=api_key, + openai_org_id=org_id, temperature=0.5, max_tokens=3000, ) diff --git a/playground/structs/social_app_example.py b/playground/structs/social_app_example.py index 8bf90bf5..1b5790e3 100644 --- a/playground/structs/social_app_example.py +++ b/playground/structs/social_app_example.py @@ -1,10 +1,11 @@ from ..swarms import HierarchicalSwarm # Retrieve your API key from the environment or replace with your actual key -api_key = "sksdsds" +api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") # Initialize HierarchicalSwarm with your API key -swarm = HierarchicalSwarm(openai_api_key=api_key) +swarm = HierarchicalSwarm(openai_api_key=api_key, openai_org_id=org_id, max_loops=1) # Define an objective objective = """ diff --git a/playground/structs/swarm_network_example.py b/playground/structs/swarm_network_example.py index de9c53b6..90cb1c58 100644 --- a/playground/structs/swarm_network_example.py +++ b/playground/structs/swarm_network_example.py @@ -10,11 +10,13 @@ load_dotenv() # Get the API key from the environment api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") # Initialize the language model llm = OpenAIChat( temperature=0.5, openai_api_key=api_key, + openai_org_id=org_id, ) ## Initialize the workflow diff --git a/playground/structs/task_example.py b/playground/structs/task_example.py index c2ade96a..e18adef6 100644 --- a/playground/structs/task_example.py +++ b/playground/structs/task_example.py @@ -22,6 +22,6 @@ def my_condition(): # Create an agent agent = Agent( -llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]), +llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"], openai_org_id=os.environ["OPENAI_ORG_ID"]), max_loops=1, dashboard=False, ) diff --git
a/playground/structs/todo_app_example.py b/playground/structs/todo_app_example.py index 627c72df..a0d7cdd6 100644 --- a/playground/structs/todo_app_example.py +++ b/playground/structs/todo_app_example.py @@ -2,10 +2,11 @@ from swarms import HierarchicalSwarm # Retrieve your API key from the environment or replace with your actual key -api_key = "sksdsds" +api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") # Initialize HierarchicalSwarm with your API key -swarm = HierarchicalSwarm(openai_api_key=api_key) +swarm = HierarchicalSwarm(openai_api_key=api_key, openai_org_id=org_id, max_loops=1) # Define an objective objective = """ diff --git a/playground/structs/workflow_example.py b/playground/structs/workflow_example.py index 91bff00a..27043a02 100644 --- a/playground/structs/workflow_example.py +++ b/playground/structs/workflow_example.py @@ -1,8 +1,11 @@ from swarms.structs.workflow import Workflow from swarms.models import OpenAIChat +import os +api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") -llm = OpenAIChat() +llm = OpenAIChat(openai_api_key=api_key, openai_org_id=org_id) workflow = Workflow(llm) diff --git a/playground/tools/agent_with_tools_example.py b/playground/tools/agent_with_tools_example.py index 3bad0b1d..35d45ad8 100644 --- a/playground/tools/agent_with_tools_example.py +++ b/playground/tools/agent_with_tools_example.py @@ -6,9 +6,9 @@ from dotenv import load_dotenv load_dotenv() api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") - -llm = OpenAIChat(api_key=api_key) +llm = OpenAIChat(openai_api_key=api_key, openai_org_id=org_id) # @tool # def search_api(query: str) -> str: diff --git a/requirements.txt b/requirements.txt index 6dd8dda4..eca51130 100644 --- a/requirements.txt +++ b/requirements.txt @@ -29,7 +29,6 @@ diffusers einops==0.7.0 opencv-python-headless==4.8.1.78 numpy -openai==0.28.0 opencv-python==4.7.0.72 timm yapf diff --git a/scripts/auto_docs.py
b/scripts/auto_docs.py index f469e9ec..481cc4bf 100644 --- a/scripts/auto_docs.py +++ b/scripts/auto_docs.py @@ -26,9 +26,11 @@ from swarms.tokenizers.cohere_tokenizer import CohereTokenizer load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") model = OpenAIChat( openai_api_key=api_key, + openai_org_id=org_id, max_tokens=4000, ) diff --git a/scripts/auto_tests_docs/auto_docs_functions.py b/scripts/auto_tests_docs/auto_docs_functions.py index 37bf376d..e7c44047 100644 --- a/scripts/auto_tests_docs/auto_docs_functions.py +++ b/scripts/auto_tests_docs/auto_docs_functions.py @@ -11,10 +11,12 @@ from swarms import OpenAIChat load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") model = OpenAIChat( model_name="gpt-4", openai_api_key=api_key, + openai_org_id=org_id, max_tokens=4000, ) diff --git a/scripts/auto_tests_docs/auto_docs_omni.py b/scripts/auto_tests_docs/auto_docs_omni.py index 3ae647a7..9aa08124 100644 --- a/scripts/auto_tests_docs/auto_docs_omni.py +++ b/scripts/auto_tests_docs/auto_docs_omni.py @@ -15,10 +15,12 @@ from swarms import OpenAIChat load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") model = OpenAIChat( model_name="gpt-4-1106-preview", openai_api_key=api_key, + openai_org_id=org_id, max_tokens=4000, ) diff --git a/scripts/auto_tests_docs/auto_tests.py b/scripts/auto_tests_docs/auto_tests.py index 87d891d2..970d7cef 100644 --- a/scripts/auto_tests_docs/auto_tests.py +++ b/scripts/auto_tests_docs/auto_tests.py @@ -24,10 +24,12 @@ from dotenv import load_dotenv load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") model = OpenAIChat( model_name="gpt-4", openai_api_key=api_key, + openai_org_id=org_id, max_tokens=4000, ) diff --git a/scripts/auto_tests_docs/auto_tests_functions.py b/scripts/auto_tests_docs/auto_tests_functions.py index 4fa2fafd..70edbef6 100644 --- 
a/scripts/auto_tests_docs/auto_tests_functions.py +++ b/scripts/auto_tests_docs/auto_tests_functions.py @@ -12,10 +12,12 @@ from swarms.utils.parse_code import extract_code_from_markdown load_dotenv() api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") model = OpenAIChat( model_name="gpt-4", openai_api_key=api_key, + openai_org_id=org_id, max_tokens=4000, ) diff --git a/swarms/agents/worker_agent.py b/swarms/agents/worker_agent.py index d254acef..cd772c0b 100644 --- a/swarms/agents/worker_agent.py +++ b/swarms/agents/worker_agent.py @@ -9,6 +9,10 @@ from langchain_experimental.autonomous_agents import AutoGPT from swarms.utils.decorators import error_decorator, timing_decorator +load_dotenv() + +api_key = os.getenv("OPENAI_API_KEY") +org_id = os.getenv("OPENAI_ORG_ID") class Worker: """ @@ -48,6 +52,7 @@ class Worker: temperature: float = 0.5, llm=None, openai_api_key: str = None, + openai_org_id: str = None, tools: List[Any] = None, embedding_size: int = 1536, search_kwargs: dict = {"k": 8}, @@ -62,6 +67,7 @@ class Worker: self.temperature = temperature self.llm = llm self.openai_api_key = openai_api_key + self.openai_org_id = openai_org_id self.tools = tools self.embedding_size = embedding_size self.search_kwargs = search_kwargs @@ -103,6 +109,7 @@ class Worker: external_tools = [MyTool1(), MyTool2()] worker = Worker(model_name="gpt-4", openai_api_key="my_key", + openai_org_id="my_org_id", name="My Worker", role="Worker", external_tools=external_tools, @@ -123,9 +130,13 @@ class Worker: openai_api_key = ( os.getenv("OPENAI_API_KEY") or self.openai_api_key ) + openai_org_id = ( + os.getenv("OPENAI_ORG_ID") or self.openai_org_id + ) try: embeddings_model = OpenAIEmbeddings( - openai_api_key=openai_api_key + openai_api_key=openai_api_key, + openai_org_id=openai_org_id, ) embedding_size = self.embedding_size index = faiss.IndexFlatL2(embedding_size) diff --git a/swarms/memory/chroma_db.py b/swarms/memory/chroma_db.py index 8a5b6e91..4b9c2e97 
100644 --- a/swarms/memory/chroma_db.py +++ b/swarms/memory/chroma_db.py @@ -41,6 +41,7 @@ class ChromaDB: >>> output="results", >>> llm="gpt3", >>> openai_api_key=OPENAI_API_KEY, + >>> openai_org_id=OPENAI_ORG_ID, >>> ) >>> chromadb.add(task, result, result_id) """ diff --git a/swarms/models/dalle3.py b/swarms/models/dalle3.py index 6b225b49..3585063e 100644 --- a/swarms/models/dalle3.py +++ b/swarms/models/dalle3.py @@ -65,13 +65,15 @@ class Dalle3: max_retries: int = 3 quality: str = "standard" openai_api_key: str = None or os.getenv("OPENAI_API_KEY") +openai_org_id: str = None or os.getenv("OPENAI_ORG_ID") n: int = 1 save_path: str = "images" max_time_seconds: int = 60 save_folder: str = "images" image_format: str = "png" client = OpenAI( - api_key=openai_api_key, + api_key=openai_api_key, + organization=openai_org_id, ) cache = TTLCache(maxsize=100, ttl=3600) dashboard: bool = False diff --git a/swarms/models/gpt4_vision_api.py b/swarms/models/gpt4_vision_api.py index 5551648e..8b9a82fb 100644 --- a/swarms/models/gpt4_vision_api.py +++ b/swarms/models/gpt4_vision_api.py @@ -22,6 +22,7 @@ except ImportError: # Load environment variables load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") +openai_org_id = os.getenv("OPENAI_ORG_ID") gpt4_vision_system_prompt = """ @@ -67,6 +68,7 @@ class GPT4VisionAPI(BaseMultiModalModel): def __init__( self, openai_api_key: str = openai_api_key, + openai_org_id: str = openai_org_id, model_name: str = "gpt-4-vision-preview", logging_enabled: bool = False, max_workers: int = 10, @@ -81,6 +83,7 @@ class GPT4VisionAPI(BaseMultiModalModel): ): super(GPT4VisionAPI).__init__(*args, **kwargs) self.openai_api_key = openai_api_key + self.openai_org_id = openai_org_id self.logging_enabled = logging_enabled self.model_name = model_name self.max_workers = max_workers @@ -127,6 +130,7 @@ class GPT4VisionAPI(BaseMultiModalModel): headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.openai_api_key}", + 
"OpenAI-Organization": f"{self.openai_org_id}", } payload = { "model": self.model_name, @@ -265,6 +269,7 @@ class GPT4VisionAPI(BaseMultiModalModel): headers = { "Content-Type": "application/json", "Authorization": f"Bearer {openai_api_key}", + "OpenAI-Organization": f"{self.openai_org_id}", } payload = { "model": self.model_name, @@ -327,6 +332,7 @@ class GPT4VisionAPI(BaseMultiModalModel): headers = { "Content-Type": "application/json", "Authorization": f"Bearer {openai_api_key}", + "OpenAI-Organization": f"{self.openai_org_id}", } payload = { "model": self.model_name, diff --git a/swarms/models/openai_embeddings.py b/swarms/models/openai_embeddings.py index 4b5f18bd..b0a7c630 100644 --- a/swarms/models/openai_embeddings.py +++ b/swarms/models/openai_embeddings.py @@ -169,6 +169,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings): os.environ["OPENAI_API_TYPE"] = "azure" os.environ["OPENAI_API_BASE"] = "https://>> tts.run("Hello world") @@ -57,6 +62,7 @@ class OpenAITTS(AbstractLLM): model_name: str = "tts-1-1106", proxy_url: str = "https://api.openai.com/v1/audio/speech", openai_api_key: str = openai_api_key_env, + openai_org_id: str = openai_org_id_env, voice: str = "onyx", chunk_size=1024 * 1024, autosave: bool = False, @@ -68,6 +74,7 @@ class OpenAITTS(AbstractLLM): self.model_name = model_name self.proxy_url = proxy_url self.openai_api_key = openai_api_key + self.openai_org_id = openai_org_id self.voice = voice self.chunk_size = chunk_size self.autosave = autosave diff --git a/swarms/structs/SWARMS.md b/swarms/structs/SWARMS.md index 1a417831..1ff21a71 100644 --- a/swarms/structs/SWARMS.md +++ b/swarms/structs/SWARMS.md @@ -292,7 +292,7 @@ class Orchestrator: self.condition.notify() def embed(self, input): - openai = embedding_functions.OpenAIEmbeddingFunction(api_key=self.api_key, model_name=self.model_name) + openai = embedding_functions.OpenAIEmbeddingFunction(api_key=self.api_key, org_id=self.org_id, model_name=self.model_name) embedding = openai(input) return 
embedding diff --git a/swarms/structs/autoscaler.py b/swarms/structs/autoscaler.py index f26247d5..8f2b6d14 100644 --- a/swarms/structs/autoscaler.py +++ b/swarms/structs/autoscaler.py @@ -79,6 +79,7 @@ class AutoScaler(BaseStructure): >>> llm = OpenAIChat( ... temperature=0.5, ... openai_api_key=api_key, + ... openai_org_id=org_id, ... ) >>> ## Initialize the workflow >>> agent = Agent(llm=llm, max_loops=1, dashboard=True) diff --git a/swarms/structs/base_workflow.py b/swarms/structs/base_workflow.py index 03e503cc..5d85c1a8 100644 --- a/swarms/structs/base_workflow.py +++ b/swarms/structs/base_workflow.py @@ -138,7 +138,7 @@ class BaseWorkflow(BaseStructure): Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="",openai_org_id="") >>> workflow = SequentialWorkflow(max_loops=1) >>> workflow.add("What's the weather in miami", llm) >>> workflow.add("Create a report on these metrics", llm) @@ -176,7 +176,7 @@ class BaseWorkflow(BaseStructure): Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> workflow = SequentialWorkflow(max_loops=1) >>> workflow.add("What's the weather in miami", llm) >>> workflow.add("Create a report on these metrics", llm) @@ -215,7 +215,7 @@ class BaseWorkflow(BaseStructure): Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> workflow = SequentialWorkflow(max_loops=1) >>> workflow.add("What's the weather in miami", llm) >>> workflow.add("Create a report on these metrics", llm) @@ -287,7 +287,7 @@ class BaseWorkflow(BaseStructure): Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import 
SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> workflow = SequentialWorkflow(max_loops=1) >>> workflow.add("What's the weather in miami", llm) >>> workflow.add("Create a report on these metrics", llm) diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py index 8aa5399b..af608ba5 100644 --- a/swarms/structs/concurrent_workflow.py +++ b/swarms/structs/concurrent_workflow.py @@ -24,7 +24,7 @@ class ConcurrentWorkflow(BaseStructure): Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import ConcurrentWorkflow - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> workflow = ConcurrentWorkflow(max_workers=5) >>> workflow.add("What's the weather in miami", llm) >>> workflow.add("Create a report on these metrics", llm) diff --git a/swarms/structs/debate.py b/swarms/structs/debate.py index 9db84f06..0dedb059 100644 --- a/swarms/structs/debate.py +++ b/swarms/structs/debate.py @@ -21,6 +21,7 @@ class DebatePlayer(Agent): name (str): name of this player temperature (float): higher values make the output more random, while lower values make it more focused and deterministic openai_api_key (str): As the parameter name suggests + openai_org_id (str): As the parameter name suggests sleep_time (float): sleep because of rate limits """ super(DebatePlayer, self).__init__( @@ -37,6 +38,7 @@ class Debate: num_players (int): num of players save_file_dir (str): dir path to json file openai_api_key (str): As the parameter name suggests + openai_org_id (str): As the parameter name suggests prompts_path (str): prompts path (json file) max_round (int): maximum Rounds of Debate sleep_time (float): sleep because of rate limits @@ -277,6 +279,7 @@ class Debate: name="Judge", temperature=self.temperature, openai_api_key=self.openai_api_key, + openai_org_id=self.openai_org_id, sleep_time=self.sleep_time, ) 
aff_ans = self.affirmative.memory_lst[2]["content"] diff --git a/swarms/structs/model_parallizer.py b/swarms/structs/model_parallizer.py index 828d4ef4..c8219b9a 100644 --- a/swarms/structs/model_parallizer.py +++ b/swarms/structs/model_parallizer.py @@ -36,10 +36,12 @@ class ModelParallelizer: ... OpenAIChat( ... temperature=0.5, ... openai_api_key="OPENAI_API_KEY", + ... openai_org_id="OPENAI_ORG_ID", ... ), ... OpenAIChat( ... temperature=0.5, ... openai_api_key="OPENAI_API_KEY", + ... openai_org_id="OPENAI_ORG_ID", ... ), ... ] >>> mp = ModelParallelizer(llms) diff --git a/swarms/structs/multi_agent_collab.py b/swarms/structs/multi_agent_collab.py index 64b030d0..c59885ab 100644 --- a/swarms/structs/multi_agent_collab.py +++ b/swarms/structs/multi_agent_collab.py @@ -61,6 +61,8 @@ class MultiAgentCollaboration: >>> >>> # Initialize the language model >>> llm = OpenAIChat( + >>> openai_api_key="OPENAI_API_KEY", + >>> openai_org_id="OPENAI_ORG_ID", >>> temperature=0.5, >>> ) >>> diff --git a/swarms/structs/nonlinear_workflow.py b/swarms/structs/nonlinear_workflow.py index 0fc1d200..d08643a4 100644 --- a/swarms/structs/nonlinear_workflow.py +++ b/swarms/structs/nonlinear_workflow.py @@ -18,7 +18,7 @@ class NonlinearWorkflow(BaseStructure): Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import NonlinearWorkflow, Task - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> task = Task(llm, "What's the weather in miami") >>> workflow = NonlinearWorkflow() >>> workflow.add(task) diff --git a/swarms/structs/recursive_workflow.py b/swarms/structs/recursive_workflow.py index afeb91b7..cbf8d0b8 100644 --- a/swarms/structs/recursive_workflow.py +++ b/swarms/structs/recursive_workflow.py @@ -24,7 +24,7 @@ class RecursiveWorkflow(BaseStructure): Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import RecursiveWorkflow, Task - >>> llm = OpenAIChat(openai_api_key="") + >>> 
llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> task = Task(llm, "What's the weather in miami") >>> workflow = RecursiveWorkflow() >>> workflow.add(task) diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py index 6b1d7c06..0a4f21ec 100644 --- a/swarms/structs/sequential_workflow.py +++ b/swarms/structs/sequential_workflow.py @@ -28,7 +28,7 @@ class SequentialWorkflow: Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> workflow = SequentialWorkflow(max_loops=1) >>> workflow.add("What's the weather in miami", llm) >>> workflow.add("Create a report on these metrics", llm) @@ -158,7 +158,7 @@ class SequentialWorkflow: Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> workflow = SequentialWorkflow(max_loops=1) >>> workflow.add("What's the weather in miami", llm) >>> workflow.add("Create a report on these metrics", llm) @@ -220,7 +220,7 @@ class SequentialWorkflow: Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> workflow = SequentialWorkflow(max_loops=1) >>> workflow.add("What's the weather in miami", llm) >>> workflow.add("Create a report on these metrics", llm) @@ -271,7 +271,7 @@ class SequentialWorkflow: Examples: >>> from swarms.models import OpenAIChat >>> from swarms.structs import SequentialWorkflow - >>> llm = OpenAIChat(openai_api_key="") + >>> llm = OpenAIChat(openai_api_key="", openai_org_id="") >>> workflow = SequentialWorkflow(max_loops=1) >>> workflow.add("What's the weather in miami", llm) >>> workflow.add("Create a report on these metrics", 
llm) diff --git a/swarms/structs/task.py b/swarms/structs/task.py index fb89b7bf..7b30e2bd 100644 --- a/swarms/structs/task.py +++ b/swarms/structs/task.py @@ -50,7 +50,7 @@ class Task: Examples: >>> from swarms.structs import Task, Agent >>> from swarms.models import OpenAIChat - >>> agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False) + >>> agent = Agent(llm=OpenAIChat(openai_api_key="", openai_org_id=""), max_loops=1, dashboard=False) >>> task = Task(description="What's the weather in miami", agent=agent) >>> task.execute() >>> task.result @@ -81,7 +81,7 @@ class Task: Examples: >>> from swarms.structs import Task, Agent >>> from swarms.models import OpenAIChat - >>> agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False) + >>> agent = Agent(llm=OpenAIChat(openai_api_key="", openai_org_id=""), max_loops=1, dashboard=False) >>> task = Task(description="What's the weather in miami", agent=agent) >>> task.execute() >>> task.result diff --git a/tests/memory/test_weaviate.py b/tests/memory/test_weaviate.py index f9e61c8f..187cb6f3 100644 --- a/tests/memory/test_weaviate.py +++ b/tests/memory/test_weaviate.py @@ -15,7 +15,9 @@ def weaviate_client_mock(): grpc_secure=False, auth_client_secret="mock_api_key", additional_headers={ - "X-OpenAI-Api-Key": "mock_openai_api_key" + "X-OpenAI-Api-Key": "mock_openai_api_key", + "X-OpenAI-Organization": "mock_openai_org_id", + }, additional_config=Mock(), ) diff --git a/tests/models/test_gpt4_vision_api.py b/tests/models/test_gpt4_vision_api.py index 26f60960..cd1ff660 100644 --- a/tests/models/test_gpt4_vision_api.py +++ b/tests/models/test_gpt4_vision_api.py @@ -11,16 +11,19 @@ from swarms.models.gpt4_vision_api import GPT4VisionAPI load_dotenv() custom_api_key = os.environ.get("OPENAI_API_KEY") +custom_org_id = os.environ.get("OPENAI_ORG_ID") + img = "images/swarms.jpeg" @pytest.fixture def vision_api(): - return GPT4VisionAPI(openai_api_key="test_api_key") + return 
GPT4VisionAPI(openai_api_key="test_api_key", openai_org_id="test_org_id") def test_init(vision_api): assert vision_api.openai_api_key == "test_api_key" + assert vision_api.openai_org_id == "test_org_id" def test_encode_image(vision_api): @@ -83,12 +86,14 @@ def gpt_api(): def test_initialization_with_default_key(): api = GPT4VisionAPI() assert api.openai_api_key == custom_api_key + assert api.openai_org_id == custom_org_id def test_initialization_with_custom_key(): custom_key = custom_api_key api = GPT4VisionAPI(openai_api_key=custom_key) assert api.openai_api_key == custom_key + assert api.openai_org_id == custom_org_id def test_run_with_exception(gpt_api): diff --git a/tests/models/test_openaitts.py b/tests/models/test_openaitts.py index b6a4a7ff..c939e0af 100644 --- a/tests/models/test_openaitts.py +++ b/tests/models/test_openaitts.py @@ -23,6 +23,7 @@ def test_openaitts_initialization_custom_parameters(): assert tts.model_name == "custom_model" assert tts.proxy_url == "custom_url" assert tts.openai_api_key == "custom_key" + assert tts.openai_org_id == "custom_org_id" assert tts.voice == "custom_voice" assert tts.chunk_size == 2048 @@ -37,7 +38,9 @@ def test_run(mock_post): assert audio == b"chunk1chunk2" mock_post.assert_called_once_with( "https://api.openai.com/v1/audio/speech", - headers={"Authorization": f"Bearer {tts.openai_api_key}"}, + headers={"Authorization": f"Bearer {tts.openai_api_key}", + "X-OpenAI-Organization": f"{tts.openai_org_id}" + }, json={ "model": "tts-1-1106", "input": "Hello world", @@ -77,7 +80,9 @@ def test_run_custom_model(mock_post): assert audio == b"chunk1chunk2" mock_post.assert_called_once_with( "https://api.openai.com/v1/audio/speech", - headers={"Authorization": f"Bearer {tts.openai_api_key}"}, + headers={"Authorization": f"Bearer {tts.openai_api_key}", + "X-OpenAI-Organization": f"{tts.openai_org_id}" + }, json={ "model": "custom_model", "input": "Hello world", @@ -96,7 +101,9 @@ def test_run_custom_voice(mock_post): assert 
audio == b"chunk1chunk2" mock_post.assert_called_once_with( "https://api.openai.com/v1/audio/speech", - headers={"Authorization": f"Bearer {tts.openai_api_key}"}, + headers={"Authorization": f"Bearer {tts.openai_api_key}", + "X-OpenAI-Organization": f"{tts.openai_org_id}" + }, json={ "model": "tts-1-1106", "input": "Hello world", diff --git a/tests/structs/test_agent.py b/tests/structs/test_agent.py index 8e5b11be..19a60fb1 100644 --- a/tests/structs/test_agent.py +++ b/tests/structs/test_agent.py @@ -13,6 +13,7 @@ from swarms.utils.logger import logger load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") +openai_org_id = os.getenv("OPENAI_ORG_ID") # Mocks and Fixtures @@ -20,6 +21,7 @@ openai_api_key = os.getenv("OPENAI_API_KEY") def mocked_llm(): return OpenAIChat( openai_api_key=openai_api_key, + openai_org_id=openai_org_id, ) @@ -214,7 +216,8 @@ def test_from_llm_and_template(mocked_llm): # Mocking the OpenAIChat for testing @patch("swarms.models.OpenAIChat", autospec=True) def test_mocked_openai_chat(MockedOpenAIChat): - llm = MockedOpenAIChat(openai_api_key=openai_api_key) + llm = MockedOpenAIChat(openai_api_key=openai_api_key, + openai_org_id=openai_org_id) llm.return_value = MagicMock() agent = Agent(llm=llm, max_loops=5) agent.run("Mocked run") @@ -291,6 +294,7 @@ def flow_instance(): # You may need to adjust this based on your actual class initialization llm = OpenAIChat( openai_api_key=openai_api_key, + openai_org_id=openai_org_id, ) agent = Agent( llm=llm, diff --git a/tests/structs/test_autoscaler.py b/tests/structs/test_autoscaler.py index 313d911d..58f79336 100644 --- a/tests/structs/test_autoscaler.py +++ b/tests/structs/test_autoscaler.py @@ -12,9 +12,11 @@ from swarms.structs.autoscaler import AutoScaler load_dotenv() api_key = os.environ.get("OPENAI_API_KEY") +org_id = os.environ.get("OPENAI_ORG_ID") llm = OpenAIChat( temperature=0.5, openai_api_key=api_key, + openai_org_id=org_id, ) agent = Agent(llm=llm, max_loops=1) diff --git 
a/tests/structs/test_base_workflow.py b/tests/structs/test_base_workflow.py index 17be5ea8..86bda00a 100644 --- a/tests/structs/test_base_workflow.py +++ b/tests/structs/test_base_workflow.py @@ -9,10 +9,10 @@ from dotenv import load_dotenv load_dotenv() api_key = os.environ.get("OPENAI_API_KEY") - +org_id = os.environ.get("OPENAI_ORG_ID") def setup_workflow(): - llm = OpenAIChat(openai_api_key=api_key) + llm = OpenAIChat(openai_api_key=api_key, openai_org_id=org_id) workflow = BaseWorkflow(max_loops=1) workflow.add("What's the weather in miami", llm) workflow.add("Create a report on these metrics", llm) diff --git a/tests/structs/test_company.py b/tests/structs/test_company.py index 0b1ec105..7f6689ba 100644 --- a/tests/structs/test_company.py +++ b/tests/structs/test_company.py @@ -4,7 +4,9 @@ from swarms.structs.company import Company from swarms import OpenAIChat # Mock OpenAIChat instance -llm = OpenAIChat(openai_api_key="test_key", max_tokens=4000) +llm = OpenAIChat(openai_api_key="test_key", + openai_org_id="test_org_id", + max_tokens=4000) # Mock Agents ceo = Agent(llm=llm, name="CEO") diff --git a/tests/structs/test_nonlinear_workflow.py b/tests/structs/test_nonlinear_workflow.py index 8919fc76..801a3e67 100644 --- a/tests/structs/test_nonlinear_workflow.py +++ b/tests/structs/test_nonlinear_workflow.py @@ -5,7 +5,7 @@ from swarms.models import OpenAIChat class TestNonlinearWorkflow: def test_add_task(self): - llm = OpenAIChat(openai_api_key="") + llm = OpenAIChat(openai_api_key="", openai_org_id="") task = Task(llm, "What's the weather in miami") workflow = NonlinearWorkflow() workflow.add(task) @@ -18,7 +18,7 @@ class TestNonlinearWorkflow: workflow.run() def test_run_with_single_task(self): - llm = OpenAIChat(openai_api_key="") + llm = OpenAIChat(openai_api_key="", openai_org_id="") task = Task(llm, "What's the weather in miami") workflow = NonlinearWorkflow() workflow.add(task) @@ -26,7 +26,7 @@ class TestNonlinearWorkflow: workflow.run() def
test_run_with_circular_dependency(self): - llm = OpenAIChat(openai_api_key="") + llm = OpenAIChat(openai_api_key="", openai_org_id="") task1 = Task(llm, "What's the weather in miami") task2 = Task(llm, "What's the weather in new york") workflow = NonlinearWorkflow() @@ -38,7 +38,7 @@ class TestNonlinearWorkflow: workflow.run() def test_run_with_stopping_token(self): - llm = OpenAIChat(openai_api_key="") + llm = OpenAIChat(openai_api_key="", openai_org_id="") task1 = Task(llm, "What's the weather in miami") task2 = Task(llm, "What's the weather in new york") workflow = NonlinearWorkflow(stopping_token="stop") diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py index 0d12991a..a5a0d6c5 100644 --- a/tests/structs/test_sequential_workflow.py +++ b/tests/structs/test_sequential_workflow.py @@ -13,6 +13,7 @@ from swarms.structs.sequential_workflow import ( # Mock the OpenAI API key using environment variables os.environ["OPENAI_API_KEY"] = "mocked_api_key" +os.environ["OPENAI_ORG_ID"] = "mocked_org_id" # Mock OpenAIChat class for testing diff --git a/tests/structs/test_team.py b/tests/structs/test_team.py index 44d64e18..64931945 100644 --- a/tests/structs/test_team.py +++ b/tests/structs/test_team.py @@ -9,7 +9,7 @@ from swarms.structs.team import Team class TestTeam(unittest.TestCase): def setUp(self): self.agent = Agent( - llm=OpenAIChat(openai_api_key=""), + llm=OpenAIChat(openai_api_key="", openai_org_id=""), max_loops=1, dashboard=False, )