diff --git a/.github/workflows/run_examples.yml b/.github/workflows/run_examples.yml
new file mode 100644
index 00000000..75988193
--- /dev/null
+++ b/.github/workflows/run_examples.yml
@@ -0,0 +1,34 @@
+name: Run Examples Script
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+  schedule:
+    # Runs at 3:00 AM UTC every day
+    - cron: '0 3 * * *'
+
+jobs:
+  run-examples:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.9'
+
+      - name: Install dependencies
+        run: |
+          pip install -r requirements.txt
+          # Assuming your script might also need pytest and swarms
+          pip install pytest
+          pip install swarms
+
+      - name: Make Script Executable and Run
+        run: |
+          chmod +x ./scripts/run_examples.sh
+          ./scripts/run_examples.sh
diff --git a/playground/examples/README.md b/playground/examples/README.md
new file mode 100644
index 00000000..e69de29b
diff --git a/playground/examples/Screenshot from 2024-02-20 05-55-34.png b/playground/examples/Screenshot from 2024-02-20 05-55-34.png
new file mode 100644
index 00000000..c9f46994
Binary files /dev/null and b/playground/examples/Screenshot from 2024-02-20 05-55-34.png differ
diff --git a/playground/examples/example_agent.py b/playground/examples/example_agent.py
new file mode 100644
index 00000000..85b3da4b
--- /dev/null
+++ b/playground/examples/example_agent.py
@@ -0,0 +1,29 @@
+import os
+import sys
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent class
+from swarms import OpenAIChat, Agent
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(
+    temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000
+)
+
+
+print(f"this is a test msg for stdout and stderr: {sys.stdout}, {sys.stderr}")
+
+## Initialize the agent
+agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
+
+# Run the agent on a task
+out = agent.run("Generate a 10,000 word blog on health and wellness.")
+
+print(out)
diff --git a/playground/examples/example_anthropic.py b/playground/examples/example_anthropic.py
new file mode 100644
index 00000000..3414214f
--- /dev/null
+++ b/playground/examples/example_anthropic.py
@@ -0,0 +1,13 @@
+# Import necessary modules and classes
+from swarms.models import Anthropic
+
+# Initialize an instance of the Anthropic class
+model = Anthropic(anthropic_api_key="")
+
+# Using the run method
+# completion_1 = model.run("What is the capital of France?")
+# print(completion_1)
+
+# Using the __call__ method
+completion_2 = model("How far is the moon from the earth?", stop=["miles", "km"])
+print(completion_2)
diff --git a/playground/examples/example_concurrentworkflow.py b/playground/examples/example_concurrentworkflow.py
new file mode 100644
index 00000000..cc1e3a2f
--- /dev/null
+++ b/playground/examples/example_concurrentworkflow.py
@@ -0,0 +1,24 @@
+import os
+from dotenv import load_dotenv
+from swarms import OpenAIChat, Task, ConcurrentWorkflow, Agent
+
+# Load environment variables from .env file
+load_dotenv()
+
+# Initialize the language model and the agent
+llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))
+agent = Agent(llm=llm, max_loops=1)
+
+# Create a workflow
+workflow = ConcurrentWorkflow(max_workers=5)
+
+# Create tasks
+task1 = Task(agent, "What's the weather in miami")
+task2 = Task(agent, "What's the weather in new york")
+task3 = Task(agent, "What's the weather in london")
+
+# Add tasks to the workflow
+workflow.add(tasks=[task1, task2, task3])
+
+# Run the workflow
+workflow.run()
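A note on reading results back from the concurrent run above: a minimal sketch, assuming each Task exposes description and result attributes after execution, as the sequential-workflow and task examples later in this diff suggest — verify against the installed swarms version.

# Sketch (assumption, not part of this PR): collect per-task outputs
# after workflow.run() has finished.
for task in [task1, task2, task3]:
    print(f"{task.description}: {task.result}")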
diff --git a/playground/examples/example_dalle3.py b/playground/examples/example_dalle3.py
new file mode 100644
index 00000000..1aae1f53
--- /dev/null
+++ b/playground/examples/example_dalle3.py
@@ -0,0 +1,14 @@
+'''from swarms.models import Dalle3
+
+# Create an instance of the Dalle3 class with high quality
+dalle3 = Dalle3(quality="high")
+
+# Define a text prompt
+task = "A high-quality image of a sunset"
+
+# Generate a high-quality image from the text prompt
+image_url = dalle3(task)
+
+# Print the generated image URL
+print(image_url)
+'''
\ No newline at end of file
diff --git a/playground/examples/example_gpt4vison.py b/playground/examples/example_gpt4vison.py
new file mode 100644
index 00000000..e2c383bc
--- /dev/null
+++ b/playground/examples/example_gpt4vison.py
@@ -0,0 +1,14 @@
+from swarms import GPT4VisionAPI
+
+# Initialize with default API key and custom max_tokens
+api = GPT4VisionAPI(max_tokens=1000)
+
+# Define the task and image URL
+task = "Describe the scene in the image."
+img = "/home/kye/.swarms/swarms/examples/Screenshot from 2024-02-20 05-55-34.png"
+
+# Run the GPT-4 Vision model
+response = api.run(task, img)
+
+# Print the model's response
+print(response)
diff --git a/playground/examples/example_huggingfacellm.py b/playground/examples/example_huggingfacellm.py
new file mode 100644
index 00000000..ca28df58
--- /dev/null
+++ b/playground/examples/example_huggingfacellm.py
@@ -0,0 +1,29 @@
+from swarms.models import HuggingfaceLLM
+import torch
+
+try:
+    inference = HuggingfaceLLM(
+        model_id="gpt2",
+        quantize=False,
+        verbose=True,
+    )
+
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    inference.model.to(device)
+
+    prompt_text = "Create a list of known biggest risks of structural collapse with references"
+    inputs = inference.tokenizer(prompt_text, return_tensors="pt").to(device)
+
+    generated_ids = inference.model.generate(
+        **inputs,
+        max_new_tokens=1000,  # Adjust the length of the generation
+        temperature=0.7,  # Adjust creativity
+        top_k=50,  # Limits the vocabulary considered at each step
+        pad_token_id=inference.tokenizer.eos_token_id,
+        do_sample=True  # Enable sampling to utilize temperature
+    )
+
+    generated_text = inference.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+    print(generated_text)
+except Exception as e:
+    print(f"An error occurred: {e}")
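As a reference point for the HuggingfaceLLM example above, the same gpt2 generation can be sketched with the plain transformers pipeline API — a standalone sketch, not a change to the PR.

# Sketch: equivalent generation via transformers.pipeline.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
out = generator(
    "Create a list of known biggest risks of structural collapse with references",
    max_new_tokens=200,  # kept small here; the example above uses 1000
    do_sample=True,
    temperature=0.7,
    top_k=50,
)
print(out[0]["generated_text"])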
+ " https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052" +) +response = model.chat(user_input) +print(response) + +# Set the checkpoint of the model to "new_checkpoint" +model.set_checkpoint("new_checkpoint") + +# Set the device of the model to "cpu" +model.set_device("cpu") + +# Set the maximum length of the chat to 200 +model.set_max_length(200) + +# Clear the chat history of the model +model.clear_chat_history() diff --git a/playground/examples/example_kosmos.py b/playground/examples/example_kosmos.py new file mode 100644 index 00000000..dbfd108f --- /dev/null +++ b/playground/examples/example_kosmos.py @@ -0,0 +1,10 @@ +from swarms import Kosmos + +# Initialize the model +model = Kosmos() + +# Generate +out = model.run("Analyze the reciepts in this image", "docs.jpg") + +# Print the output +print(out) diff --git a/playground/examples/example_logistics.py b/playground/examples/example_logistics.py new file mode 100644 index 00000000..035ca9e5 --- /dev/null +++ b/playground/examples/example_logistics.py @@ -0,0 +1,100 @@ +from swarms.structs import Agent +import os +from dotenv import load_dotenv +from swarms.models import GPT4VisionAPI +from swarms.prompts.logistics import ( + Health_Security_Agent_Prompt, + Quality_Control_Agent_Prompt, + Productivity_Agent_Prompt, + Safety_Agent_Prompt, + Security_Agent_Prompt, + Sustainability_Agent_Prompt, + Efficiency_Agent_Prompt, +) + +# Load ENV +load_dotenv() +api_key = os.getenv("OPENAI_API_KEY") + +# GPT4VisionAPI +llm = GPT4VisionAPI(openai_api_key=api_key) + +# Image for analysis +factory_image = "factory_image1.jpg" + +# Initialize agents with respective prompts +health_security_agent = Agent( + llm=llm, + sop=Health_Security_Agent_Prompt, + max_loops=1, + multi_modal=True, +) + +# Quality control agent +quality_control_agent = Agent( + llm=llm, + sop=Quality_Control_Agent_Prompt, + max_loops=1, + multi_modal=True, +) + + +# Productivity Agent +productivity_agent = Agent( + llm=llm, + sop=Productivity_Agent_Prompt, + max_loops=1, + multi_modal=True, +) + +# Initiailize safety agent +safety_agent = Agent(llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True) + +# Init the security agent +security_agent = Agent( + llm=llm, sop=Security_Agent_Prompt, max_loops=1, multi_modal=True +) + + +# Initialize sustainability agent +sustainability_agent = Agent( + llm=llm, + sop=Sustainability_Agent_Prompt, + max_loops=1, + multi_modal=True, +) + + +# Initialize efficincy agent +efficiency_agent = Agent( + llm=llm, + sop=Efficiency_Agent_Prompt, + max_loops=1, + multi_modal=True, +) + +# Run agents with respective tasks on the same image +health_analysis = health_security_agent.run( + "Analyze the safety of this factory", factory_image +) +quality_analysis = quality_control_agent.run( + "Examine product quality in the factory", factory_image +) +productivity_analysis = productivity_agent.run( + "Evaluate factory productivity", factory_image +) +safety_analysis = safety_agent.run( + "Inspect the factory's adherence to safety standards", + factory_image, +) +security_analysis = security_agent.run( + "Assess the factory's security measures and systems", + factory_image, +) +sustainability_analysis = sustainability_agent.run( + "Examine the factory's sustainability practices", factory_image +) +efficiency_analysis = efficiency_agent.run( + "Analyze the efficiency of the factory's manufacturing process", + factory_image, +) diff --git a/playground/examples/example_mixtral.py 
diff --git a/playground/examples/example_mixtral.py b/playground/examples/example_mixtral.py
new file mode 100644
index 00000000..e1fddb05
--- /dev/null
+++ b/playground/examples/example_mixtral.py
@@ -0,0 +1,10 @@
+from swarms.models import Mixtral
+
+# Initialize the Mixtral model with 4-bit quantization and flash attention!
+mixtral = Mixtral(load_in_4bit=True, use_flash_attention_2=True)
+
+# Generate text for a simple task
+generated_text = mixtral.run("Generate a creative story.")
+
+# Print the generated text
+print(generated_text)
diff --git a/playground/examples/example_qwenvlmultimodal.py b/playground/examples/example_qwenvlmultimodal.py
new file mode 100644
index 00000000..f338a508
--- /dev/null
+++ b/playground/examples/example_qwenvlmultimodal.py
@@ -0,0 +1,14 @@
+from swarms import QwenVLMultiModal
+
+# Instantiate the QwenVLMultiModal model
+model = QwenVLMultiModal(
+    model_name="Qwen/Qwen-VL-Chat",
+    device="cuda",
+    quantize=True,
+)
+
+# Run the model
+response = model("Hello, how are you?", "https://example.com/image.jpg")
+
+# Print the response
+print(response)
diff --git a/playground/examples/example_recursiveworkflow.py b/playground/examples/example_recursiveworkflow.py
new file mode 100644
index 00000000..9760b606
--- /dev/null
+++ b/playground/examples/example_recursiveworkflow.py
@@ -0,0 +1,26 @@
+import os
+from dotenv import load_dotenv
+from swarms import OpenAIChat, Task, RecursiveWorkflow, Agent
+
+# Load environment variables from .env file
+load_dotenv()
+
+# Initialize the language model and the agent
+llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))
+agent = Agent(llm=llm, max_loops=1)
+
+# Create a workflow
+workflow = RecursiveWorkflow(stop_token="")
+
+# Create tasks
+task1 = Task(agent, "What's the weather in miami")
+task2 = Task(agent, "What's the weather in new york")
+task3 = Task(agent, "What's the weather in london")
+
+# Add tasks to the workflow
+workflow.add(task1)
+workflow.add(task2)
+workflow.add(task3)
+
+# Run the workflow
+workflow.run()
diff --git a/playground/examples/example_sequentialworkflow.py b/playground/examples/example_sequentialworkflow.py
new file mode 100644
index 00000000..efc5890f
--- /dev/null
+++ b/playground/examples/example_sequentialworkflow.py
@@ -0,0 +1,46 @@
+import os
+from swarms import OpenAIChat, Agent, SequentialWorkflow
+from dotenv import load_dotenv
+
+load_dotenv()
+
+# Load the environment variables
+api_key = os.getenv("OPENAI_API_KEY")
+
+
+# Initialize the language model
+llm = OpenAIChat(
+    temperature=0.5, model_name="gpt-4", openai_api_key=api_key, max_tokens=4000
+)
+
+
+# Initialize the first agent
+agent1 = Agent(llm=llm, max_loops=1)
+
+# Create another agent for a different task
+agent2 = Agent(llm=llm, max_loops=1)
+
+# Create another agent for a different task
+agent3 = Agent(llm=llm, max_loops=1)
+
+# Create the workflow
+workflow = SequentialWorkflow(max_loops=1)
+
+# Add tasks to the workflow
+workflow.add(
+    agent1,
+    "Generate a 10,000 word blog on health and wellness.",
+)
+
+# The next task takes the output of the first task as input
+workflow.add(
+    agent2,
+    "Summarize the generated blog",
+)
+
+# Run the workflow
+workflow.run()
+
+# Output the results
+for task in workflow.tasks:
+    print(f"Task: {task.description}, Result: {task.result}")
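Note that agent3 in the sequential-workflow example is initialized but never given a task. If a third step is wanted, it chains the same way — a sketch using the file's own API, not part of the PR:

# Sketch: put the otherwise-unused agent3 to work as a third step.
workflow.add(
    agent3,
    "Write a one-paragraph social media post based on the summary",
)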
diff --git a/playground/examples/example_simple_conversation_agent.py b/playground/examples/example_simple_conversation_agent.py
new file mode 100644
index 00000000..25c5635e
--- /dev/null
+++ b/playground/examples/example_simple_conversation_agent.py
@@ -0,0 +1,43 @@
+import os
+
+from dotenv import load_dotenv
+
+from swarms import (
+    OpenAIChat,
+    Conversation,
+)
+
+conv = Conversation(
+    time_enabled=True,
+)
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4")
+
+
+# Run the language model in a loop
+def interactive_conversation(llm):
+    conv = Conversation()
+    while True:
+        user_input = input("User: ")
+        if user_input.lower() == "quit":
+            break
+        conv.add("user", user_input)
+        task = conv.return_history_as_string()  # Get the conversation history
+        out = llm(task)
+        conv.add("assistant", out)
+        print(
+            f"Assistant: {out}",
+        )
+    conv.display_conversation()
+    conv.export_conversation("conversation.txt")
+
+
+# Start the interactive session with the LLM instance
+interactive_conversation(llm)
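One caveat with this loop: the full history is resent to the model on every turn, so long sessions grow the prompt without bound. A minimal trimming helper, assuming nothing beyond return_history_as_string; the max_chars budget is a made-up parameter, not a swarms one:

# Sketch: crude history trimming before each model call.
def trimmed_history(conv, max_chars=8000):
    history = conv.return_history_as_string()
    return history[-max_chars:]  # keep only the most recent context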
diff --git a/playground/examples/example_swarmnetwork.py b/playground/examples/example_swarmnetwork.py
new file mode 100644
index 00000000..f2501c4c
--- /dev/null
+++ b/playground/examples/example_swarmnetwork.py
@@ -0,0 +1,44 @@
+import os
+
+from dotenv import load_dotenv
+
+# Import the OpenAIChat model and the Agent class
+from swarms import OpenAIChat, Agent, SwarmNetwork
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(
+    temperature=0.5,
+    openai_api_key=api_key,
+)
+
+## Initialize the agents
+agent = Agent(llm=llm, max_loops=1, agent_name="Social Media Manager")
+agent2 = Agent(llm=llm, max_loops=1, agent_name="Product Manager")
+agent3 = Agent(llm=llm, max_loops=1, agent_name="SEO Manager")
+
+
+# Load the swarm network with the agents
+swarmnet = SwarmNetwork(
+    agents=[agent, agent2, agent3],
+)
+
+# List the agents in the swarm network
+out = swarmnet.list_agents()
+print(out)
+
+# Run a single agent on a task
+out = swarmnet.run_single_agent(
+    agent2.id, "Generate a 10,000 word blog on health and wellness."
+)
+print(out)
+
+
+# Run all the agents in the swarm network on a task
+out = swarmnet.run_many_agents("Generate a 10,000 word blog on health and wellness.")
+print(out)
diff --git a/playground/examples/example_swarms.md b/playground/examples/example_swarms.md
new file mode 100644
index 00000000..9b46a70e
--- /dev/null
+++ b/playground/examples/example_swarms.md
@@ -0,0 +1,25 @@
+hey guys, we out here testing out swarms, which is a multi-modal agent
+framework that potentially makes all the agents work in a single pot:
+for instance, take an empty pot, place all the known agents in that
+pot, and get a well-structured answer out of it
+
+that's basically it, we believe that a multi-agent framework beats a single
+agent framework, which is not really rocket science
+
+ight, first we gotta make sure our env is clean, install python3-pip,
+this runs on python3.10
+
+our current version is swarms==4.1.0
+
+make sure you in a virtual env or conda
+
+just do
+    $ python3 -m venv ~/.venv
+    $ source ~/.venv/bin/activate
+
+then boom we in a virtual env LFG
+
+now for the best part, we install swarms
+
+    $ pip3 install --upgrade swarms==4.1.0
+
diff --git a/playground/examples/example_task.py b/playground/examples/example_task.py
new file mode 100644
index 00000000..c2ade96a
--- /dev/null
+++ b/playground/examples/example_task.py
@@ -0,0 +1,53 @@
+import os
+
+from dotenv import load_dotenv
+
+from swarms.structs import Agent, OpenAIChat, Task
+
+# Load the environment variables
+load_dotenv()
+
+
+# Define a function to be used as the action
+def my_action():
+    print("Action executed")
+
+
+# Define a function to be used as the condition
+def my_condition():
+    print("Condition checked")
+    return True
+
+
+# Create an agent
+agent = Agent(
+    llm=OpenAIChat(openai_api_key=os.environ["OPENAI_API_KEY"]),
+    max_loops=1,
+    dashboard=False,
+)
+
+# Create a task
+task = Task(
+    description=(
+        "Generate a report on the top 3 biggest expenses for small"
+        " businesses and how businesses can save 20%"
+    ),
+    agent=agent,
+)
+
+# Set the action and condition
+task.set_action(my_action)
+task.set_condition(my_condition)
+
+# Execute the task
+print("Executing task...")
+task.run()
+
+# Check if the task is completed
+if task.is_completed():
+    print("Task completed")
+else:
+    print("Task not completed")
+
+# Output the result of the task
+print(f"Task result: {task.result}")
diff --git a/playground/examples/example_toolagent.py b/playground/examples/example_toolagent.py
new file mode 100644
index 00000000..f22ab8b6
--- /dev/null
+++ b/playground/examples/example_toolagent.py
@@ -0,0 +1,30 @@
+# Import necessary libraries
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from swarms import ToolAgent
+
+# Load the pre-trained model and tokenizer
+model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
+tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
+
+# Define a JSON schema for a person's information
+json_schema = {
+    "type": "object",
+    "properties": {
+        "name": {"type": "string"},
+        "age": {"type": "number"},
+        "is_student": {"type": "boolean"},
+        "courses": {"type": "array", "items": {"type": "string"}},
+    },
+}
+
+# Define the task to generate a person's information
+task = "Generate a person's information based on the following schema:"
+
+# Create an instance of the ToolAgent class
+agent = ToolAgent(model=model, tokenizer=tokenizer, json_schema=json_schema)
+
+# Run the agent to generate the person's information
+generated_data = agent.run(task)
+
+# Print the generated data
+print(generated_data)
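Since ToolAgent is driven by json_schema, the output can be checked mechanically. A sketch using the third-party jsonschema package (an assumption, not a dependency of this PR); it also assumes generated_data is a dict or a JSON string:

# Sketch: validate the agent's output against the schema above.
import json

from jsonschema import validate, ValidationError

data = json.loads(generated_data) if isinstance(generated_data, str) else generated_data
try:
    validate(instance=data, schema=json_schema)
    print("Output conforms to the schema.")
except ValidationError as err:
    print(f"Schema violation: {err.message}")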
diff --git a/playground/examples/example_worker.py b/playground/examples/example_worker.py
new file mode 100644
index 00000000..c6a33512
--- /dev/null
+++ b/playground/examples/example_worker.py
@@ -0,0 +1,33 @@
+# Importing necessary modules
+import os
+from dotenv import load_dotenv
+from swarms import Worker, OpenAIChat, tool
+
+# Loading environment variables from .env file
+load_dotenv()
+
+# Retrieving the OpenAI API key from environment variables
+api_key = os.getenv("OPENAI_API_KEY")
+
+
+# Create a tool
+@tool
+def search_api(query: str):
+    pass
+
+
+# Creating a Worker instance
+worker = Worker(
+    name="My Worker",
+    role="Worker",
+    human_in_the_loop=False,
+    tools=[search_api],
+    temperature=0.5,
+    llm=OpenAIChat(openai_api_key=api_key),
+)
+
+# Running the worker with a prompt
+out = worker.run("Hello, how are you? Create an image of how you are doing!")
+
+# Printing the output
+print(out)
diff --git a/playground/examples/example_zeroscopetv.py b/playground/examples/example_zeroscopetv.py
new file mode 100644
index 00000000..e4fb8264
--- /dev/null
+++ b/playground/examples/example_zeroscopetv.py
@@ -0,0 +1,12 @@
+# Import the model
+from swarms import ZeroscopeTTV
+
+# Initialize the model
+zeroscope = ZeroscopeTTV()
+
+# Specify the task
+task = "A person is walking on the street."
+
+# Generate the video!
+video_path = zeroscope(task)
+print(video_path)
diff --git a/scripts/run_examples.sh b/scripts/run_examples.sh
new file mode 100644
index 00000000..f7978058
--- /dev/null
+++ b/scripts/run_examples.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Define a file to keep track of successfully executed scripts
+SUCCESS_LOG="successful_runs.log"
+touch "$SUCCESS_LOG"  # ensure the log exists so the grep below does not fail
+
+for f in /swarms/playground/examples/example_*.py; do
+  # Check if the script has been logged as successful
+  if grep -Fxq "$f" "$SUCCESS_LOG"; then
+    echo "Skipping ${f} as it ran successfully in a previous run."
+  else
+    # Run the script if not previously successful
+    if /home/kye/miniconda3/envs/swarms/bin/python "$f" 2>>errors.txt; then
+      echo "(${f}) ran successfully without errors."
+      # Log the successful script execution
+      echo "$f" >> "$SUCCESS_LOG"
+    else
+      echo "Error encountered in ${f}. Check errors.txt for details."
+      break
+    fi
+  fi
+  echo "##############################################################################"
+done
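For environments without the hard-coded conda path above, the same resume-from-log behavior can be sketched in Python using sys.executable — a hypothetical portable variant, not part of this PR:

# Sketch: resume-capable example runner.
import subprocess
import sys
from pathlib import Path

log = Path("successful_runs.log")
done = set(log.read_text().splitlines()) if log.exists() else set()

for script in sorted(Path("playground/examples").glob("example_*.py")):
    if str(script) in done:
        continue  # already ran cleanly in a previous invocation
    result = subprocess.run([sys.executable, str(script)])
    if result.returncode != 0:
        print(f"Error encountered in {script}.")
        break
    with log.open("a") as fh:
        fh.write(f"{script}\n")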