From 336bffea19feff85db6e6b9035d687cdc09a4b0c Mon Sep 17 00:00:00 2001 From: Kye Date: Mon, 6 Nov 2023 17:43:50 -0500 Subject: [PATCH] playground + flow docs fix --- docs/swarms/structs/flow.md | 11 ++- .../agents/simple_agent.py | 0 playground/models/multitemp.py | 56 ------------- playground/models/openai_model.py | 4 +- playground/structs/flow.py | 35 ++++++++ playground/structs/sequential_workflow.py | 31 +++++++ playground/swarms/godmode.py | 39 ++------- playground/swarms/groupchat.py | 84 ++++++++----------- 8 files changed, 120 insertions(+), 140 deletions(-) rename simple_agent.py => playground/agents/simple_agent.py (100%) delete mode 100644 playground/models/multitemp.py create mode 100644 playground/structs/sequential_workflow.py diff --git a/docs/swarms/structs/flow.md b/docs/swarms/structs/flow.md index 9300c632..13f0541c 100644 --- a/docs/swarms/structs/flow.md +++ b/docs/swarms/structs/flow.md @@ -108,8 +108,13 @@ Here are three usage examples: ```python from swarms.structs import Flow +# Select any Language model from the models folder +from swarms.models import Mistral, OpenAIChat -flow = Flow(llm=my_language_model, max_loops=5) +llm = Mistral() +# llm = OpenAIChat() + +flow = Flow(llm=llm, max_loops=5) # Define a starting task or message initial_task = "Generate an long form analysis on the transformer model architecture." 
@@ -126,7 +131,7 @@ from swarms.structs import Flow def stop_when_repeats(response: str) -> bool: return "Stop" in response.lower() -flow = Flow(llm=my_language_model, max_loops=5, stopping_condition=stop_when_repeats) +flow = Flow(llm=llm, max_loops=5, stopping_condition=stop_when_repeats) ``` ### Example 3: Interactive Conversation @@ -134,7 +139,7 @@ flow = Flow(llm=my_language_model, max_loops=5, stopping_condition=stop_when_rep ```python from swarms.structs import Flow -flow = Flow(llm=my_language_model, max_loops=5, interactive=True) +flow = Flow(llm=llm, max_loops=5, interactive=True) # Provide initial task initial_task = "Rank and prioritize the following financial documents and cut out 30% of our expenses" diff --git a/simple_agent.py b/playground/agents/simple_agent.py similarity index 100% rename from simple_agent.py rename to playground/agents/simple_agent.py diff --git a/playground/models/multitemp.py b/playground/models/multitemp.py deleted file mode 100644 index f4146390..00000000 --- a/playground/models/multitemp.py +++ /dev/null @@ -1,56 +0,0 @@ -from swarms.models import OpenAIChat # Replace with your actual OpenAIChat import - -if __name__ == "__main__": - api_key = "" # Your OpenAI API key here - agent = MultiTempAgent(api_key) - - prompt = "Write a blog post about health and wellness" - final_output = agent.run(prompt) - - print("Final chosen output:") - print(final_output) - - -class MultiTempAgent: - def __init__(self, api_key, default_temp=0.5, alt_temps=[0.2, 0.7, 0.9]): - self.api_key = api_key - self.default_temp = default_temp - self.alt_temps = alt_temps - - def ask_user_feedback(self, text): - print(f"Generated text: {text}") - feedback = input("Are you satisfied with this output? 
(yes/no): ") - return feedback.lower() == "yes" - - def present_options_to_user(self, outputs): - print("Alternative outputs:") - for temp, output in outputs.items(): - print(f"Temperature {temp}: {output}") - chosen_temp = float(input("Choose the temperature of the output you like: ")) - return outputs.get(chosen_temp, "Invalid temperature chosen.") - - def run(self, prompt): - try: - llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp) - initial_output = llm(prompt) # Using llm as a callable - except Exception as e: - print(f"Error generating initial output: {e}") - initial_output = None - - user_satisfied = self.ask_user_feedback(initial_output) - - if user_satisfied: - return initial_output - else: - outputs = {} - for temp in self.alt_temps: - try: - llm = OpenAIChat( - openai_api_key=self.api_key, temperature=temp - ) # Re-initializing - outputs[temp] = llm(prompt) # Using llm as a callable - except Exception as e: - print(f"Error generating text at temperature {temp}: {e}") - outputs[temp] = None - chosen_output = self.present_options_to_user(outputs) - return chosen_output diff --git a/playground/models/openai_model.py b/playground/models/openai_model.py index eccbb8cc..e3b01715 100644 --- a/playground/models/openai_model.py +++ b/playground/models/openai_model.py @@ -1,6 +1,6 @@ from swarms.models.openai_models import OpenAIChat -openai = OpenAIChat(openai_api_key="", verbose=False) +openai = OpenAIChat(openai_api_key="", verbose=False) -chat = openai("Are quantum fields everywhere?") +chat = openai("What are quantum fields?") print(chat) diff --git a/playground/structs/flow.py b/playground/structs/flow.py index e69de29b..8e34cce3 100644 --- a/playground/structs/flow.py +++ b/playground/structs/flow.py @@ -0,0 +1,35 @@ +from swarms.models import OpenAIChat +from swarms.structs import Flow + +api_key = "" + +# Initialize the language model, this model can be swapped out with 
Anthropic, ETC, Huggingface Models like Mistral, ETC +llm = OpenAIChat( + # model_name="gpt-4" + openai_api_key=api_key, + temperature=0.5, + # max_tokens=100, +) + +## Initialize the workflow +flow = Flow( + llm=llm, + max_loops=2, + dashboard=True, + # stopping_condition=None, # You can define a stopping condition as needed. + # loop_interval=1, + # retry_attempts=3, + # retry_interval=1, + # interactive=False, # Set to 'True' for interactive mode. + # dynamic_temperature=False, # Set to 'True' for dynamic temperature handling. +) + +# out = flow.load_state("flow_state.json") +# temp = flow.dynamic_temperature() +# filter = flow.add_response_filter("Trump") +out = flow.run("Generate a 10,000 word blog on health and wellness.") +# out = flow.validate_response(out) +# out = flow.analyze_feedback(out) +# out = flow.print_history_and_memory() +# # out = flow.save_state("flow_state.json") +# print(out) diff --git a/playground/structs/sequential_workflow.py b/playground/structs/sequential_workflow.py new file mode 100644 index 00000000..b8e5a10b --- /dev/null +++ b/playground/structs/sequential_workflow.py @@ -0,0 +1,31 @@ +from swarms.models import OpenAIChat +from swarms.structs import Flow +from swarms.structs.sequential_workflow import SequentialWorkflow + +# Example usage +llm = OpenAIChat( + temperature=0.5, + max_tokens=3000, +) + +# Initialize the Flow with the language flow +flow1 = Flow(llm=llm, max_loops=1, dashboard=False) + +# Create another Flow for a different task +flow2 = Flow(llm=llm, max_loops=1, dashboard=False) + +# Create the workflow +workflow = SequentialWorkflow(max_loops=1) + +# Add tasks to the workflow +workflow.add("Generate a 10,000 word blog on health and wellness.", flow1) + +# Suppose the next task takes the output of the first task as input +workflow.add("Summarize the generated blog", flow2) + +# Run the workflow +workflow.run() + +# Output the results +for task in workflow.tasks: + print(f"Task: {task.description}, Result: 
{task.result}") diff --git a/playground/swarms/godmode.py b/playground/swarms/godmode.py index 66aec1fa..f1269d98 100644 --- a/playground/swarms/godmode.py +++ b/playground/swarms/godmode.py @@ -1,39 +1,16 @@ +from swarms.swarms import GodMode from swarms.models import OpenAIChat -from swarms.swarms import GodMode -from swarms.workers.worker import Worker +api_key = "" + +llm = OpenAIChat(openai_api_key=api_key) -llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5) -worker1 = Worker( - llm=llm, - ai_name="Bumble Bee", - ai_role="Worker in a swarm", - external_tools=None, - human_in_the_loop=False, - temperature=0.5, -) -worker2 = Worker( - llm=llm, - ai_name="Optimus Prime", - ai_role="Worker in a swarm", - external_tools=None, - human_in_the_loop=False, - temperature=0.5, -) -worker3 = Worker( - llm=llm, - ai_name="Megatron", - ai_role="Worker in a swarm", - external_tools=None, - human_in_the_loop=False, - temperature=0.5, -) -# Usage -agents = [worker1, worker2, worker3] +llms = [llm, llm, llm] -god_mode = GodMode(agents) +god_mode = GodMode(llms) -task = "What are the biggest risks facing humanity?" +task = "Generate a 10,000 word blog on health and wellness." 
+out = god_mode.run(task) god_mode.print_responses(task) diff --git a/playground/swarms/groupchat.py b/playground/swarms/groupchat.py index a5e8dd0d..739181d1 100644 --- a/playground/swarms/groupchat.py +++ b/playground/swarms/groupchat.py @@ -1,61 +1,49 @@ -from swarms.models import OpenAIChat -from swarms.swarms import GroupChat, GroupChatManager -from swarms.workers import Worker +from swarms import OpenAI, Flow +from swarms.swarms.groupchat import GroupChatManager, GroupChat -llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5) -node = Worker( - llm=llm, - ai_name="Optimus Prime", - ai_role="Worker in a swarm", - external_tools=None, - human_in_the_loop=False, +api_key = "" + +llm = OpenAI( + openai_api_key=api_key, temperature=0.5, + max_tokens=3000, ) -node2 = Worker( +# Initialize the flow +flow1 = Flow( llm=llm, - ai_name="Optimus Prime", - ai_role="Worker in a swarm", - external_tools=None, - human_in_the_loop=False, - temperature=0.5, + max_loops=1, + system_message="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE", + name="silly", + dashboard=True, ) - -node3 = Worker( +flow2 = Flow( llm=llm, - ai_name="Optimus Prime", - ai_role="Worker in a swarm", - external_tools=None, - human_in_the_loop=False, - temperature=0.5, + max_loops=1, + system_message="YOU ARE VERY SMART AND ANSWER RIDDLES", + name="detective", + dashboard=True, ) - -nodes = [node, node2, node3] - -messages = [ - { - "role": "system", - "context": "Create an a small feedforward in pytorch", - } -] - -group = GroupChat( - workers=nodes, - messages=messages, - max_rounds=3, +flow3 = Flow( + llm=llm, + max_loops=1, + system_message="YOU MAKE RIDDLES", + name="riddler", + dashboard=True, ) - - -manager = GroupChatManager( - groupchat=group, - max_consecutive_auto_reply=3, +manager = Flow( + llm=llm, + max_loops=1, + system_message="YOU ARE A GROUP CHAT MANAGER", + name="manager", + dashboard=True, ) -output = group.run( - messages, - sender=node, - config=group, -) 
-print(output) +# Example usage: +agents = [flow1, flow2, flow3] + +group_chat = GroupChat(agents=agents, messages=[], max_round=10) +chat_manager = GroupChatManager(groupchat=group_chat, selector=manager) +chat_history = chat_manager("Write me a riddle")