parent b3a39e578d
commit 690433d39f
@@ -1,56 +0,0 @@
-from swarms.models import OpenAIChat  # Replace with your actual OpenAIChat import
-
-class MultiTempAgent:
-    def __init__(self, api_key, default_temp=0.5, alt_temps=[0.2, 0.7, 0.9]):
-        self.api_key = api_key
-        self.default_temp = default_temp
-        self.alt_temps = alt_temps
-
-    def ask_user_feedback(self, text):
-        print(f"Generated text: {text}")
-        feedback = input("Are you satisfied with this output? (yes/no): ")
-        return feedback.lower() == "yes"
-
-    def present_options_to_user(self, outputs):
-        print("Alternative outputs:")
-        for temp, output in outputs.items():
-            print(f"Temperature {temp}: {output}")
-        chosen_temp = float(input("Choose the temperature of the output you like: "))
-        return outputs.get(chosen_temp, "Invalid temperature chosen.")
-
-    def run(self, prompt):
-        try:
-            llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp)
-            initial_output = llm(prompt)  # Using llm as a callable
-        except Exception as e:
-            print(f"Error generating initial output: {e}")
-            initial_output = None
-
-        user_satisfied = self.ask_user_feedback(initial_output)
-
-        if user_satisfied:
-            return initial_output
-        else:
-            outputs = {}
-            for temp in self.alt_temps:
-                try:
-                    llm = OpenAIChat(
-                        openai_api_key=self.api_key, temperature=temp
-                    )  # Re-initializing at the alternate temperature
-                    outputs[temp] = llm(prompt)  # Using llm as a callable
-                except Exception as e:
-                    print(f"Error generating text at temperature {temp}: {e}")
-                    outputs[temp] = None
-            chosen_output = self.present_options_to_user(outputs)
-            return chosen_output
-
-
-if __name__ == "__main__":
-    api_key = ""  # Your OpenAI API key here
-    agent = MultiTempAgent(api_key)
-
-    prompt = "Write a blog post about health and wellness"
-    final_output = agent.run(prompt)
-
-    print("Final chosen output:")
-    print(final_output)
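The file removed above was a small human-in-the-loop example: MultiTempAgent generated a draft at a default temperature, asked the user for approval, and on rejection regenerated the prompt at each alternate temperature so the user could choose the output they preferred.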
@@ -1,6 +1,6 @@
 from swarms.models.openai_models import OpenAIChat
 
-openai = OpenAIChat(openai_api_key="", verbose=False)
+openai = OpenAIChat(openai_api_key="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", verbose=False)
 
-chat = openai("Are quantum fields everywhere?")
+chat = openai("What are quantum fields?")
 print(chat)
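The new line above hardcodes what appears to be a live OpenAI API key; a key committed to source control should be treated as compromised and rotated. A minimal sketch of the usual alternative, assuming the key is exported in the OPENAI_API_KEY environment variable:

    import os

    from swarms.models.openai_models import OpenAIChat

    # Read the key from the environment rather than committing it.
    openai = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"), verbose=False)

    chat = openai("What are quantum fields?")
    print(chat)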
@@ -0,0 +1,35 @@
+from swarms.models import OpenAIChat
+from swarms.structs import Flow
+
+api_key = ""
+
+# Initialize the language model; it can be swapped out for Anthropic, Hugging Face models such as Mistral, etc.
+llm = OpenAIChat(
+    # model_name="gpt-4",
+    openai_api_key=api_key,
+    temperature=0.5,
+    # max_tokens=100,
+)
+
+# Initialize the workflow
+flow = Flow(
+    llm=llm,
+    max_loops=2,
+    dashboard=True,
+    # stopping_condition=None,  # You can define a stopping condition as needed.
+    # loop_interval=1,
+    # retry_attempts=3,
+    # retry_interval=1,
+    # interactive=False,  # Set to 'True' for interactive mode.
+    # dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling.
+)
+
+# out = flow.load_state("flow_state.json")
+# temp = flow.dynamic_temperature()
+# filter = flow.add_response_filter("Trump")
+out = flow.run("Generate a 10,000 word blog on health and wellness.")
+# out = flow.validate_response(out)
+# out = flow.analyze_feedback(out)
+# out = flow.print_history_and_memory()
+# out = flow.save_state("flow_state.json")
+# print(out)
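The comment above says the model can be swapped out for other providers. As an illustration only, assuming swarms.models also exports an Anthropic wrapper with a constructor similar to OpenAIChat (neither the class name nor its parameters are confirmed by this diff), the swap might look like:

    from swarms.models import Anthropic  # assumed export, not shown in this diff
    from swarms.structs import Flow

    # Hypothetical constructor; parameter names mirror the OpenAIChat example above.
    llm = Anthropic(anthropic_api_key="", temperature=0.5)

    flow = Flow(llm=llm, max_loops=2, dashboard=True)
    out = flow.run("Generate a 10,000 word blog on health and wellness.")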
@@ -0,0 +1,31 @@
+from swarms.models import OpenAIChat
+from swarms.structs import Flow
+from swarms.structs.sequential_workflow import SequentialWorkflow
+
+# Example usage
+llm = OpenAIChat(
+    temperature=0.5,
+    max_tokens=3000,
+)
+
+# Initialize a Flow with the language model
+flow1 = Flow(llm=llm, max_loops=1, dashboard=False)
+
+# Create another Flow for a different task
+flow2 = Flow(llm=llm, max_loops=1, dashboard=False)
+
+# Create the workflow
+workflow = SequentialWorkflow(max_loops=1)
+
+# Add tasks to the workflow
+workflow.add("Generate a 10,000 word blog on health and wellness.", flow1)
+
+# Suppose the next task takes the output of the first task as input
+workflow.add("Summarize the generated blog", flow2)
+
+# Run the workflow
+workflow.run()
+
+# Output the results
+for task in workflow.tasks:
+    print(f"Task: {task.description}, Result: {task.result}")
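Extending the pipeline follows the same add-then-run pattern; a sketch appending a hypothetical third editing pass (flow3 and its task text are illustrative, not part of this commit):

    flow3 = Flow(llm=llm, max_loops=1, dashboard=False)
    workflow.add("Edit the summary for tone and clarity", flow3)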
@@ -1,39 +1,16 @@
+from swarms.swarms import GodMode
 from swarms.models import OpenAIChat
 
-from swarms.swarms import GodMode
-from swarms.workers.worker import Worker
+api_key = ""
 
-llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5)
+llm = OpenAIChat(openai_api_key=api_key)
 
-worker1 = Worker(
-    llm=llm,
-    ai_name="Bumble Bee",
-    ai_role="Worker in a swarm",
-    external_tools=None,
-    human_in_the_loop=False,
-    temperature=0.5,
-)
-worker2 = Worker(
-    llm=llm,
-    ai_name="Optimus Prime",
-    ai_role="Worker in a swarm",
-    external_tools=None,
-    human_in_the_loop=False,
-    temperature=0.5,
-)
-worker3 = Worker(
-    llm=llm,
-    ai_name="Megatron",
-    ai_role="Worker in a swarm",
-    external_tools=None,
-    human_in_the_loop=False,
-    temperature=0.5,
-)
-# Usage
-agents = [worker1, worker2, worker3]
+llms = [llm, llm, llm]
 
-god_mode = GodMode(agents)
+god_mode = GodMode(llms)
 
-task = "What are the biggest risks facing humanity?"
+task = "Generate a 10,000 word blog on health and wellness."
 
+out = god_mode.run(task)
 god_mode.print_responses(task)
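GodMode runs one task against every model in the list and prints the responses for side-by-side comparison. The rewritten example reuses a single client three times; using the constructor parameters shown elsewhere in this commit, distinct sampling settings could be compared instead:

    # Illustrative only: compare the same prompt across three temperatures.
    llms = [
        OpenAIChat(openai_api_key=api_key, temperature=0.2),
        OpenAIChat(openai_api_key=api_key, temperature=0.5),
        OpenAIChat(openai_api_key=api_key, temperature=0.9),
    ]
    god_mode = GodMode(llms)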
@@ -1,61 +1,49 @@
-from swarms.models import OpenAIChat
-from swarms.swarms import GroupChat, GroupChatManager
-from swarms.workers import Worker
-
-llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5)
-
-node = Worker(
-    llm=llm,
-    ai_name="Optimus Prime",
-    ai_role="Worker in a swarm",
-    external_tools=None,
-    human_in_the_loop=False,
-    temperature=0.5,
-)
-
-node2 = Worker(
-    llm=llm,
-    ai_name="Optimus Prime",
-    ai_role="Worker in a swarm",
-    external_tools=None,
-    human_in_the_loop=False,
-    temperature=0.5,
-)
-
-node3 = Worker(
-    llm=llm,
-    ai_name="Optimus Prime",
-    ai_role="Worker in a swarm",
-    external_tools=None,
-    human_in_the_loop=False,
-    temperature=0.5,
-)
-
-nodes = [node, node2, node3]
-
-messages = [
-    {
-        "role": "system",
-        "context": "Create a small feedforward network in PyTorch",
-    }
-]
-
-group = GroupChat(
-    workers=nodes,
-    messages=messages,
-    max_rounds=3,
-)
-
-manager = GroupChatManager(
-    groupchat=group,
-    max_consecutive_auto_reply=3,
-)
-
-output = group.run(
-    messages,
-    sender=node,
-    config=group,
-)
-
-print(output)
+from swarms import OpenAI, Flow
+from swarms.swarms.groupchat import GroupChatManager, GroupChat
+
+api_key = ""
+
+llm = OpenAI(
+    openai_api_key=api_key,
+    temperature=0.5,
+    max_tokens=3000,
+)
+
+# Initialize the agent flows
+flow1 = Flow(
+    llm=llm,
+    max_loops=1,
+    system_message="YOU ARE SILLY, YOU OFFER NOTHING OF VALUE",
+    name="silly",
+    dashboard=True,
+)
+flow2 = Flow(
+    llm=llm,
+    max_loops=1,
+    system_message="YOU ARE VERY SMART AND ANSWER RIDDLES",
+    name="detective",
+    dashboard=True,
+)
+flow3 = Flow(
+    llm=llm,
+    max_loops=1,
+    system_message="YOU MAKE RIDDLES",
+    name="riddler",
+    dashboard=True,
+)
+manager = Flow(
+    llm=llm,
+    max_loops=1,
+    system_message="YOU ARE A GROUP CHAT MANAGER",
+    name="manager",
+    dashboard=True,
+)
+
+# Example usage:
+agents = [flow1, flow2, flow3]
+
+group_chat = GroupChat(agents=agents, messages=[], max_round=10)
+chat_manager = GroupChatManager(groupchat=group_chat, selector=manager)
+chat_history = chat_manager("Write me a riddle")
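The rewrite replaces Worker nodes with named Flow agents and delegates speaker selection to a manager flow; calling chat_manager directly starts the conversation and returns the accumulated history, which, like print(output) in the old version, can simply be printed:

    print(chat_history)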