Multi-modal auto agent + removed workflow.py

pull/100/head
Kye 1 year ago
parent 6010c9d689
commit c520cda250

@ -114,6 +114,11 @@ nav:
- Worker: - Worker:
- Basic: "examples/worker.md" - Basic: "examples/worker.md"
- StackedWorker: "examples/stacked_worker.md" - StackedWorker: "examples/stacked_worker.md"
- Applications:
- CustomerSupport:
- Overview: "applications/customer_support.md"
- Marketing:
- Overview: "applications/marketing_agencies.md"
- Corporate: - Corporate:
- FAQ: "faq.md" - FAQ: "faq.md"
- Purpose: "purpose.md" - Purpose: "purpose.md"
@ -129,8 +134,3 @@ nav:
- Architecture: "architecture.md" - Architecture: "architecture.md"
- Checklist: "checklist.md" - Checklist: "checklist.md"
- Hiring: "hiring.md" - Hiring: "hiring.md"
- Applications:
- CustomerSupport:
- Overview: "applications/customer_support.md"
- Marketing:
- Overview: "applications/marketing_agencies.md"

@ -0,0 +1,30 @@
from swarms.structs import Flow
from swarms.models import Idefics

# Multi Modality Auto Agent
#
# Drives a vision-language model (IDEFICS) through a Flow loop so the
# agent can describe the contents of an image given by URL.

# Vision-language model; cap generation at 2000 tokens.
llm = Idefics(max_length=2000)

task = "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"

# Build the agent loop: two passes over the task, with the live
# dashboard enabled. The remaining Flow options (stopping condition,
# loop interval, retries, interactive mode, dynamic temperature) are
# left at their defaults here.
flow = Flow(
    llm=llm,
    max_loops=2,
    dashboard=True,
    # stopping_condition=None,  # custom stop predicate, if needed
    # loop_interval=1,
    # retry_attempts=3,
    # retry_interval=1,
    # interactive=False,        # 'True' enables interactive mode
    # dynamic_temperature=False,  # 'True' enables dynamic temperature
)

# Optional state/feedback helpers, disabled by default:
# out = flow.load_state("flow_state.json")
# temp = flow.dynamic_temperature()
# filter = flow.add_response_filter("Trump")

out = flow.run(task)

# out = flow.validate_response(out)
# out = flow.analyze_feedback(out)
# out = flow.print_history_and_memory()
# # out = flow.save_state("flow_state.json")
# print(out)

@ -0,0 +1,5 @@
"""
Base Structure for all Swarm Structures
"""

@ -111,10 +111,10 @@ class Flow:
interactive: bool = False, interactive: bool = False,
dashboard: bool = False, dashboard: bool = False,
name: str = "Flow agent", name: str = "Flow agent",
system_message: str = FLOW_SYSTEM_PROMPT, system_prompt: str = FLOW_SYSTEM_PROMPT,
# tools: List[BaseTool] = None, # tools: List[BaseTool] = None,
dynamic_temperature: bool = False, dynamic_temperature: bool = False,
saved_state: Optional[str] = None, saved_state_path: Optional[str] = "flow_state.json",
autosave: bool = False, autosave: bool = False,
**kwargs: Any, **kwargs: Any,
): ):
@ -133,9 +133,9 @@ class Flow:
self.dashboard = dashboard self.dashboard = dashboard
self.dynamic_temperature = dynamic_temperature self.dynamic_temperature = dynamic_temperature
# self.tools = tools # self.tools = tools
self.system_message = system_message self.system_prompt = system_prompt
self.name = name self.name = name
self.saved_state = saved_state self.saved_state_path = saved_state_path
self.autosave = autosave self.autosave = autosave
self.response_filters = [] self.response_filters = []
@ -206,7 +206,7 @@ class Flow:
Flow Configuration: Flow Configuration:
Name: {self.name} Name: {self.name}
System Prompt: {self.system_message} System Prompt: {self.system_prompt}
Task: {task} Task: {task}
Max Loops: {self.max_loops} Max Loops: {self.max_loops}
Stopping Condition: {self.stopping_condition} Stopping Condition: {self.stopping_condition}
@ -317,8 +317,10 @@ class Flow:
time.sleep(self.loop_interval) time.sleep(self.loop_interval)
self.memory.append(history) self.memory.append(history)
# if self.autosave: if self.autosave:
# self.save_state("flow_state.json") save_path = self.saved_state_path or "flow_state.json"
print(colored(f"Autosaving flow state to {save_path}", "green"))
self.save_state(save_path)
return response # , history return response # , history
@ -422,7 +424,7 @@ class Flow:
Returns: Returns:
str: The agent history prompt str: The agent history prompt
""" """
system_prompt = system_prompt or self.system_message system_prompt = system_prompt or self.system_prompt
agent_history_prompt = f""" agent_history_prompt = f"""
SYSTEM_PROMPT: {system_prompt} SYSTEM_PROMPT: {system_prompt}
@ -736,7 +738,7 @@ class Flow:
""" """
prompt = f""" prompt = f"""
SYSTEM_PROMPT: {self.system_message} SYSTEM_PROMPT: {self.system_prompt}
History: {history} History: {history}
@ -745,6 +747,6 @@ class Flow:
response = self.llm(prompt, **kwargs) response = self.llm(prompt, **kwargs)
return {"role": self.name, "content": response} return {"role": self.name, "content": response}
def update_system_message(self, system_message: str): def update_system_prompt(self, system_prompt: str):
"""Upddate the system message""" """Upddate the system message"""
self.system_message = system_message self.system_prompt = system_prompt

@ -78,8 +78,6 @@ class AbstractSwarm(ABC):
Scale down the number of workers Scale down the number of workers
""" """
# TODO: Pass in abstract LLM class that can utilize Hf or Anthropic models, Move away from OPENAI # TODO: Pass in abstract LLM class that can utilize Hf or Anthropic models, Move away from OPENAI

@ -1,11 +0,0 @@
from swarms.models import OpenAIChat
from swarms.structs import Workflow

# Minimal Workflow example: queue a single weather question on an
# OpenAI-backed chat model and execute the workflow.
# NOTE(review): the API key is empty here — supply a real key to run.
chat_model = OpenAIChat(openai_api_key="")

weather_workflow = Workflow(chat_model)
weather_workflow.add("What's the weather in miami")
weather_workflow.run()
Loading…
Cancel
Save