auto saved + fixed run method of flow

pull/56/head
Kye 1 year ago
parent d4bd4fa4a4
commit ba28f40e57

@@ -23,7 +23,7 @@ Distribution Agent:
 """
-from swarms import OpenAIChat
+from swarms.models import OpenAIChat
 from termcolor import colored

 TOPIC_GENERATOR = f"""
@@ -264,7 +264,7 @@ Denote the social media's by using the social media name in HTML like tags
 {{ARTICLE}}
 """
-llm = OpenAIChat(openai_api_key="")
+llm = OpenAIChat(openai_api_key="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")

 def get_review_prompt(article):

@@ -1,7 +1,7 @@
 from swarms.models import OpenAIChat
 from swarms.structs import Flow

-api_key = ""
+api_key = "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"

 # Initialize the language model; this can be swapped out for Anthropic or Hugging Face models such as Mistral
 llm = OpenAIChat(
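Review note: both files above replace an empty api_key placeholder with a literal OpenAI key, committing a secret to source control. A minimal sketch of the usual alternative, reading the key from an environment variable instead (OPENAI_API_KEY is the conventional name, not something this commit defines):

import os

from swarms.models import OpenAIChat

# Read the key from the environment rather than hardcoding it,
# and fail early with a clear message if it is missing.
api_key = os.environ.get("OPENAI_API_KEY")
if api_key is None:
    raise RuntimeError("Set the OPENAI_API_KEY environment variable first.")

llm = OpenAIChat(openai_api_key=api_key)

A key that has been committed should also be treated as leaked and rotated, since it survives in git history even after removal.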
@@ -14,7 +14,7 @@ llm = OpenAIChat(
 ## Initialize the workflow
 flow = Flow(
     llm=llm,
-    max_loops=1,
+    max_loops=2,
     dashboard=True,
     # stopping_condition=None,  # You can define a stopping condition as needed.
     # loop_interval=1,
@@ -31,5 +31,5 @@ out = flow.run("Generate a 10,000 word blog on health and wellness.")
 # out = flow.validate_response(out)
 # out = flow.analyze_feedback(out)
 # out = flow.print_history_and_memory()
-# out = flow.save_state("flow_state.json")
-print(out)
+# # out = flow.save_state("flow_state.json")
+# print(out)
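Since the autosave path inside run() is still commented out in this commit (see the Flow hunks below), persisting the flow remains a manual step. Continuing the example script above, a sketch of the intended round-trip using the save_state/load_state names that appear elsewhere in this diff; the exact restore semantics are an assumption:

# Run once, persist explicitly, then restore into a fresh Flow.
out = flow.run("Generate a 10,000 word blog on health and wellness.")
flow.save_state("flow_state.json")

restored = Flow(llm=llm, max_loops=2, dashboard=True)
restored.load_state("flow_state.json")
print(restored.memory)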

@@ -1,14 +0,0 @@
-{
-    "memory": [
-        [
-            "Human: Generate a 10,000 word blog on health and wellness."
-        ]
-    ],
-    "llm_params": {},
-    "loop_interval": 1,
-    "retry_attempts": 3,
-    "retry_interval": 1,
-    "interactive": false,
-    "dashboard": true,
-    "dynamic_temperature": false
-}
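The deleted file above shows the on-disk shape of a saved state: the conversation memory plus a few runtime settings. A small sketch of inspecting such a file directly with the standard library, independent of Flow (the schema is read off the deleted file itself):

import json

with open("flow_state.json", "r") as f:
    state = json.load(f)

# "memory" is a list of histories; each history is a list of turns.
for history in state.get("memory", []):
    for turn in history:
        print(turn)

print("retry_attempts:", state.get("retry_attempts", 3))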

@@ -60,7 +60,5 @@ class Fuyu:
         for k, v in model_inputs.items():
             model_inputs[k] = v.to(self.device_map)
-        output = self.model.generate(
-            **model_inputs, max_new_tokens=self.max_new_tokens
-        )
+        output = self.model.generate(**model_inputs, max_new_tokens=self.max_new_tokens)
         text = self.processor.batch_decode(output[:, -7:], skip_special_tokens=True)
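The Fuyu hunk is formatting only, but the pattern it reflows is the standard Hugging Face generation loop: move every input tensor to the model's device, call generate() with a max_new_tokens cap, and batch-decode the output. A generic, hedged sketch of the same pattern with a text-only model (the model choice and the decode slice are illustrative; the Fuyu code hardcodes output[:, -7:] instead of slicing off the prompt length):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # illustrative stand-in
model = AutoModelForCausalLM.from_pretrained("gpt2")
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

inputs = tokenizer("Health and wellness tips:", return_tensors="pt")
# Same device-moving loop as in the Fuyu hunk above.
inputs = {k: v.to(device) for k, v in inputs.items()}

output = model.generate(**inputs, max_new_tokens=32)
# Decode only the newly generated tokens, skipping the prompt.
new_tokens = output[:, inputs["input_ids"].shape[1]:]
print(tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0])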

@@ -105,6 +105,8 @@ class Flow:
         system_message: str = FLOW_SYSTEM_PROMPT,
         # tools: List[BaseTool] = None,
         dynamic_temperature: bool = False,
+        saved_state: Optional[str] = None,
+        autosave: bool = False,
         **kwargs: Any,
     ):
         self.llm = llm
@@ -124,6 +126,9 @@ class Flow:
         # self.tools = tools
         self.system_message = system_message
         self.name = name
+        self.saved_state = saved_state
+        self.autosave = autosave
+        self.response_filters = []

     def provide_feedback(self, feedback: str) -> None:
         """Allow users to provide feedback on the responses."""
@@ -206,7 +211,7 @@ class Flow:
         print(dashboard)

-    def run(self, task: str, save: bool = True, **kwargs):
+    def run(self, task: str, **kwargs):
         """
         Run the autonomous agent loop
@@ -220,15 +225,15 @@ class Flow:
         4. If stopping condition is not met, generate a response
         5. Repeat until stopping condition is met or max_loops is reached
+
+        Example:
+            >>> out = flow.run("Generate a 10,000 word blog on health and wellness.")
         """
-        # Start with a new history or continue from the last saved state
-        if not self.memory or not self.memory[-1]:
-            history = [f"Human: {task}"]
-        else:
-            history = self.memory[-1]
+        # Restore from saved state if provided, otherwise start with a new history
+        # if self.saved_state:
+        #     self.load_state(self.saved_state)
+        #     history = self.memory[-1]
+        #     print(f"Loaded state from {self.saved_state}")
+        # else:
+        #     history = [f"Human: {task}"]
+        #     self.memory.append(history)

         response = task
         history = [f"Human: {task}"]
@@ -237,12 +242,9 @@ class Flow:
         if self.dashboard:
             self.print_dashboard(task)

-        # Start or continue the loop process
-        for i in range(len(history), self.max_loops):
+        for i in range(self.max_loops):
             print(colored(f"\nLoop {i+1} of {self.max_loops}", "blue"))
             print("\n")
-            response = history[-1].split(": ", 1)[-1]  # Get the last response

             if self._check_stopping_condition(response) or parse_done_token(response):
                 break
@@ -254,8 +256,8 @@ class Flow:
             while attempt < self.retry_attempts:
                 try:
                     response = self.llm(
-                        self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response)
-                        ** kwargs,
+                        self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response),
+                        **kwargs,
                     )
                     # print(f"Next query: {response}")
                     # break
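This hunk fixes a real bug rather than style: without the comma, the old `** kwargs` was parsed as the exponent operator applied to the prompt string, not as keyword-argument unpacking, so the call raised a TypeError before ever reaching the LLM. A stripped-down illustration:

def greet(prompt, **kwargs):
    return f"{prompt} {kwargs}"

params = {"temperature": 0.5}

# Fixed form: the comma makes **params keyword unpacking.
print(greet("hello", **params))

# Old form: parsed as greet("hello" ** params), i.e. exponentiation
# between a str and a dict, which raises TypeError at runtime.
try:
    greet("hello" ** params)
except TypeError as err:
    print("old form fails:", err)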
@@ -277,8 +279,8 @@ class Flow:
             time.sleep(self.loop_interval)
         self.memory.append(history)

-        if save:
-            self.save("flow_history.json")
+        # if self.autosave:
+        #     self.save_state("flow_state.json")

         return response  # , history
@@ -353,8 +355,8 @@ class Flow:
             time.sleep(self.loop_interval)
         self.memory.append(history)

-        if save:
-            self.save_state("flow_history.json")
+        # if save:
+        #     self.save_state("flow_history.json")

         return response  # , history
@@ -409,7 +411,13 @@ class Flow:
             json.dump(self.memory, f)
         print(f"Saved flow history to {file_path}")

-    def load(self, file_path) -> None:
+    def load(self, file_path: str):
+        """
+        Load the flow history from a file.
+
+        Args:
+            file_path (str): The path to the file containing the saved flow history.
+        """
         with open(file_path, "r") as f:
             self.memory = json.load(f)
         print(f"Loaded flow history from {file_path}")
@@ -660,10 +668,6 @@ class Flow:
         with open(file_path, "r") as f:
             state = json.load(f)

-        # Assuming 'llm_class' is a class reference to the language
-        # llm_params = state.get("llm_params", {})
-        # self.llm = self.llm(**llm_params)
-
         # Restore other saved attributes
         self.memory = state.get("memory", [])
         self.max_loops = state.get("max_loops", 5)
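Note that load_state() restores attributes through dict.get with defaults, so a state file written before a field existed still loads cleanly. A compact sketch of the pattern (the class is a stand-in; the defaults mirror this hunk):

import json

class FlowLike:
    def load_state(self, file_path: str) -> None:
        with open(file_path, "r") as f:
            state = json.load(f)
        # Missing keys fall back to defaults instead of raising KeyError,
        # keeping older state files compatible with newer code.
        self.memory = state.get("memory", [])
        self.max_loops = state.get("max_loops", 5)
        self.loop_interval = state.get("loop_interval", 1)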

@@ -3,6 +3,7 @@ import pytest
 from unittest.mock import Mock, patch
 from swarms.models.anthropic import Anthropic

+
 @pytest.fixture
 def mock_anthropic_env():
     os.environ["ANTHROPIC_API_URL"] = "https://test.anthropic.com"
@@ -11,15 +12,18 @@ def mock_anthropic_env():
     del os.environ["ANTHROPIC_API_URL"]
     del os.environ["ANTHROPIC_API_KEY"]

+
 @pytest.fixture
 def mock_requests_post():
     with patch("requests.post") as mock_post:
         yield mock_post

+
 @pytest.fixture
 def anthropic_instance():
     return Anthropic(model="test-model")

+
 def test_anthropic_init_default_values(anthropic_instance):
     assert anthropic_instance.model == "test-model"
     assert anthropic_instance.max_tokens_to_sample == 256
@@ -31,6 +35,7 @@ def test_anthropic_init_default_values(anthropic_instance):
     assert anthropic_instance.anthropic_api_url == "https://test.anthropic.com"
     assert anthropic_instance.anthropic_api_key == "test_api_key"

+
 def test_anthropic_init_custom_values():
     anthropic_instance = Anthropic(
         model="custom-model",
@@ -49,6 +54,7 @@ def test_anthropic_init_custom_values():
     assert anthropic_instance.streaming is True
     assert anthropic_instance.default_request_timeout == 300

+
 def test_anthropic_default_params(anthropic_instance):
     default_params = anthropic_instance._default_params()
     assert default_params == {
@@ -56,6 +62,7 @@ def test_anthropic_default_params(anthropic_instance):
         "model": "test-model",
     }

+
 def test_anthropic_run(mock_anthropic_env, mock_requests_post, anthropic_instance):
     mock_response = Mock()
     mock_response.json.return_value = {"completion": "Generated text"}
@@ -79,6 +86,7 @@ def test_anthropic_run(mock_anthropic_env, mock_requests_post, anthropic_instance):
         timeout=600,
     )

+
 def test_anthropic_call(mock_anthropic_env, mock_requests_post, anthropic_instance):
     mock_response = Mock()
     mock_response.json.return_value = {"completion": "Generated text"}
@@ -102,7 +110,10 @@ def test_anthropic_call(mock_anthropic_env, mock_requests_post, anthropic_instance):
         timeout=600,
     )

-def test_anthropic_exception_handling(mock_anthropic_env, mock_requests_post, anthropic_instance):
+
+def test_anthropic_exception_handling(
+    mock_anthropic_env, mock_requests_post, anthropic_instance
+):
     mock_response = Mock()
     mock_response.json.return_value = {"error": "An error occurred"}
     mock_requests_post.return_value = mock_response
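The Anthropic test hunks are blank-line formatting, but the structure they expose is the useful part: one fixture that sets and tears down environment variables around a yield, and one that patches requests.post for the duration of a test. A minimal self-contained analogue (names and URL are illustrative, not from the test file):

import os
from unittest.mock import Mock, patch

import pytest
import requests

@pytest.fixture
def fake_env():
    os.environ["FAKE_API_KEY"] = "test_api_key"
    yield
    del os.environ["FAKE_API_KEY"]  # teardown runs after the test body

@pytest.fixture
def mock_post():
    with patch("requests.post") as p:
        yield p

def test_completion(fake_env, mock_post):
    mock_response = Mock()
    mock_response.json.return_value = {"completion": "Generated text"}
    mock_post.return_value = mock_response

    r = requests.post("https://example.invalid/complete", json={})
    assert r.json()["completion"] == "Generated text"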

@@ -117,4 +117,3 @@ async def test_async_transcribe_with_mocked_model(mocked_model, audio_file_path):
     model_wrapper = DistilWhisperModel()
     transcription = await model_wrapper.async_transcribe(audio_file_path)
     assert transcription == "mocked transcription"
-
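The final hunk only drops a trailing line, but the test it touches awaits a mocked coroutine, which is worth a sketch. A minimal analogue using unittest.mock.AsyncMock; it assumes pytest-asyncio is installed, and the class here is a stand-in rather than the real DistilWhisperModel:

import pytest
from unittest.mock import AsyncMock

class TranscriberStub:
    async def async_transcribe(self, path: str) -> str:
        raise NotImplementedError  # the real model call would live here

@pytest.mark.asyncio
async def test_async_transcribe_mocked():
    wrapper = TranscriberStub()
    wrapper.async_transcribe = AsyncMock(return_value="mocked transcription")
    assert await wrapper.async_transcribe("audio.wav") == "mocked transcription"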
