From ba28f40e579861e8e1bb524f15c2866599b66d7d Mon Sep 17 00:00:00 2001
From: Kye
Date: Sun, 5 Nov 2023 10:37:10 -0500
Subject: [PATCH] autosave + fixed run method of Flow

---
 demos/positive_med.py              |  4 +--
 example.py                         |  8 ++---
 flow_state.json                    | 14 --------
 swarms/models/fuyu.py              |  4 +--
 swarms/structs/flow.py             | 52 ++++++++++++++++--------------
 tests/models/anthropic.py          | 13 +++++++-
 tests/models/distilled_whisperx.py |  1 -
 7 files changed, 47 insertions(+), 49 deletions(-)
 delete mode 100644 flow_state.json

diff --git a/demos/positive_med.py b/demos/positive_med.py
index e8f879c9..2d191c55 100644
--- a/demos/positive_med.py
+++ b/demos/positive_med.py
@@ -23,7 +23,7 @@ Distribution Agent:
 """
 
-from swarms import OpenAIChat
+from swarms.models import OpenAIChat
 from termcolor import colored
 
 TOPIC_GENERATOR = f"""
@@ -264,7 +264,7 @@ Denote the social media's by using the social media name in HTML like tags
 
 {{ARTICLE}}
 """
-llm = OpenAIChat(openai_api_key="")
+llm = OpenAIChat(openai_api_key="<your-openai-api-key>")
 
 
 def get_review_prompt(article):
diff --git a/example.py b/example.py
index aeae1b02..3af9fc57 100644
--- a/example.py
+++ b/example.py
@@ -1,7 +1,7 @@
 from swarms.models import OpenAIChat
 from swarms.structs import Flow
 
-api_key = ""
+api_key = "<your-openai-api-key>"
 
 # Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC
 llm = OpenAIChat(
@@ -14,7 +14,7 @@ llm = OpenAIChat(
 ## Initialize the workflow
 flow = Flow(
     llm=llm,
-    max_loops=1,
+    max_loops=2,
     dashboard=True,
     # stopping_condition=None,  # You can define a stopping condition as needed.
     # loop_interval=1,
@@ -31,5 +31,5 @@ out = flow.run("Generate a 10,000 word blog on health and wellness.")
 # out = flow.validate_response(out)
 # out = flow.analyze_feedback(out)
 # out = flow.print_history_and_memory()
-# out = flow.save_state("flow_state.json")
-print(out)
+# # out = flow.save_state("flow_state.json")
+# print(out)
diff --git a/flow_state.json b/flow_state.json
deleted file mode 100644
index 8ed134a0..00000000
--- a/flow_state.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
-    "memory": [
-        [
-            "Human: Generate a 10,000 word blog on health and wellness."
-        ]
-    ],
-    "llm_params": {},
-    "loop_interval": 1,
-    "retry_attempts": 3,
-    "retry_interval": 1,
-    "interactive": false,
-    "dashboard": true,
-    "dynamic_temperature": false
-}
\ No newline at end of file
diff --git a/swarms/models/fuyu.py b/swarms/models/fuyu.py
index bdd3f904..0fd1fd85 100644
--- a/swarms/models/fuyu.py
+++ b/swarms/models/fuyu.py
@@ -60,7 +60,5 @@ class Fuyu:
         for k, v in model_inputs.items():
             model_inputs[k] = v.to(self.device_map)
 
-        output = self.model.generate(
-            **model_inputs, max_new_tokens=self.max_new_tokens
-        )
+        output = self.model.generate(**model_inputs, max_new_tokens=self.max_new_tokens)
         text = self.processor.batch_decode(output[:, -7:], skip_special_tokens=True)
diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py
index afbcf536..d40e4fb4 100644
--- a/swarms/structs/flow.py
+++ b/swarms/structs/flow.py
@@ -105,6 +105,8 @@ class Flow:
         system_message: str = FLOW_SYSTEM_PROMPT,
         # tools: List[BaseTool] = None,
         dynamic_temperature: bool = False,
+        saved_state: Optional[str] = None,
+        autosave: bool = False,
         **kwargs: Any,
     ):
         self.llm = llm
@@ -124,6 +126,9 @@ class Flow:
         # self.tools = tools
         self.system_message = system_message
         self.name = name
+        self.saved_state = saved_state
+        self.autosave = autosave
+        self.response_filters = []
 
     def provide_feedback(self, feedback: str) -> None:
         """Allow users to provide feedback on the responses."""
@@ -206,7 +211,7 @@ class Flow:
         print(dashboard)
 
-    def run(self, task: str, save: bool = True, **kwargs):
+    def run(self, task: str, **kwargs):
         """
         Run the autonomous agent loop
 
         Args:
@@ -220,15 +225,15 @@ class Flow:
         4. If stopping condition is not met, generate a response
         5. Repeat until stopping condition is met or max_loops is reached
 
-        Example:
-        >>> out = flow.run("Generate a 10,000 word blog on health and wellness.")
-
         """
-        # Start with a new history or continue from the last saved state
-        if not self.memory or not self.memory[-1]:
-            history = [f"Human: {task}"]
-        else:
-            history = self.memory[-1]
+        # Restore from saved state if provided, otherwise start with a new history
+        # if self.saved_state:
+        #     self.load_state(self.saved_state)
+        #     history = self.memory[-1]
+        #     print(f"Loaded state from {self.saved_state}")
+        # else:
+        #     history = [f"Human: {task}"]
+        #     self.memory.append(history)
 
         response = task
         history = [f"Human: {task}"]
@@ -237,12 +242,9 @@ class Flow:
         if self.dashboard:
             self.print_dashboard(task)
 
-        # Start or continue the loop process
-        for i in range(len(history), self.max_loops):
+        for i in range(self.max_loops):
             print(colored(f"\nLoop {i+1} of {self.max_loops}", "blue"))
             print("\n")
-            response = history[-1].split(": ", 1)[-1]  # Get the last response
-
             if self._check_stopping_condition(response) or parse_done_token(response):
                 break
 
@@ -254,8 +256,8 @@ class Flow:
             while attempt < self.retry_attempts:
                 try:
                     response = self.llm(
-                        self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response)
-                        ** kwargs,
+                        self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response),
+                        **kwargs,
                     )
                     # print(f"Next query: {response}")
                     # break
@@ -277,8 +279,8 @@ class Flow:
             time.sleep(self.loop_interval)
         self.memory.append(history)
 
-        if save:
-            self.save("flow_history.json")
+        # if self.autosave:
+        #     self.save_state("flow_state.json")
 
         return response  # , history
 
@@ -353,8 +355,8 @@ class Flow:
             time.sleep(self.loop_interval)
         self.memory.append(history)
 
-        if save:
-            self.save_state("flow_history.json")
+        # if save:
+        #     self.save_state("flow_history.json")
 
         return response  # , history
 
@@ -409,7 +411,13 @@ class Flow:
             json.dump(self.memory, f)
print(f"Saved flow history to {file_path}") - def load(self, file_path) -> None: + def load(self, file_path: str): + """ + Load the flow history from a file. + + Args: + file_path (str): The path to the file containing the saved flow history. + """ with open(file_path, "r") as f: self.memory = json.load(f) print(f"Loaded flow history from {file_path}") @@ -660,10 +668,6 @@ class Flow: with open(file_path, "r") as f: state = json.load(f) - # Assuming 'llm_class' is a class reference to the language - # llm_params = state.get("llm_params", {}) - # self.llm = self.llm(**llm_params) - # Restore other saved attributes self.memory = state.get("memory", []) self.max_loops = state.get("max_loops", 5) diff --git a/tests/models/anthropic.py b/tests/models/anthropic.py index 844415aa..4dbd365d 100644 --- a/tests/models/anthropic.py +++ b/tests/models/anthropic.py @@ -3,6 +3,7 @@ import pytest from unittest.mock import Mock, patch from swarms.models.anthropic import Anthropic + @pytest.fixture def mock_anthropic_env(): os.environ["ANTHROPIC_API_URL"] = "https://test.anthropic.com" @@ -11,15 +12,18 @@ def mock_anthropic_env(): del os.environ["ANTHROPIC_API_URL"] del os.environ["ANTHROPIC_API_KEY"] + @pytest.fixture def mock_requests_post(): with patch("requests.post") as mock_post: yield mock_post + @pytest.fixture def anthropic_instance(): return Anthropic(model="test-model") + def test_anthropic_init_default_values(anthropic_instance): assert anthropic_instance.model == "test-model" assert anthropic_instance.max_tokens_to_sample == 256 @@ -31,6 +35,7 @@ def test_anthropic_init_default_values(anthropic_instance): assert anthropic_instance.anthropic_api_url == "https://test.anthropic.com" assert anthropic_instance.anthropic_api_key == "test_api_key" + def test_anthropic_init_custom_values(): anthropic_instance = Anthropic( model="custom-model", @@ -49,6 +54,7 @@ def test_anthropic_init_custom_values(): assert anthropic_instance.streaming is True assert anthropic_instance.default_request_timeout == 300 + def test_anthropic_default_params(anthropic_instance): default_params = anthropic_instance._default_params() assert default_params == { @@ -56,6 +62,7 @@ def test_anthropic_default_params(anthropic_instance): "model": "test-model", } + def test_anthropic_run(mock_anthropic_env, mock_requests_post, anthropic_instance): mock_response = Mock() mock_response.json.return_value = {"completion": "Generated text"} @@ -79,6 +86,7 @@ def test_anthropic_run(mock_anthropic_env, mock_requests_post, anthropic_instanc timeout=600, ) + def test_anthropic_call(mock_anthropic_env, mock_requests_post, anthropic_instance): mock_response = Mock() mock_response.json.return_value = {"completion": "Generated text"} @@ -102,7 +110,10 @@ def test_anthropic_call(mock_anthropic_env, mock_requests_post, anthropic_instan timeout=600, ) -def test_anthropic_exception_handling(mock_anthropic_env, mock_requests_post, anthropic_instance): + +def test_anthropic_exception_handling( + mock_anthropic_env, mock_requests_post, anthropic_instance +): mock_response = Mock() mock_response.json.return_value = {"error": "An error occurred"} mock_requests_post.return_value = mock_response diff --git a/tests/models/distilled_whisperx.py b/tests/models/distilled_whisperx.py index bab8cd0e..4bdd10f3 100644 --- a/tests/models/distilled_whisperx.py +++ b/tests/models/distilled_whisperx.py @@ -117,4 +117,3 @@ async def test_async_transcribe_with_mocked_model(mocked_model, audio_file_path) model_wrapper = DistilWhisperModel() transcription = await 
model_wrapper.async_transcribe(audio_file_path) assert transcription == "mocked transcription" -
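
--
Reviewer note, not part of the patch: a minimal usage sketch of how the new Flow
constructor arguments introduced above are meant to be driven. The state file name
and the explicit save_state call are assumptions for illustration; as committed,
both the restore block and the autosave call inside run() are still commented out,
so persisting state remains a manual step.

    from swarms.models import OpenAIChat
    from swarms.structs import Flow

    llm = OpenAIChat(openai_api_key="<your-openai-api-key>")

    flow = Flow(
        llm=llm,
        max_loops=2,
        dashboard=True,
        autosave=True,                  # new flag; currently inert while run() keeps it commented out
        saved_state="flow_state.json",  # hypothetical path; restore logic is also commented out
    )

    out = flow.run("Generate a 10,000 word blog on health and wellness.")
    flow.save_state("flow_state.json")  # manual save until autosave is wired up
    print(out)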