From ac991872e4aab690f7ab714b05af838642d5b6e9 Mon Sep 17 00:00:00 2001 From: Kye Date: Tue, 31 Oct 2023 14:54:36 -0400 Subject: [PATCH] fasting clean up --- flow.py | 1 - godmode.py | 14 ++-- playground/apps/discord_example.py | 13 ---- playground/posmed/example_outputs.py | 101 --------------------------- playground/posmed/positive_med.py | 4 +- swarms/agents/simple_agent.py | 14 ++++ swarms/models/__init__.py | 1 - swarms/models/huggingface.py | 10 +-- swarms/models/mpt.py | 2 +- swarms/models/openai_models.py | 11 +-- swarms/structs/flow.py | 89 ++++++++++------------- swarms/structs/workflow.py | 2 +- swarms/swarms/dialogue_simulator.py | 2 +- swarms/swarms/multi_agent_collab.py | 1 + swarms/swarms/multi_agent_debate.py | 10 +-- swarms/swarms/simple_swarm.py | 4 +- tests/structs/flow.py | 61 ++++++++++++---- workflow.py | 6 +- 18 files changed, 127 insertions(+), 219 deletions(-) delete mode 100644 playground/apps/discord_example.py delete mode 100644 playground/posmed/example_outputs.py create mode 100644 swarms/agents/simple_agent.py diff --git a/flow.py b/flow.py index 77fdc8e7..65a5c20f 100644 --- a/flow.py +++ b/flow.py @@ -19,4 +19,3 @@ flow = Flow( out = flow.run("Generate a 10,000 word blog on health and wellness.") print(out) - diff --git a/godmode.py b/godmode.py index 785e8659..f1269d98 100644 --- a/godmode.py +++ b/godmode.py @@ -3,20 +3,14 @@ from swarms.models import OpenAIChat api_key = "" -llm = OpenAIChat( - openai_api_key=api_key -) +llm = OpenAIChat(openai_api_key=api_key) -llms = [ - llm, - llm, - llm -] +llms = [llm, llm, llm] god_mode = GodMode(llms) -task = 'Generate a 10,000 word blog on health and wellness.' +task = "Generate a 10,000 word blog on health and wellness." out = god_mode.run(task) -god_mode.print_responses(task) \ No newline at end of file +god_mode.print_responses(task) diff --git a/playground/apps/discord_example.py b/playground/apps/discord_example.py deleted file mode 100644 index a3a90cf6..00000000 --- a/playground/apps/discord_example.py +++ /dev/null @@ -1,13 +0,0 @@ -from swarms.models import OpenAIChat -from apps.discord import Bot - -llm = OpenAIChat( - openai_api_key="Enter in your key", - temperature=0.5, -) - -bot = Bot(llm=llm) -task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times." - -bot.send_text(task) -bot.run() diff --git a/playground/posmed/example_outputs.py b/playground/posmed/example_outputs.py deleted file mode 100644 index 3f0d9bea..00000000 --- a/playground/posmed/example_outputs.py +++ /dev/null @@ -1,101 +0,0 @@ -review = """# AI in Healthcare: Revolutionizing the Future of Medicine - -## Introduction -In recent years, artificial intelligence (AI) has become a transformative force in numerous industries, including healthcare. The potential of AI to revolutionize medicine by improving diagnostic accuracy, optimizing treatment plans, and streamlining healthcare operations is truly groundbreaking. This comprehensive article explores the applications of AI in healthcare, delving into the benefits, challenges, and future prospects of this cutting-edge technology. - -## AI in Healthcare: A Game-Changer -AI in healthcare involves the use of intelligent algorithms and machine learning techniques to analyze complex medical data and assist healthcare professionals in clinical decision-making. From diagnosing diseases to developing new drugs, AI has the capacity to enhance every aspect of patient care. 
- -### Diagnosing Diseases with Precision -One of the most significant applications of AI in healthcare is its ability to assist in disease diagnosis. AI algorithms can analyze vast amounts of medical data, including electronic health records, medical images, and lab results, to identify patterns and detect abnormalities with greater accuracy than human doctors. - -### Optimizing Treatment Plans -AI can also help healthcare professionals develop personalized treatment plans for patients. By analyzing large datasets and comparing outcomes across similar cases, AI algorithms can provide insights into the most effective interventions, dosage recommendations, and potential adverse reactions. - -### Enhancing Medical Imaging -Medical imaging plays a critical role in diagnosing and treating various conditions. AI algorithms can analyze medical images, such as X-rays, MRIs, and CT scans, to identify anomalies and assist radiologists in making accurate interpretations. This not only improves diagnostic accuracy but also reduces the time taken for diagnosis. - -### Streamlining Healthcare Operations -AI can optimize healthcare operations by automating administrative tasks and improving efficiency. Intelligent chatbots can handle patient queries, freeing up healthcare staff to focus on more critical tasks. Predictive analytics can also help hospitals and healthcare providers anticipate patient demand, allocate resources effectively, and minimize waiting times. - - -### Data Privacy and Security -The use of AI in healthcare relies heavily on patient data. Ensuring the privacy and security of this data is crucial, as it contains sensitive and personal information. Healthcare organizations must implement robust measures to protect patient confidentiality and prevent unauthorized access or data breaches. - -### Bias and Discrimination -AI algorithms are only as good as the data they are trained on. If the training data is biased or incomplete, the algorithms may perpetuate these biases and lead to discriminatory outcomes. Striving for diversity and inclusivity in the data used to train AI models should be a top priority to mitigate these risks. - -### Regulatory Frameworks and Liability -The integration of AI into healthcare raises questions about regulatory frameworks and liability. Who is responsible if an AI algorithm makes an incorrect diagnosis or recommends an inappropriate treatment? Establishing clear guidelines and ethical frameworks is crucial to ensure accountability and safeguard patient well-being. - -## The Future of AI in Healthcare -Despite the challenges, the future of AI in healthcare looks promising. As technology continues to advance, there are several areas where AI could make a significant impact. - -### Drug Discovery and Development -Developing new drugs is a complex and time-consuming process. AI has the potential to accelerate this process by analyzing vast amounts of genomic and molecular data, identifying potential drug targets, and predicting drug efficacy. This could lead to faster drug development and more targeted therapies. - -### Remote Patient Monitoring -AI-powered devices and wearables can enable remote patient monitoring, allowing healthcare providers to track vital signs, detect abnormalities, and intervene early. This can improve patient outcomes, reduce the burden on healthcare facilities, and enable better management of chronic conditions. 
- -### Precision Medicine -AI can facilitate the implementation of precision medicine, an approach that considers individual variability in genes, environment, and lifestyle for targeted treatments. By analyzing vast amounts of patient data, including genomic information, AI algorithms can identify genetic markers and predict patient responses to specific treatments. - -### AI in Surgery -Robotic-assisted surgeries have become increasingly prevalent, enabling surgeons to perform complex procedures with greater precision and minimal invasiveness. AI algorithms can enhance surgical planning, assist during surgeries, and improve post-operative outcomes. - -## Conclusion -The potential of AI to transform healthcare by improving diagnostic accuracy, optimizing treatment plans, and streamlining healthcare operations is monumental. However, it is crucial to address challenges related to data privacy, bias, and regulatory frameworks to ensure the ethical and responsible use of AI in healthcare. As technology continues to advance, the future of AI in healthcare looks promising, offering opportunities to revolutionize patient care and pave the way for a new era of medicine. Embracing AI in healthcare can lead to better patient outcomes, increased efficiency, and ultimately, a healthier population. -""" - - - - -draft = """# AI in Healthcare: Revolutionizing the Future of Medicine - -## Introduction -In recent years, the advent of artificial intelligence (AI) has transformed various industries, and healthcare is no exception. AI has the potential to revolutionize medicine by improving diagnostic accuracy, optimizing treatment plans, and streamlining healthcare operations. This article delves into the applications of AI in healthcare, exploring the benefits, challenges, and future prospects of this cutting-edge technology. - -## AI in Healthcare: A Game-Changer -AI in healthcare refers to the use of intelligent algorithms and machine learning techniques to analyze complex medical data and assist healthcare professionals in clinical decision-making. From the diagnosis of diseases to the development of new drugs, AI has the potential to enhance every aspect of patient care. - -### Diagnosing Diseases with Precision -One of the most significant applications of AI in healthcare is its ability to assist in the diagnosis of diseases. AI algorithms can analyze vast amounts of medical data, including electronic health records, medical images, and lab results, to identify patterns and detect abnormalities with greater accuracy than human doctors. - -### Optimizing Treatment Plans -AI can also help healthcare professionals in developing personalized treatment plans for patients. By analyzing large datasets and comparing outcomes across similar cases, AI algorithms can provide insights into the most effective interventions, dosage recommendations, and potential adverse reactions. - -### Enhancing Medical Imaging -Medical imaging plays a critical role in the diagnosis and treatment of various conditions. AI algorithms can analyze medical images, such as X-rays, MRIs, and CT scans, to identify anomalies and assist radiologists in making accurate interpretations. This not only improves diagnostic accuracy but also reduces the time taken for diagnosis. - -### Streamlining Healthcare Operations -AI can optimize healthcare operations by automating administrative tasks and improving efficiency. Intelligent chatbots can handle patient queries, freeing up healthcare staff to focus on more critical tasks. 
Predictive analytics can also help hospitals and healthcare providers anticipate patient demand, allocate resources effectively, and minimize waiting times. - -## Challenges and Ethical Considerations -While the potential of AI in healthcare is immense, it also poses various challenges and ethical considerations that need to be addressed. - -### Data Privacy and Security -The use of AI in healthcare relies heavily on patient data. Ensuring the privacy and security of this data is crucial, as it contains sensitive and personal information. Healthcare organizations must implement robust measures to protect patient confidentiality and prevent unauthorized access or data breaches. - -### Bias and Discrimination -AI algorithms are only as good as the data they are trained on. If the training data is biased or incomplete, the algorithms may perpetuate these biases and lead to discriminatory outcomes. Striving for diversity and inclusivity in the data used to train AI models should be a top priority to mitigate these risks. - -### Regulatory Frameworks and Liability -The integration of AI into healthcare raises questions about regulatory frameworks and liability. Who is responsible if an AI algorithm makes an incorrect diagnosis or recommends an inappropriate treatment? Establishing clear guidelines and ethical frameworks is crucial to ensure accountability and safeguard patient well-being. - -## The Future of AI in Healthcare -Despite the challenges, the future of AI in healthcare looks promising. As technology continues to advance, there are several areas where AI could make a significant impact. - -### Drug Discovery and Development -Developing new drugs is a complex and time-consuming process. AI has the potential to accelerate this process by analyzing vast amounts of genomic and molecular data, identifying potential drug targets, and predicting drug efficacy. This could lead to faster drug development and more targeted therapies. - -### Remote Patient Monitoring -AI-powered devices and wearables can enable remote patient monitoring, allowing healthcare providers to track vital signs, detect abnormalities, and intervene early. This can improve patient outcomes, reduce the burden on healthcare facilities, and enable better management of chronic conditions. - -### Precision Medicine -AI can facilitate the implementation of precision medicine, an approach that considers individual variability in genes, environment, and lifestyle for targeted treatments. By analyzing vast amounts of patient data, including genomic information, AI algorithms can identify genetic markers and predict patient responses to specific treatments. - -### AI in Surgery -Robotic-assisted surgeries have become increasingly prevalent, enabling surgeons to perform complex procedures with greater precision and minimal invasiveness. AI algorithms can enhance surgical planning, assist during surgeries, and improve post-operative outcomes. - -## Conclusion -AI holds immense potential to transform healthcare by improving diagnostic accuracy, optimizing treatment plans, and streamlining healthcare operations. However, it is crucial to address challenges related to data privacy, bias, and regulatory frameworks to ensure the ethical and responsible use of AI in healthcare. As technology continues to advance, the future of AI in healthcare looks promising, offering opportunities to revolutionize patient care and pave the way for a new era of medicine. 
Embracing AI in healthcare can lead to better patient outcomes, increased efficiency, and ultimately, a healthier population."""
diff --git a/playground/posmed/positive_med.py b/playground/posmed/positive_med.py
index 9508fdd8..97c2ec60 100644
--- a/playground/posmed/positive_med.py
+++ b/playground/posmed/positive_med.py
@@ -58,16 +58,18 @@ Your role involves content analysis, editorial precision, expert validation, leg
 Re-Write the article, taking into account all review instructions and standards
 """
 
+
 def get_draft_prompt(topic, theme):
     prompt = DRAFT_PROMPT.replace("{{TOPIC}}", topic).replace("{{THEME}}", theme)
     return prompt
 
+
 def get_review_prompt(article):
     prompt = REVIEW_PROMPT.replace("{{ARTICLE}}", article)
     return prompt
 
-openai = OpenAIChat(openai_api_key="sk-S4xHnFJu7juD33jxjJZfZU1cZYi")
+openai = OpenAIChat(openai_api_key="S4xHnFJu7juD33jxjJZfZU1cZYi")
 
 draft = openai(get_draft_prompt("AI in healthcare", "Psychology"))
 review = openai(get_review_prompt(draft))
diff --git a/swarms/agents/simple_agent.py b/swarms/agents/simple_agent.py
new file mode 100644
index 00000000..78e09954
--- /dev/null
+++ b/swarms/agents/simple_agent.py
@@ -0,0 +1,14 @@
+class SimpleAgent:
+    def __init__(
+        self,
+        name: str,
+        llm,
+    ):
+        self.name = name
+        self.llm = llm
+        self.message_history = []
+
+    def run(self, task: str) -> str:
+        response = self.llm(task)
+        self.message_history.append((self.name, response))
+        return response
diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py
index 49a673af..328dd013 100644
--- a/swarms/models/__init__.py
+++ b/swarms/models/__init__.py
@@ -10,7 +10,6 @@ from swarms.models.wizard_storytelling import WizardLLMStoryTeller
 from swarms.models.mpt import MPT7B
 
-
 # MultiModal Models
 from swarms.models.idefics import Idefics
 from swarms.models.kosmos_two import Kosmos
diff --git a/swarms/models/huggingface.py b/swarms/models/huggingface.py
index d3a921f1..895b316e 100644
--- a/swarms/models/huggingface.py
+++ b/swarms/models/huggingface.py
@@ -157,7 +157,7 @@ class HuggingfaceLLM:
         except Exception as e:
             self.logger.error(f"Failed to generate the text: {e}")
             raise
-    
+
     async def run_async(self, task: str, *args, **kwargs) -> str:
         """
         Run the model asynchronously
@@ -235,7 +235,7 @@ class HuggingfaceLLM:
         except Exception as e:
             self.logger.error(f"Failed to generate the text: {e}")
             raise
-    
+
     async def __call_async__(self, task: str, *args, **kwargs) -> str:
         """Call the model asynchronously"""
         return await self.run_async(task, *args, **kwargs)
@@ -243,7 +243,7 @@ class HuggingfaceLLM:
     def save_model(self, path: str):
         self.model.save_pretrained(path)
         self.tokenizer.save_pretrained(path)
-    
+
     def gpu_available(self) -> bool:
         return torch.cuda.is_available()
@@ -252,6 +252,6 @@ class HuggingfaceLLM:
             torch.cuda.synchronize()
             allocated = torch.cuda.memory_allocated()
             reserved = torch.cuda.memory_reserved()
-            return {'allocated': allocated, 'reserved': reserved}
+            return {"allocated": allocated, "reserved": reserved}
         else:
-            return {'error': 'GPU not available'}
+            return {"error": "GPU not available"}
diff --git a/swarms/models/mpt.py b/swarms/models/mpt.py
index 6df8e0b1..035e2b54 100644
--- a/swarms/models/mpt.py
+++ b/swarms/models/mpt.py
@@ -22,7 +22,7 @@ class MPT7B:
 
     Examples:
-    >>> 
+    >>>
 
     """
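The new SimpleAgent above is intentionally thin: it wraps any callable LLM, forwards each task to self.llm, and appends the (name, response) pair to message_history. A minimal usage sketch, assuming an OpenAIChat instance that is callable with a prompt string, as in godmode.py and positive_med.py (the agent name "drafter" is illustrative):

    from swarms.models import OpenAIChat
    from swarms.agents.simple_agent import SimpleAgent

    llm = OpenAIChat(openai_api_key="")  # key left empty here, as in the other examples
    agent = SimpleAgent(name="drafter", llm=llm)
    response = agent.run("Draft a 100 word blog on health and wellness.")
    print(agent.message_history)  # [("drafter", response)]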
diff --git a/swarms/models/openai_models.py b/swarms/models/openai_models.py
index f4eb393c..a928cf52 100644
--- a/swarms/models/openai_models.py
+++ b/swarms/models/openai_models.py
@@ -23,7 +23,7 @@ from langchain.callbacks.manager import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
-from langchain.llms.base import BaseLLM, create_base_retry_decorator
+from swarms.models.base import BaseLLM, create_base_retry_decorator
 from langchain.pydantic_v1 import Field, root_validator
 from langchain.schema import Generation, LLMResult
 from langchain.schema.output import GenerationChunk
@@ -628,13 +628,14 @@ class OpenAI(BaseOpenAI):
     environment variable ``OPENAI_API_KEY`` set with your API key.
 
     Any parameters that are valid to be passed to the openai.create call can be passed
     in, even if not explicitly saved on this class.
 
     Example:
         .. code-block:: python
 
-            from langchain.llms import OpenAI
+            from swarms.models import OpenAI
 
             openai = OpenAI(model_name="text-davinci-003")
+            openai("What is the report on the 2022 Olympic games?")
     """
 
     @property
@@ -654,7 +655,7 @@ class AzureOpenAI(BaseOpenAI):
     Example:
         .. code-block:: python
 
-            from langchain.llms import AzureOpenAI
+            from swarms.models import AzureOpenAI
 
             openai = AzureOpenAI(model_name="text-davinci-003")
     """
@@ -716,7 +717,7 @@ class OpenAIChat(BaseLLM):
     Example:
         .. code-block:: python
 
-            from langchain.llms import OpenAIChat
+            from swarms.models import OpenAIChat
 
             openaichat = OpenAIChat(model_name="gpt-3.5-turbo")
     """
diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py
index 4f68c549..08c384ff 100644
--- a/swarms/structs/flow.py
+++ b/swarms/structs/flow.py
@@ -4,11 +4,13 @@ import time
 from typing import Any, Callable, Dict, List, Optional, Tuple, Generator
 from termcolor import colored
 
+
 # Custom stopping condition
 def stop_when_repeats(response: str) -> bool:
     # Stop if the word "stop" appears in the lowercased response
     return "stop" in response.lower()
 
+
 def parse_done_token(response: str) -> bool:
     """Parse the response to see if the done token is present"""
     return "<DONE>" in response
@@ -29,7 +31,7 @@ class Flow:
     * Ability to provide a stopping condition
     * Ability to provide a retry mechanism
     * Ability to provide a loop interval
-    
+
     Args:
         llm (Any): The language model to use
         max_loops (int): The maximum number of loops to run
@@ -55,6 +57,7 @@ class Flow:
     >>> flow.run("Generate a 10,000 word blog")
     >>> flow.save("path/flow.yaml")
     """
+
     def __init__(
         self,
         llm: Any,
@@ -118,7 +121,7 @@ class Flow:
         response = task
         history = [task]
         for i in range(self.max_loops):
-            print(colored(f"\nLoop {i+1} of {self.max_loops}", 'blue'))
+            print(colored(f"\nLoop {i+1} of {self.max_loops}", "blue"))
             print("\n")
             if self._check_stopping_condition(response):
                 break
@@ -135,7 +138,7 @@ class Flow:
             history.append(response)
             time.sleep(self.loop_interval)
         self.memory.append(history)
-        return response #, history
+        return response  # , history
 
     def _run(self, **kwargs: Any) -> str:
         """Generate a result using the provided keyword args."""
@@ -147,12 +150,8 @@ class Flow:
     def bulk_run(self, inputs: List[Dict[str, Any]]) -> List[str]:
         """Generate responses for multiple input sets."""
         return [self.run(**input_data) for input_data in inputs]
-    
-    def run_dynamically(
-        self,
-        task: str,
-        max_loops: Optional[int] = None
-    ):
+
+    def run_dynamically(self, task: str, max_loops: Optional[int] = None):
         """
         Run the autonomous agent loop dynamically based on the
         presence of the <DONE> token in the task: if the token appears,
         loop until the model emits it, bounded by max_loops if given.
 
         """
         if "<DONE>" in task:
             self.stopping_condition = parse_done_token
         self.max_loops = max_loops or float("inf")
         response = self.run(task)
         return response
@@ -188,15 +187,15 @@ class Flow:
         return Flow(llm=llm, template=template)
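The two module-level stopping helpers compose with Flow as follows: stop_when_repeats ends the run loop once "stop" appears in a response, while run_dynamically installs parse_done_token so the loop ends when the model emits the "<DONE>" token. A sketch of both paths, assuming an llm instance like the OpenAIChat examples above:

    from swarms.structs.flow import Flow, stop_when_repeats

    flow = Flow(llm=llm, max_loops=5, stopping_condition=stop_when_repeats)
    flow.run("Summarize the draft, then say stop when you are finished.")

    # A task containing "<DONE>" switches the stopping condition to
    # parse_done_token; max_loops bounds the otherwise unbounded loop.
    flow.run_dynamically("Write a haiku, then output <DONE>", max_loops=10)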
     def save(self, file_path) -> None:
-        with open(file_path, 'w') as f:
+        with open(file_path, "w") as f:
             json.dump(self.memory, f)
         print(f"Saved flow history to {file_path}")
 
     def load(self, file_path) -> None:
-        with open(file_path, 'r') as f:
+        with open(file_path, "r") as f:
             self.memory = json.load(f)
         print(f"Loaded flow history from {file_path}")
-    
+
     def validate_response(self, response: str) -> bool:
         """Validate the response based on certain criteria"""
         if len(response) < 5:
             print("Response is too short")
             return False
         return True
 
-    def run_with_timeout(
-        self,
-        task: str,
-        timeout: int = 60
-    ) -> str:
+    def run_with_timeout(self, task: str, timeout: int = 60) -> str:
         """Run the loop but stop if it takes longer than the timeout"""
         start_time = time.time()
         response = self.run(task)
         end_time = time.time()
         if end_time - start_time > timeout:
             print("Operation timed out")
-            return 'Timeout'
+            return "Timeout"
         return response
-    
-    def backup_memory_to_s3(
-        self,
-        bucket_name: str,
-        object_name: str
-    ):
+
+    def backup_memory_to_s3(self, bucket_name: str, object_name: str):
         """Backup the memory to S3"""
         import boto3
+
-        s3 = boto3.client('s3')
+        s3 = boto3.client("s3")
         s3.put_object(Bucket=bucket_name, Key=object_name, Body=json.dumps(self.memory))
         print(f"Backed up memory to S3: {bucket_name}/{object_name}")
@@ -249,7 +241,7 @@ class Flow:
             print(f"Response: {response}")
             previous_state, message = flow.undo_last()
             print(message)
-        
+
         """
         if len(self.memory) < 2:
             return None, None
 
         # Remove the last response
         self.memory.pop()
 
         # Get the previous state
         previous_state = self.memory[-1][-1]
         return previous_state, f"Restored to {previous_state}"
-    
+
     # Response Filtering
     def add_response_filter(self, filter_word: str) -> None:
         """
         Add a response filter to filter out certain words from the response
 
         Example:
         flow.add_response_filter("Trump")
         flow.run("Generate a report on Trump")
-        
+
         """
         self.response_filters.append(filter_word)
-    
+
     def apply_response_filters(self, response: str) -> str:
         """
         Apply the response filters to the response
-        
+
         """
         for word in self.response_filters:
             response = response.replace(word, "[FILTERED]")
         return response
 
     def filtered_run(self, task: str) -> str:
         """
         Run a task and apply the response filters to its output
         """
         raw_response = self.run(task)
         return self.apply_response_filters(raw_response)
-    
-    def interactive_run(
-        self,
-        max_loops: int = 5
-    ) -> None:
+
+    def interactive_run(self, max_loops: int = 5) -> None:
         """Interactive run mode"""
         response = input("Start the conversation: ")
 
         for i in range(max_loops):
             ai_response = self.streamed_generation(response)
             print(f"AI: {ai_response}")
 
             # Get user input
             response = input("You: ")
-    
-    def streamed_generation(
-        self,
-        prompt: str
-    ) -> str:
+
+    def streamed_generation(self, prompt: str) -> str:
         """
         Stream the generation of the response
-        
+
         Args:
             prompt (str): The prompt to use
-        
+
         Example:
         # Feature 4: Streamed generation
         response = flow.streamed_generation("Generate a report on finance")
         print(response)
-        
+
         """
         tokens = list(prompt)
         response = ""
         for token in tokens:
             time.sleep(0.1)
             response += token
             print(token, end="", flush=True)
         print()
         return response
 
-    def streamed_token_generation(
-        self,
-        prompt: str
-    ) -> Generator[str, None, None]:
+    def streamed_token_generation(self, prompt: str) -> Generator[str, None, None]:
         """
         Generate tokens in real-time for a given prompt.
 
-        This method simulates the real-time generation of each token. 
+        This method simulates the real-time generation of each token.
+ For simplicity, we treat each character of the input as a token + and yield them with a slight delay. In a real-world scenario, + this would involve using the LLM's internal methods to generate the response token by token. Args: @@ -355,4 +338,4 @@ class Flow: tokens = list(prompt) for token in tokens: time.sleep(0.1) - yield token \ No newline at end of file + yield token diff --git a/swarms/structs/workflow.py b/swarms/structs/workflow.py index 9df49404..762ee6cc 100644 --- a/swarms/structs/workflow.py +++ b/swarms/structs/workflow.py @@ -80,4 +80,4 @@ class Workflow: if isinstance(task.execute(), Exception): return else: - self.__run_from_task(next(iter(task.children), None)) \ No newline at end of file + self.__run_from_task(next(iter(task.children), None)) diff --git a/swarms/swarms/dialogue_simulator.py b/swarms/swarms/dialogue_simulator.py index b4f6e33a..6eec2aa9 100644 --- a/swarms/swarms/dialogue_simulator.py +++ b/swarms/swarms/dialogue_simulator.py @@ -8,7 +8,7 @@ class DialogueSimulator: Args: ------ - + diff --git a/swarms/swarms/multi_agent_collab.py b/swarms/swarms/multi_agent_collab.py index 620ba655..6413b662 100644 --- a/swarms/swarms/multi_agent_collab.py +++ b/swarms/swarms/multi_agent_collab.py @@ -2,6 +2,7 @@ import random import tenacity from langchain.output_parsers import RegexParser + # utils class BidOutputParser(RegexParser): def get_format_instructions(self) -> str: diff --git a/swarms/swarms/multi_agent_debate.py b/swarms/swarms/multi_agent_debate.py index 63669297..4bba3619 100644 --- a/swarms/swarms/multi_agent_debate.py +++ b/swarms/swarms/multi_agent_debate.py @@ -1,5 +1,6 @@ from typing import List, Callable + # Define a selection function def select_speaker(step: int, agents) -> int: # This function selects the speaker in a round-robin fashion @@ -16,7 +17,9 @@ class MultiAgentDebate: """ def __init__( - self, agents, selection_func, + self, + agents, + selection_func, ): self.agents = agents self.selection_func = selection_func @@ -43,10 +46,7 @@ class MultiAgentDebate: def format_results(self, results): formatted_results = "\n".join( - [ - f"Agent responded: {result['response']}" - for result in results - ] + [f"Agent responded: {result['response']}" for result in results] ) return formatted_results diff --git a/swarms/swarms/simple_swarm.py b/swarms/swarms/simple_swarm.py index 81fe1c6d..7e806215 100644 --- a/swarms/swarms/simple_swarm.py +++ b/swarms/swarms/simple_swarm.py @@ -41,9 +41,7 @@ class SimpleSwarm: """ self.llm = llm - self.agents = [ - self.llm for _ in range(num_agents) - ] + self.agents = [self.llm for _ in range(num_agents)] self.task_queue = Queue() self.priority_queue = PriorityQueue() diff --git a/tests/structs/flow.py b/tests/structs/flow.py index 74720b55..3cfeca8d 100644 --- a/tests/structs/flow.py +++ b/tests/structs/flow.py @@ -12,6 +12,7 @@ load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") + # Mocks and Fixtures @pytest.fixture def mocked_llm(): @@ -19,19 +20,23 @@ def mocked_llm(): openai_api_key=openai_api_key, ) + @pytest.fixture def basic_flow(mocked_llm): return Flow(llm=mocked_llm, max_loops=5) + @pytest.fixture def flow_with_condition(mocked_llm): return Flow(llm=mocked_llm, max_loops=5, stopping_condition=stop_when_repeats) + # Basic Tests def test_stop_when_repeats(): assert stop_when_repeats("Please Stop now") assert not stop_when_repeats("Continue the process") + def test_flow_initialization(basic_flow): assert basic_flow.max_loops == 5 assert basic_flow.stopping_condition is None @@ -44,124 +49,146 @@ 
def test_flow_initialization(basic_flow):
     assert basic_flow.stopping_token == "<DONE>"
     assert not basic_flow.interactive
 
+
 def test_provide_feedback(basic_flow):
     feedback = "Test feedback"
     basic_flow.provide_feedback(feedback)
     assert feedback in basic_flow.feedback
 
-@patch('time.sleep', return_value=None)  # to speed up tests
+
+@patch("time.sleep", return_value=None)  # to speed up tests
 def test_run_without_stopping_condition(mocked_sleep, basic_flow):
     response = basic_flow.run("Test task")
     assert response == "Test task"  # since our mocked llm doesn't modify the response
 
-@patch('time.sleep', return_value=None)  # to speed up tests
+
+@patch("time.sleep", return_value=None)  # to speed up tests
 def test_run_with_stopping_condition(mocked_sleep, flow_with_condition):
     response = flow_with_condition.run("Stop")
     assert response == "Stop"
 
-@patch('time.sleep', return_value=None)  # to speed up tests
+
+@patch("time.sleep", return_value=None)  # to speed up tests
 def test_run_with_exception(mocked_sleep, basic_flow):
     basic_flow.llm.side_effect = Exception("Test Exception")
     with pytest.raises(Exception, match="Test Exception"):
         basic_flow.run("Test task")
 
+
 def test_bulk_run(basic_flow):
     inputs = [{"task": "Test1"}, {"task": "Test2"}]
     responses = basic_flow.bulk_run(inputs)
     assert responses == ["Test1", "Test2"]
 
+
 # Tests involving file IO
 def test_save_and_load(basic_flow, tmp_path):
     file_path = tmp_path / "memory.json"
     basic_flow.memory.append(["Test1", "Test2"])
     basic_flow.save(file_path)
-    
+
     new_flow = Flow(llm=basic_flow.llm, max_loops=5)
     new_flow.load(file_path)
     assert new_flow.memory == [["Test1", "Test2"]]
 
+
 # Environment variable mock test
 def test_env_variable_handling(monkeypatch):
     monkeypatch.setenv("API_KEY", "test_key")
     assert os.getenv("API_KEY") == "test_key"
 
+
 # TODO: Add more tests, especially edge cases and exception cases. Implement parametrized tests for varied inputs (see the sketch below).
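A parametrized sketch of what those tests could look like (test_run_echoes_varied_inputs is hypothetical; it reuses the echo assumption the run tests above already make):

    @pytest.mark.parametrize("task", ["Test1", "Test2", "Stop now"])
    @patch("time.sleep", return_value=None)
    def test_run_echoes_varied_inputs(mocked_sleep, basic_flow, task):
        # the llm used by basic_flow returns its input unchanged
        assert basic_flow.run(task) == task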
+ # Test initializing the flow with different stopping conditions def test_flow_with_custom_stopping_condition(mocked_llm): def stopping_condition(x): return "terminate" in x.lower() + flow = Flow(llm=mocked_llm, max_loops=5, stopping_condition=stopping_condition) assert flow.stopping_condition("Please terminate now") assert not flow.stopping_condition("Continue the process") + # Test calling the flow directly def test_flow_call(basic_flow): response = basic_flow("Test call") assert response == "Test call" + # Test formatting the prompt def test_format_prompt(basic_flow): formatted_prompt = basic_flow.format_prompt("Hello {name}", name="John") assert formatted_prompt == "Hello John" + # Test with max loops -@patch('time.sleep', return_value=None) +@patch("time.sleep", return_value=None) def test_max_loops(mocked_sleep, basic_flow): basic_flow.max_loops = 3 response = basic_flow.run("Looping") assert response == "Looping" + # Test stopping token -@patch('time.sleep', return_value=None) +@patch("time.sleep", return_value=None) def test_stopping_token(mocked_sleep, basic_flow): basic_flow.stopping_token = "Terminate" response = basic_flow.run("Loop until Terminate") assert response == "Loop until Terminate" + # Test interactive mode def test_interactive_mode(basic_flow): basic_flow.interactive = True assert basic_flow.interactive + # Test bulk run with varied inputs def test_bulk_run_varied_inputs(basic_flow): inputs = [{"task": "Test1"}, {"task": "Test2"}, {"task": "Stop now"}] responses = basic_flow.bulk_run(inputs) assert responses == ["Test1", "Test2", "Stop now"] + # Test loading non-existent file def test_load_non_existent_file(basic_flow, tmp_path): file_path = tmp_path / "non_existent.json" with pytest.raises(FileNotFoundError): basic_flow.load(file_path) + # Test saving with different memory data def test_save_different_memory(basic_flow, tmp_path): file_path = tmp_path / "memory.json" basic_flow.memory.append(["Task1", "Task2", "Task3"]) basic_flow.save(file_path) - with open(file_path, 'r') as f: + with open(file_path, "r") as f: data = json.load(f) assert data == [["Task1", "Task2", "Task3"]] + # Test the stopping condition check def test_check_stopping_condition(flow_with_condition): assert flow_with_condition._check_stopping_condition("Stop this process") assert not flow_with_condition._check_stopping_condition("Continue the task") + # Test without providing max loops (default value should be 5) def test_default_max_loops(mocked_llm): flow = Flow(llm=mocked_llm) assert flow.max_loops == 5 + # Test creating flow from llm and template def test_from_llm_and_template(mocked_llm): flow = Flow.from_llm_and_template(mocked_llm, "Test template") assert isinstance(flow, Flow) + # Mocking the OpenAIChat for testing -@patch('swarms.models.OpenAIChat', autospec=True) +@patch("swarms.models.OpenAIChat", autospec=True) def test_mocked_openai_chat(MockedOpenAIChat): llm = MockedOpenAIChat(openai_api_key=openai_api_key) llm.return_value = MagicMock() @@ -169,34 +196,39 @@ def test_mocked_openai_chat(MockedOpenAIChat): flow.run("Mocked run") assert MockedOpenAIChat.called + # Test retry attempts -@patch('time.sleep', return_value=None) +@patch("time.sleep", return_value=None) def test_retry_attempts(mocked_sleep, basic_flow): basic_flow.retry_attempts = 2 basic_flow.llm.side_effect = [Exception("Test Exception"), "Valid response"] response = basic_flow.run("Test retry") assert response == "Valid response" + # Test different loop intervals -@patch('time.sleep', return_value=None) 
+@patch("time.sleep", return_value=None) def test_different_loop_intervals(mocked_sleep, basic_flow): basic_flow.loop_interval = 2 response = basic_flow.run("Test loop interval") assert response == "Test loop interval" + # Test different retry intervals -@patch('time.sleep', return_value=None) +@patch("time.sleep", return_value=None) def test_different_retry_intervals(mocked_sleep, basic_flow): basic_flow.retry_interval = 2 response = basic_flow.run("Test retry interval") assert response == "Test retry interval" + # Test invoking the flow with additional kwargs -@patch('time.sleep', return_value=None) +@patch("time.sleep", return_value=None) def test_flow_call_with_kwargs(mocked_sleep, basic_flow): response = basic_flow("Test call", param1="value1", param2="value2") assert response == "Test call" + # Test initializing the flow with all parameters def test_flow_initialization_all_params(mocked_llm): flow = Flow( @@ -208,7 +240,7 @@ def test_flow_initialization_all_params(mocked_llm): retry_interval=2, interactive=True, param1="value1", - param2="value2" + param2="value2", ) assert flow.max_loops == 10 assert flow.loop_interval == 2 @@ -216,8 +248,9 @@ def test_flow_initialization_all_params(mocked_llm): assert flow.retry_interval == 2 assert flow.interactive + # Test the stopping token is in the response -@patch('time.sleep', return_value=None) +@patch("time.sleep", return_value=None) def test_stopping_token_in_response(mocked_sleep, basic_flow): response = basic_flow.run("Test stopping token") assert basic_flow.stopping_token in response diff --git a/workflow.py b/workflow.py index 601de86a..bc757108 100644 --- a/workflow.py +++ b/workflow.py @@ -2,12 +2,10 @@ from swarms.models import OpenAIChat from swarms.structs import Workflow -llm = OpenAIChat( - openai_api_key="" -) +llm = OpenAIChat(openai_api_key="") workflow = Workflow(llm) workflow.add("What's the weather in miami") -workflow.run() \ No newline at end of file +workflow.run()
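godmode.py and workflow.py still construct OpenAIChat with an empty api_key string, and positive_med.py hardcodes one; a follow-up could reuse the environment-based pattern that tests/structs/flow.py already uses:

    import os
    from dotenv import load_dotenv
    from swarms.models import OpenAIChat

    load_dotenv()
    llm = OpenAIChat(openai_api_key=os.getenv("OPENAI_API_KEY"))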