diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index 95c03a5d..e9191e96 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -131,6 +131,8 @@ nav:
     - Swarm Ecosystem: "swarms/ecosystem.md"
     - Framework:
       - Overview: "swarms/framework/index.md"
+      - Contributing:
+        - Tests: "swarms/framework/test.md"
     - Models:
       - Overview: "swarms/models/index.md"
      - How to Create A Custom Language Model: "swarms/models/custom_model.md"
@@ -146,7 +148,6 @@ nav:
       - Overview: "swarms/structs/multi_agent_orchestration.md"
       - Swarm Architectures: "swarms/concept/swarm_architectures.md"
       - Workflows: "swarms/structs/workflows.md"
-      # - Multi-Agent Architectures: "swarms/structs/multi_agent_architectures.md"
     - Reference:
       - Overview: "swarms/structs/index.md"
       - Models:
diff --git a/docs/swarms/framework/test.md b/docs/swarms/framework/test.md
new file mode 100644
index 00000000..9316d4b2
--- /dev/null
+++ b/docs/swarms/framework/test.md
@@ -0,0 +1,244 @@
+# How to Run Tests Using Pytest: A Comprehensive Guide
+
+In modern software development, automated testing is crucial for ensuring the reliability and functionality of your code. One of the most popular testing frameworks for Python is `pytest`.
+
+This guide provides an in-depth look at how to run tests using `pytest`: testing a single file, multiple files, or every file in the test repository, along with guidelines that help contributors run tests reliably.
+
+## What is Pytest?
+
+`pytest` is a testing framework for Python that makes it easy to write simple and scalable test cases. It supports fixtures and parameterized testing, and it has a rich plugin architecture. `pytest` is widely used because of its ease of use and powerful features that help streamline the testing process.
+
+## Installation
+
+To get started with `pytest`, you need to install it. You can install `pytest` using `pip`:
+
+```bash
+pip install pytest
+```
+
+## Writing Your First Test
+
+Before diving into running tests, let’s write a simple test. Create a file named `test_sample.py` with the following content:
+
+```python
+def test_addition():
+    assert 1 + 1 == 2
+
+def test_subtraction():
+    assert 2 - 1 == 1
+```
+
+In this example, we have defined two basic tests: `test_addition` and `test_subtraction`.
+
+## Running Tests
+
+### Running a Single Test File
+
+To run a single test file, use the `pytest` command followed by the filename. For example, to run the tests in `test_sample.py`, use the following command:
+
+```bash
+pytest test_sample.py
+```
+
+The output will show the test results, including the number of tests passed, failed, or skipped.
+
+### Running Multiple Test Files
+
+You can also run multiple test files by specifying their filenames separated by a space. For example:
+
+```bash
+pytest test_sample.py test_another_sample.py
+```
+
+If you have multiple test files in a directory, you can run all of them by specifying the directory name:
+
+```bash
+pytest tests/
+```
+
+### Running All Tests in the Repository
+
+To run all tests in the repository, navigate to the root directory of your project and simply run:
+
+```bash
+pytest
+```
+
+`pytest` will automatically discover and run all the test files that match the pattern `test_*.py` or `*_test.py`.
+
+### Test Discovery
+
+`pytest` automatically discovers test files and test functions based on their naming conventions. By default, it looks for files that match the pattern `test_*.py` or `*_test.py` and functions or methods that start with `test_`.
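+
+If your project uses a different layout or naming convention, these defaults can be overridden in a configuration file. Below is a minimal sketch using `pytest.ini`; the `tests` directory and the extra `check_*.py` pattern are illustrative assumptions, not required settings:
+
+```ini
+# pytest.ini: a minimal sketch of customizing test discovery
+[pytest]
+# Limit discovery to the tests/ directory
+testpaths = tests
+# Collect an extra, hypothetical naming pattern alongside the defaults
+python_files = test_*.py *_test.py check_*.py
+# Only collect functions whose names start with test_
+python_functions = test_*
+```
+
+With `testpaths` set, a bare `pytest` run from the project root only collects tests from the listed directories, which can noticeably speed up discovery in large repositories.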
+
+### Using Markers
+
+`pytest` allows you to use markers to group tests or add metadata to them. Markers can be used to run specific subsets of tests. For example, you can mark a test as `slow` and then run only the slow tests or skip them.
+
+```python
+import pytest
+
+@pytest.mark.slow
+def test_long_running():
+    import time
+    time.sleep(5)
+    assert True
+
+def test_fast():
+    assert True
+```
+
+To run only the tests marked as `slow`, use the `-m` option:
+
+```bash
+pytest -m slow
+```
+
+Custom markers should also be registered in your `pytest.ini` or `pyproject.toml`; otherwise recent versions of `pytest` emit a warning for unknown marks.
+
+### Parameterized Tests
+
+`pytest` supports parameterized testing, which allows you to run a test with different sets of input data. This can be done using the `@pytest.mark.parametrize` decorator.
+
+```python
+import pytest
+
+@pytest.mark.parametrize("a,b,expected", [
+    (1, 2, 3),
+    (2, 3, 5),
+    (3, 5, 8),
+])
+def test_add(a, b, expected):
+    assert a + b == expected
+```
+
+In this example, `test_add` will run three times with different sets of input data.
+
+### Fixtures
+
+Fixtures are a powerful feature of `pytest` that allow you to set up some context for your tests. They can be used to provide a fixed baseline upon which tests can reliably and repeatedly execute.
+
+```python
+import pytest
+
+@pytest.fixture
+def sample_data():
+    return {"name": "John", "age": 30}
+
+def test_sample_data(sample_data):
+    assert sample_data["name"] == "John"
+    assert sample_data["age"] == 30
+```
+
+Fixtures can be used to share setup and teardown code between tests.
+
+## Advanced Usage
+
+### Running Tests in Parallel
+
+`pytest` can run tests in parallel using the `pytest-xdist` plugin. To install `pytest-xdist`, run:
+
+```bash
+pip install pytest-xdist
+```
+
+To run tests in parallel, use the `-n` option followed by the number of worker processes to spawn; `-n auto` spawns one worker per available CPU core:
+
+```bash
+pytest -n 4
+```
+
+### Generating Test Reports
+
+`pytest` can generate detailed test reports. You can use the `--html` option to generate an HTML report:
+
+```bash
+pip install pytest-html
+pytest --html=report.html
+```
+
+This command will generate a file named `report.html` with a detailed report of the test results.
+
+### Code Coverage
+
+You can use the `pytest-cov` plugin to measure code coverage. To install `pytest-cov`, run:
+
+```bash
+pip install pytest-cov
+```
+
+To generate a coverage report, use the `--cov` option followed by the module name:
+
+```bash
+pytest --cov=my_module
+```
+
+This command will show the coverage summary in the terminal. You can also generate an HTML report:
+
+```bash
+pytest --cov=my_module --cov-report=html
+```
+
+The coverage report will be generated in the `htmlcov` directory.
+
+## Best Practices for Writing Tests
+
+1. **Write Clear and Concise Tests**: Each test should focus on a single piece of functionality.
+2. **Use Descriptive Names**: Test function names should clearly describe what they are testing.
+3. **Keep Tests Independent**: Tests should not depend on each other and should run in isolation.
+4. **Use Fixtures**: Use fixtures to set up the context for your tests.
+5. **Mock External Dependencies**: Use mocking to isolate the code under test from external dependencies.
+
+## Running Tests Reliably
+
+For contributors and team members, it’s important to run tests reliably to ensure consistent results. Here are some guidelines:
+
+1. **Set Up a Virtual Environment**: Use a virtual environment to manage dependencies and ensure a consistent testing environment.
+
+    ```bash
+    python -m venv venv
+    source venv/bin/activate  # On Windows use `venv\Scripts\activate`
+    ```
+
+2. **Install Dependencies**: Install all required dependencies from the `requirements.txt` file.
+
+    ```bash
+    pip install -r requirements.txt
+    ```
+
+3. **Run Tests Before Pushing**: Ensure all tests pass before pushing code to the repository.
+
+4. **Use Continuous Integration (CI)**: Set up CI pipelines to automatically run tests on each commit or pull request.
+
+### Example CI Configuration (GitHub Actions)
+
+Here is an example of a GitHub Actions workflow to run tests using `pytest`:
+
+```yaml
+name: Python package
+
+on: [push, pull_request]
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.8'
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r requirements.txt
+    - name: Run tests
+      run: |
+        pytest
+```
+
+This configuration will run the tests on every push and pull request, ensuring that your codebase remains stable.
+
+## Conclusion
+
+`pytest` is a powerful and flexible testing framework that makes it easy to write and run tests for your Python code. By following the guidelines and best practices outlined in this guide, you can ensure that your tests are reliable and your codebase is robust. Whether you are testing a single file, multiple files, or the entire repository, `pytest` provides the tools you need to automate and streamline your testing process.
+
+Happy testing!
\ No newline at end of file
diff --git a/playground/demos/society_of_agents/accountant_team.py b/playground/demos/society_of_agents/accountant_team.py
new file mode 100644
index 00000000..882890b1
--- /dev/null
+++ b/playground/demos/society_of_agents/accountant_team.py
@@ -0,0 +1,89 @@
+from swarms import OpenAIChat, AgentRearrange, Agent
+from swarms.prompts.finance_agent_sys_prompt import (
+    FINANCIAL_AGENT_SYS_PROMPT,
+)
+from swarms.utils.data_to_text import data_to_text
+
+model = OpenAIChat(max_tokens=3000)
+
+# Initialize the agent
+receipt_analyzer_agent = Agent(
+    agent_name="Receipt Analyzer",
+    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+    llm=model,
+    max_loops=1,
+    autosave=True,
+    # dynamic_temperature_enabled=True,
+    dashboard=False,
+    verbose=True,
+    streaming_on=True,
+    # interactive=True, # Set to False to disable interactive mode
+    dynamic_temperature_enabled=True,
+    saved_state_path="finance_agent.json",
+    # tools=[Add your functions here# ],
+    # stopping_token="Stop!",
+    # interactive=True,
+    # docs_folder="docs", # Enter your folder name
+    # pdf_path="docs/finance_agent.pdf",
+    # sop="Calculate the profit for a company.",
+    # sop_list=["Calculate the profit for a company."],
+    user_name="swarms_corp",
+    # # docs=
+    # # docs_folder="docs",
+    retry_attempts=3,
+    # tool_schema = dict
+    # agent_ops_on=True,
+    # long_term_memory=ChromaDB(docs_folder="artifacts"),
+    # multi_modal=True
+)
+
+
+# 2nd agent
+analyst_agent = Agent(
+    agent_name="Analyst_Agent",
+    system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
+    llm=model,
+    max_loops=1,
+    autosave=True,
+    # dynamic_temperature_enabled=True,
+    dashboard=False,
+    verbose=True,
+    streaming_on=True,
+    # interactive=True, # Set to False to disable interactive mode
+    dynamic_temperature_enabled=True,
+    saved_state_path="analyst_agent.json",  # separate state file so the two agents don't overwrite each other
+    # tools=[Add your functions here# ],
+    # stopping_token="Stop!",
+    # interactive=True,
+    # docs_folder="docs", # Enter your folder name
+    # pdf_path="docs/finance_agent.pdf",
+    # sop="Calculate the profit for a company.",
+    # sop_list=["Calculate the profit for a company."],
+    user_name="swarms_corp",
+    # # docs=
+    # # docs_folder="docs",
+    retry_attempts=3,
+    # tool_schema = dict
+    # agent_ops_on=True,
+    # long_term_memory=ChromaDB(docs_folder="artifacts"),
+    # multi_modal=True,
+)
+
+
+# Agents
+agents = [receipt_analyzer_agent, analyst_agent]
+
+# Flow
+flow = f"{receipt_analyzer_agent.agent_name} -> {analyst_agent.agent_name} -> H"
+pdf = data_to_text("receipt.pdf")
+
+# Swarm
+swarm = AgentRearrange(
+    agents=agents,
+    flow=flow,
+)
+
+# Run the swarm
+swarm.run(
+    f"Analyze this PDF: {pdf} and return a summary of the expenses and whether they are necessary"
+)
diff --git a/playground/demos/society_of_agents/hallucination_swarm.py b/playground/demos/society_of_agents/hallucination_swarm.py
new file mode 100644
index 00000000..3f6764ba
--- /dev/null
+++ b/playground/demos/society_of_agents/hallucination_swarm.py
@@ -0,0 +1,229 @@
+from swarms import Agent, OpenAIChat
+
+#
+# model = HuggingfaceLLM(model_id="openai-community/gpt2", max_length=1000)
+# model = TogetherLLM(model_name="google/gemma-7b", max_tokens=1000)
+model = OpenAIChat()
+
+# Initialize the agent
+hallucinator = Agent(
+    agent_name="HallucinatorAgent",
+    system_prompt="You're a chicken, just peck forever and ever. ",
+    agent_description="Generate a profit report for a company!",
+    max_loops=1,
+    llm=model,
+    autosave=True,
+    dynamic_temperature_enabled=True,
+    dashboard=False,
+    verbose=True,
+    streaming_on=True,
+    # interactive=True, # Set to False to disable interactive mode
+    saved_state_path="hallucinator_agent.json",
+    stopping_token="Stop!",
+    # interactive=True,
+    # docs_folder="docs",
+    # pdf_path="docs/accounting_agent.pdf",
+    # sop="Calculate the profit for a company.",
+    # sop_list=["Calculate the profit for a company."],
+    # user_name="User",
+    # # docs=
+    # # docs_folder="docs",
+    # retry_attempts=3,
+    # context_length=1000,
+    # tool_schema = dict
+    context_length=1000,
+    # agent_ops_on=True,
+    # tree_of_thoughts=True,
+    # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+
+AGENT_EVAL_SYS_PROMPT = """
+
+### System Prompt for Agent Evaluator
+
+---
+
+**Objective:**
+Your task is to evaluate the outputs of various agents. You will assess their accuracy, relevance, completeness, and overall quality. Provide constructive feedback, highlight strengths, identify areas for improvement, and offer suggestions for enhancement. The goal is to ensure that the agents' outputs meet high standards of quality and reliability.
+
+**Evaluation Criteria:**
+1. **Accuracy:** Is the information provided correct and free of errors?
+2. **Relevance:** Is the output pertinent to the task or query?
+3. **Completeness:** Does the response cover all necessary aspects of the query?
+4. **Clarity:** Is the information presented clearly and understandably?
+5. **Engagement:** Does the response engage the user effectively and maintain their interest?
+6. **Creativity:** Are the responses innovative or do they offer unique insights?
+7. **Formatting:** Is the output well-organized and easy to follow?
+
+**Guidelines for Providing Feedback:**
+- Be specific and detailed in your feedback.
+- Highlight specific parts of the output that are strong or weak.
+- Provide actionable suggestions for improvement.
+- Maintain a constructive and supportive tone.
+
+---
+
+### Many-Shot Examples
+
+**Example 1: Evaluating a Response to a Customer Support Query**
+
+**Query:** "How do I reset my password?"
+
+**Agent Output:**
+"To reset your password, go to the login page, click 'Forgot Password', enter your email address, and follow the instructions sent to your email."
+
+**Evaluation:**
+
+1. **Accuracy:**
+   - The information is accurate. The steps outlined are correct.
+
+2. **Relevance:**
+   - The response is directly relevant to the query.
+
+3. **Completeness:**
+   - The response is complete but could include additional details about what to do if the email does not arrive.
+
+4. **Clarity:**
+   - The instructions are clear and easy to follow.
+
+5. **Engagement:**
+   - The response is straightforward but lacks engagement. It could be more user-friendly.
+
+6. **Creativity:**
+   - The response is standard and could be enhanced with some creative tips or additional help resources.
+
+7. **Formatting:**
+   - The response is well-organized and concise.
+
+**Feedback:**
+- Great job on providing clear and accurate instructions.
+- Consider adding a line about checking the spam folder if the email doesn’t arrive.
+- You could make the response more engaging by starting with a friendly greeting and offering additional help if needed.
+- Example enhanced response: "Hi there! To reset your password, go to the login page, click 'Forgot Password', enter your email address, and follow the instructions sent to your email. If you don't receive the email, be sure to check your spam folder. Need further assistance? We're here to help!"
+
+---
+
+**Example 2: Evaluating a Technical Explanation**
+
+**Query:** "Explain the difference between supervised and unsupervised learning."
+
+**Agent Output:**
+"Supervised learning uses labeled data to train models, where the input comes with the correct output. Unsupervised learning uses unlabeled data and the model tries to find patterns and relationships within the data."
+
+**Evaluation:**
+
+1. **Accuracy:**
+   - The explanation is accurate.
+
+2. **Relevance:**
+   - The response is relevant to the query.
+
+3. **Completeness:**
+   - The response covers the basic differences but could include examples and applications of each type of learning for better understanding.
+
+4. **Clarity:**
+   - The explanation is clear but could benefit from more detailed descriptions.
+
+5. **Engagement:**
+   - The response is informative but somewhat dry. Adding examples could make it more engaging.
+
+6. **Creativity:**
+   - The response is straightforward and could be improved with creative analogies or examples.
+
+7. **Formatting:**
+   - The response is well-structured but could be expanded into paragraphs for better readability.
+
+**Feedback:**
+- Your explanation is accurate and clear.
+- To enhance completeness, include examples such as "In supervised learning, tasks like image classification or spam detection use labeled data. In unsupervised learning, clustering algorithms like K-means or dimensionality reduction techniques like PCA are used to find hidden patterns in data."
+- Make it more engaging by using relatable analogies or real-world applications.
+- Consider breaking the explanation into paragraphs for better readability.
+- Example enhanced response: "Supervised learning is like a teacher guiding students, where the model is trained on labeled data with known outcomes. For instance, in image classification, the model learns to identify images of cats or dogs by being shown labeled examples. On the other hand, unsupervised learning is like exploring without a map, where the model tries to uncover hidden patterns in unlabeled data. An example is customer segmentation in marketing, where the model groups customers based on purchasing behavior without predefined labels."
+
+---
+
+**Example 3: Evaluating a Creative Writing Piece**
+
+**Prompt:** "Write a short story about a time-traveling detective."
+
+**Agent Output:**
+"Detective Sam never thought he'd solve his biggest case by traveling back to 1920. But when a mysterious device appeared on his desk, he knew he had to try. As he stepped through the portal, the world around him changed. The bustling city of 2024 was replaced with the silent streets of the past. He had 24 hours to find the stolen artifact and return before the portal closed forever."
+
+**Evaluation:**
+
+1. **Accuracy:**
+   - N/A (not applicable to creative writing).
+
+2. **Relevance:**
+   - The story is relevant to the prompt.
+
+3. **Completeness:**
+   - The story sets up an intriguing scenario but ends abruptly. It needs more development.
+
+4. **Clarity:**
+   - The writing is clear and easy to understand.
+
+5. **Engagement:**
+   - The story is engaging but leaves the reader wanting more.
+
+6. **Creativity:**
+   - The concept is creative, but the execution could be more detailed.
+
+7. **Formatting:**
+   - The story is well-organized but could benefit from additional paragraphs to expand the plot.
+
+**Feedback:**
+- Great start with an intriguing premise!
+- Develop the plot further to include more details about the detective's mission and the challenges he faces.
+- Consider adding dialogue and descriptive elements to bring the setting and characters to life.
+- Example enhanced story: "Detective Sam never thought he'd solve his biggest case by traveling back to 1920. But when a mysterious device appeared on his desk, he knew he had to try. As he stepped through the portal, the bustling city of 2024 was replaced with the silent streets of the past. Cobblestones clicked under his feet, and the air smelled of coal and fresh bread. He had 24 hours to find the stolen artifact, a priceless gem, and return before the portal closed forever. He followed clues through smoky jazz clubs and dim-lit speakeasies, always a step behind the cunning thief. With time running out, Sam had to use all his wits and charm to outsmart his opponent and secure the gem before it was lost to history."
+
+---
+
+**Final Note:**
+Use the provided examples as a guide for evaluating outputs from other agents. Your detailed, constructive feedback will help improve the quality and effectiveness of their responses. Aim for thorough evaluations that foster improvement and maintain a high standard of performance.
+ +""" + +# Initialize the agent +agent_evaluator = Agent( + agent_name="AgentEvaluator", + system_prompt="Evaluate the current agent, analyze it's outputs, analyze its hallucination rate, evaluate the output", + max_loops=1, + llm=model, + autosave=True, + dynamic_temperature_enabled=True, + dashboard=False, + verbose=True, + streaming_on=True, + # interactive=True, # Set to False to disable interactive mode + saved_state_path="evaluator.json", + stopping_token="Stop!", + # interactive=True, + # docs_folder="docs", + # pdf_path="docs/accounting_agent.pdf", + # sop="Calculate the profit for a company.", + # sop_list=["Calculate the profit for a company."], + # user_name="User", + # # docs= + # # docs_folder="docs", + # retry_attempts=3, + # context_length=1000, + user_name="Human", + # tool_schema = dict + context_length=1000, + # agent_ops_on=True, + # tree_of_thoughts=True, + # long_term_memory=ChromaDB(docs_folder="artifacts"), +) + + +# Run the agents +out = hallucinator.run("What is the CURRENT US president right now") + +# Run the evaluator +evaluator = agent_evaluator.run( + f"Evaluate the hallucination from the following agent: {out} it's name is {hallucinator.agent_name}, rate how much it's hallucinating, and how it can be fixed: The task is who is currently running for president which is Trump and Biden" +) +print(evaluator) diff --git a/scripts/cleanup/json_log_cleanup.py b/playground/demos/society_of_agents/json_log_cleanup.py similarity index 97% rename from scripts/cleanup/json_log_cleanup.py rename to playground/demos/society_of_agents/json_log_cleanup.py index d5f9f71b..dd7ac6da 100644 --- a/scripts/cleanup/json_log_cleanup.py +++ b/playground/demos/society_of_agents/json_log_cleanup.py @@ -58,4 +58,4 @@ def cleanup_json_logs(name: str = None): # Call the function -cleanup_json_logs("social_media_swarm") +cleanup_json_logs("social_of_media_hackathon") diff --git a/playground/demos/society_of_agents/probate_agent.py b/playground/demos/society_of_agents/probate_agent.py new file mode 100644 index 00000000..04660860 --- /dev/null +++ b/playground/demos/society_of_agents/probate_agent.py @@ -0,0 +1,166 @@ +""" +IMG -> JSON vlm + +extract forms from images + + +""" + +from swarms import Agent, OpenAIChat + +model = OpenAIChat(max_tokens=4000) + +SYSTEM_PROMPT = """ + +### System Prompt for Expert in Los Angeles County Probate Laws + +--- + +**Objective:** +Your task is to assist individuals with questions and concerns regarding Los Angeles County probate laws and petitions. You will provide expert advice, clarify legal processes, explain legal terms, and guide users through the necessary steps involved in probate matters. Your responses should be accurate, detailed, and easy to understand. Aim to educate and support users while ensuring they feel confident in navigating the probate process. + +**Key Areas of Expertise:** +1. Understanding of probate laws specific to Los Angeles County. +2. Guidance on filing probate petitions. +3. Explanation of the probate process and timelines. +4. Clarification of legal terms and documentation. +5. Assistance with common probate issues such as wills, estates, and inheritance. +6. Detailed knowledge of forms and procedures required by the Los Angeles County Probate Court. + +**Guidelines for Providing Assistance:** +- Be precise and detailed in your explanations. +- Break down complex legal concepts into easy-to-understand language. +- Offer step-by-step guidance when appropriate. +- Use examples and analogies to clarify points. 
+- Maintain a professional and supportive tone.
+- Ensure that your advice is compliant with the latest laws and regulations.
+
+---
+
+### Many-Shot Examples
+
+**Example 1: Explaining the Probate Process**
+
+**User Query:** "Can you explain the probate process in Los Angeles County?"
+
+**Agent Response:**
+"Certainly! The probate process in Los Angeles County involves several steps to ensure that a deceased person's estate is properly distributed. Here is a step-by-step overview:
+
+1. **Filing a Petition:** The process begins by filing a Petition for Probate in the Los Angeles County Probate Court. This petition will request the court to appoint an executor or administrator for the estate.
+
+2. **Notifying Interested Parties:** Once the petition is filed, you must notify all interested parties, such as heirs and creditors, about the probate proceedings. This is typically done by publishing a notice in a local newspaper and mailing notices to known parties.
+
+3. **Inventory and Appraisal:** The appointed executor or administrator must take an inventory of the deceased's assets and have them appraised. This step is crucial for determining the value of the estate.
+
+4. **Paying Debts and Taxes:** Any outstanding debts and taxes must be paid from the estate's assets. This includes final income taxes, estate taxes, and debts owed to creditors.
+
+5. **Distribution of Assets:** After all debts and taxes are settled, the remaining assets are distributed to the beneficiaries according to the will or, if there is no will, according to California's intestacy laws.
+
+6. **Closing the Estate:** The final step is to file a petition for the final distribution of the estate, which includes a detailed accounting of all transactions. Once approved by the court, the estate is officially closed.
+
+This process can take anywhere from several months to a few years, depending on the complexity of the estate and any potential disputes that may arise."
+
+---
+
+**Example 2: Guidance on Filing a Probate Petition**
+
+**User Query:** "How do I file a probate petition in Los Angeles County?"
+
+**Agent Response:**
+"Filing a probate petition in Los Angeles County involves several important steps. Here's what you need to do:
+
+1. **Obtain the Necessary Forms:** First, you'll need to obtain the required forms from the Los Angeles County Probate Court. The main form you'll need is the Petition for Probate (Form DE-111).
+
+2. **Complete the Petition:** Fill out the Petition for Probate form with detailed information about the deceased, their assets, and the proposed executor or administrator. Be thorough and accurate in your responses.
+
+3. **Attach Required Documents:** Attach the original will (if one exists), a copy of the death certificate, and any other required documents. Ensure all paperwork is complete and properly organized.
+
+4. **File the Petition:** Submit the completed petition and attachments to the Los Angeles County Probate Court. You will need to pay a filing fee, which varies depending on the value of the estate.
+
+5. **Schedule a Hearing:** After filing the petition, the court will schedule a hearing date. You must notify all interested parties of the hearing date and provide them with copies of the petition.
+
+6. **Attend the Hearing:** Attend the probate hearing on the scheduled date. If there are no objections, the court will approve the petition and appoint the executor or administrator.
+
+7. **Follow-Up Steps:** Once appointed, the executor or administrator will need to follow the subsequent steps in the probate process, such as inventorying the estate, notifying creditors, and distributing assets.
+
+If you have any questions or need further assistance with the forms, you can visit the Los Angeles County Probate Court's website or contact a probate attorney."
+
+---
+
+**Example 3: Clarifying Legal Terms**
+
+**User Query:** "What is the difference between an executor and an administrator in probate?"
+
+**Agent Response:**
+"Great question! In probate, the terms 'executor' and 'administrator' refer to individuals responsible for managing the deceased's estate, but they are appointed in different circumstances:
+
+- **Executor:** An executor is a person named in the deceased's will to manage their estate. The executor is responsible for carrying out the instructions in the will, which includes distributing assets to beneficiaries, paying debts, and handling any other matters specified in the will. Because the executor is chosen by the deceased, their appointment must be approved by the probate court.
+
+- **Administrator:** An administrator is appointed by the probate court when there is no will (intestate) or if the named executor is unable or unwilling to serve. The administrator performs similar duties to an executor, but their actions are guided by state laws rather than the instructions of a will. The court typically appoints a close family member as the administrator.
+
+In summary, the main difference is how they are appointed: executors are named in a will, while administrators are appointed by the court when there is no will or no named executor. Both roles involve managing and distributing the estate, but they follow different guidelines based on whether a will exists."
+
+---
+
+**Example 4: Addressing Common Probate Issues**
+
+**User Query:** "What happens if someone contests the will during probate?"
+
+**Agent Response:**
+"If someone contests a will during probate, it can complicate and prolong the process. Here’s how it typically unfolds:
+
+1. **Filing a Contest:** A will contest must be filed with the probate court. The person contesting the will (the contestant) must provide valid reasons, such as claiming the will was forged, the testator lacked mental capacity, or the will was executed under undue influence.
+
+2. **Initial Review:** The court will initially review the contest to determine if there is sufficient basis to proceed. If the court finds the contest to be without merit, it may dismiss the case early on.
+
+3. **Discovery Phase:** If the contest proceeds, the discovery phase begins, where both sides gather evidence. This can include witness testimonies, medical records, and other documents to support their claims.
+
+4. **Court Hearing:** A formal court hearing will be scheduled where both sides present their arguments and evidence. The judge will listen to both parties and examine the validity of the contest.
+
+5. **Court’s Decision:** After considering all the evidence, the court will make a decision. If the court upholds the will, the probate process continues as planned. If the court finds the will invalid, the court may distribute the estate according to a previous valid will or, if none exists, according to state intestacy laws.
+
+6. **Appeals:** Either party can appeal the court's decision if they believe there was a legal error. This can further extend the probate process.
+
+Contesting a will is a serious matter and can be emotionally and financially taxing for all parties involved. It’s advisable to consult with a probate attorney to navigate the complexities of a will contest."
+
+---
+
+**Final Note:**
+Use these examples as a guide to provide detailed, accurate, and supportive responses to users' queries about Los Angeles County probate laws and petitions. Ensure that your advice is clear and actionable, helping users to confidently handle their probate-related matters.
+"""
+
+
+# Initialize the agent
+agent = Agent(
+    agent_name="Probate-Law-Agent",
+    system_prompt=SYSTEM_PROMPT,
+    llm=model,
+    max_loops="auto",
+    autosave=True,
+    # dynamic_temperature_enabled=True,
+    dashboard=False,
+    verbose=True,
+    streaming_on=True,
+    # interactive=True, # Set to False to disable interactive mode
+    dynamic_temperature_enabled=True,
+    saved_state_path="probate_agent.json",
+    # tools=[Add your functions here# ],
+    # stopping_token="Stop!",
+    # interactive=True,
+    # docs_folder="docs", # Enter your folder name
+    # pdf_path="docs/finance_agent.pdf",
+    # sop="Calculate the profit for a company.",
+    # sop_list=["Calculate the profit for a company."],
+    user_name="swarms_corp",
+    # # docs=
+    # # docs_folder="docs",
+    retry_attempts=3,
+    # context_length=1000,
+    # tool_schema = dict
+    context_length=1000,
+    # agent_ops_on=True,
+    interactive=True,
+    # long_term_memory=ChromaDB(docs_folder="artifacts"),
+)
+
+agent.run("What is probate law?")
diff --git a/playground/demos/society_of_agents/receipt.pdf b/playground/demos/society_of_agents/receipt.pdf
new file mode 100644
index 00000000..af4f4361
Binary files /dev/null and b/playground/demos/society_of_agents/receipt.pdf differ
diff --git a/pyproject.toml b/pyproject.toml
index 36ac61f6..65359259 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "swarms"
-version = "5.3.5"
+version = "5.3.7"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -24,7 +24,12 @@ keywords = [
     "transformers",
     "multi-agent",
     "swarms of agents",
-    "chicken nuggets",
+    "Enterprise-Grade",
+    "Production-Grade",
+    "Agents",
+    "Multi-Grade-Agents",
+    "Swarms",
+
 ]
 classifiers = [
     "Development Status :: 4 - Beta",
diff --git a/swarms/structs/graph_workflow.py b/swarms/structs/graph_workflow.py
index d2781f50..fec6af8d 100644
--- a/swarms/structs/graph_workflow.py
+++ b/swarms/structs/graph_workflow.py
@@ -210,40 +210,40 @@ class GraphWorkflow(BaseModel):
             raise e
 
 
-# Example usage
-if __name__ == "__main__":
-    from swarms import Agent, OpenAIChat
-    import os
-    from dotenv import load_dotenv
-
-    load_dotenv()
-
-    api_key = os.environ.get("OPENAI_API_KEY")
-
-    llm = OpenAIChat(
-        temperature=0.5, openai_api_key=api_key, max_tokens=4000
-    )
-    agent1 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
-    agent2 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
-
-    def sample_task():
-        print("Running sample task")
-        return "Task completed"
-
-    wf_graph = GraphWorkflow()
-    wf_graph.add_node(Node(id="agent1", type=NodeType.AGENT, agent=agent1))
-    wf_graph.add_node(Node(id="agent2", type=NodeType.AGENT, agent=agent2))
-    wf_graph.add_node(
-        Node(id="task1", type=NodeType.TASK, callable=sample_task)
-    )
-    wf_graph.add_edge(Edge(source="agent1", target="task1"))
-    wf_graph.add_edge(Edge(source="agent2", target="task1"))
-
-    wf_graph.set_entry_points(["agent1", "agent2"])
-    wf_graph.set_end_points(["task1"])
-
-    print(wf_graph.visualize())
-
-    # Run the workflow
-    results = wf_graph.run()
-    print("Execution results:", results)
+# # Example usage
+# if __name__ == "__main__":
+#     from swarms import Agent, OpenAIChat
+#     import os
+#     from dotenv import load_dotenv
+
+#     load_dotenv()
+
+#     api_key = os.environ.get("OPENAI_API_KEY")
+
+#     llm = OpenAIChat(
+#         temperature=0.5, openai_api_key=api_key, max_tokens=4000
+#     )
+#     agent1 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
+#     agent2 = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
+
+#     def sample_task():
+#         print("Running sample task")
+#         return "Task completed"
+
+#     wf_graph = GraphWorkflow()
+#     wf_graph.add_node(Node(id="agent1", type=NodeType.AGENT, agent=agent1))
+#     wf_graph.add_node(Node(id="agent2", type=NodeType.AGENT, agent=agent2))
+#     wf_graph.add_node(
+#         Node(id="task1", type=NodeType.TASK, callable=sample_task)
+#     )
+#     wf_graph.add_edge(Edge(source="agent1", target="task1"))
+#     wf_graph.add_edge(Edge(source="agent2", target="task1"))
+
+#     wf_graph.set_entry_points(["agent1", "agent2"])
+#     wf_graph.set_end_points(["task1"])
+
+#     print(wf_graph.visualize())
+
+#     # Run the workflow
+#     results = wf_graph.run()
+#     print("Execution results:", results)
diff --git a/tests/structs/test_agent_rearrange.py b/tests/structs/test_agent_rearrange.py
index e7d32adb..3ece7f1d 100644
--- a/tests/structs/test_agent_rearrange.py
+++ b/tests/structs/test_agent_rearrange.py
@@ -1,58 +1,124 @@
 import pytest
-from swarms.structs.rearrange import AgentRearrange
+from unittest.mock import MagicMock
+from swarms import AgentRearrange
 
 
-# Mocking the Agent class
 class MockAgent:
-    def __init__(self, agent_name):
-        self.agent_name = agent_name
-
-    def run(self, task):
-        return f"Running {task}"
-
-
-# Test for AgentRearrange class
-class TestAgentRearrange:
-    @pytest.fixture
-    def agent_rearrange(self):
-        agents = [MockAgent("agent1"), MockAgent("agent2")]
-        return AgentRearrange(agents=agents)
-
-    def test_parse_pattern(self, agent_rearrange):
-        assert agent_rearrange.parse_pattern("agent1->agent2") is True
-        assert agent_rearrange.parse_pattern("agent3->agent4") is False
-
-    def test_self_find_agent_by_name(self, agent_rearrange):
-        assert (
-            agent_rearrange.self_find_agent_by_name("agent1").agent_name
-            == "agent1"
-        )
-        assert agent_rearrange.self_find_agent_by_name("agent3") is None
-
-    def test_agent_exists(self, agent_rearrange):
-        assert agent_rearrange.agent_exists("agent1") is True
-        assert agent_rearrange.agent_exists("agent3") is False
-
-    def test_parse_concurrent_flow(self, agent_rearrange):
-        agent_rearrange.parse_concurrent_flow("agent1->agent2")
-        assert "agent2" in agent_rearrange.flows["agent1"]
-
-    def test_parse_sequential_flow(self, agent_rearrange):
-        agent_rearrange.parse_sequential_flow("agent1", "agent2")
-        assert "agent2" in agent_rearrange.flows["agent1"]
-
-    def test_execute_task(self, agent_rearrange):
-        assert (
-            agent_rearrange.execute_task("agent1", "agent2", "task1", {})
-            == "Running task1 (from agent2)"
-        )
-
-    def test_process_flows(self, agent_rearrange):
-        assert agent_rearrange.process_flows(
-            "agent1->agent2", "task1", {}
-        ) == ["Running task1"]
-
-    def test_call(self, agent_rearrange):
-        assert agent_rearrange(
-            pattern="agent1->agent2", default_task="task1"
-        ) == ["Running task1"]
+    def __init__(self, name):
+        self.name = name
+
+    def run(self, task, img=None, *args, **kwargs):
+        return f"{self.name} processed {task}"
+
+
+@pytest.fixture
+def mock_agents():
+    return [
+        MockAgent(name="Agent1"),
+        MockAgent(name="Agent2"),
+        MockAgent(name="Agent3"),
+    ]
+
+
+@pytest.fixture
+def agent_rearrange(mock_agents):
+    return AgentRearrange(
+        agents=mock_agents, flow="Agent1 -> Agent2 -> Agent3"
+    )
+
+
+def test_initialization(mock_agents):
+    agent_rearrange = AgentRearrange(
+        agents=mock_agents, flow="Agent1 -> Agent2 -> Agent3"
+    )
+    assert len(agent_rearrange.agents) == 3
+    assert agent_rearrange.flow == "Agent1 -> Agent2 -> Agent3"
+
+
+def test_add_agent(agent_rearrange):
+    new_agent = MockAgent(name="Agent4")
+    agent_rearrange.add_agent(new_agent)
+    assert "Agent4" in agent_rearrange.agents
+
+
+def test_remove_agent(agent_rearrange):
+    agent_rearrange.remove_agent("Agent2")
+    assert "Agent2" not in agent_rearrange.agents
+
+
+def test_add_agents(agent_rearrange):
+    new_agents = [MockAgent(name="Agent4"), MockAgent(name="Agent5")]
+    agent_rearrange.add_agents(new_agents)
+    assert "Agent4" in agent_rearrange.agents
+    assert "Agent5" in agent_rearrange.agents
+
+
+def test_validate_flow_valid(agent_rearrange):
+    assert agent_rearrange.validate_flow() is True
+
+
+def test_validate_flow_invalid(agent_rearrange):
+    agent_rearrange.flow = "Agent1 -> Agent4"
+    with pytest.raises(ValueError):
+        agent_rearrange.validate_flow()
+
+
+def test_run(agent_rearrange):
+    result = agent_rearrange.run("Test Task")
+    assert (
+        result
+        == "Agent1 processed Test Task; Agent2 processed Agent1 processed Test Task; Agent3 processed Agent2 processed Agent1 processed Test Task"
+    )
+
+
+def test_run_with_custom_tasks(agent_rearrange):
+    custom_tasks = {"Agent2": "Custom Task"}
+    result = agent_rearrange.run("Test Task", custom_tasks=custom_tasks)
+    assert (
+        result
+        == "Agent1 processed Test Task; Agent2 processed Custom Task; Agent3 processed Agent2 processed Custom Task"
+    )
+
+
+def test_run_with_human_intervention(agent_rearrange):
+    agent_rearrange.human_in_the_loop = True
+    agent_rearrange.custom_human_in_the_loop = MagicMock(
+        return_value="Human processed Task"
+    )
+    agent_rearrange.flow = "Agent1 -> H -> Agent3"
+    result = agent_rearrange.run("Test Task")
+    assert (
+        result
+        == "Agent1 processed Test Task; Human processed Task; Agent3 processed Human processed Task"
+    )
+
+
+def test_run_sub_swarm(agent_rearrange):
+    sub_swarm_flow = "Agent1 -> Agent3"
+    agent_rearrange.add_sub_swarm("SubSwarm1", sub_swarm_flow)
+    result = agent_rearrange.run_sub_swarm("SubSwarm1", "Sub Task", None)
+    assert (
+        result
+        == "Agent1 processed Sub Task; Agent3 processed Agent1 processed Sub Task"
+    )
+
+
+def test_process_agent_or_swarm(agent_rearrange):
+    result = agent_rearrange.process_agent_or_swarm(
+        "Agent1", "Process Task", None
+    )
+    assert result == "Agent1 processed Process Task"
+
+
+def test_track_history(agent_rearrange):
+    agent_rearrange.track_history("Agent1", "Task Result")
+    assert agent_rearrange.swarm_history["Agent1"] == ["Task Result"]
+
+
+def test_human_intervention(agent_rearrange):
+    agent_rearrange.human_in_the_loop = True
+    agent_rearrange.custom_human_in_the_loop = MagicMock(
+        return_value="Human processed Task"
+    )
+    result = agent_rearrange.human_intervention("Task")
+    assert result == "Human processed Task"
diff --git a/tests/structs/test_multi_agent_collab.py b/tests/structs/test_multi_agent_collab.py
index 29c58dcf..d17e549a 100644
--- a/tests/structs/test_multi_agent_collab.py
+++ b/tests/structs/test_multi_agent_collab.py
@@ -4,15 +4,58 @@
 from unittest.mock import Mock
 
 import pytest
 
-from swarms.models import OpenAIChat
-from swarms.structs import Agent
+from swarms import Agent, OpenAIChat
 from swarms.structs.multi_agent_collab import MultiAgentCollaboration
 
 
-# Sample agents for testing
-agent1 = Agent(llm=OpenAIChat(), max_loops=2)
-agent2 = Agent(llm=OpenAIChat(), max_loops=2)
-agents = [agent1, agent2]
-
+# Initialize the director agent
+
+director = Agent(
+    agent_name="Director",
+    system_prompt="Directs the tasks for the workers",
+    llm=OpenAIChat(),
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    state_save_file_type="json",
+    saved_state_path="director.json",
+)
+
+
+# Initialize worker 1
+
+worker1 = Agent(
+    agent_name="Worker1",
+    system_prompt="Generates a transcript for a youtube video on what swarms are",
+    llm=OpenAIChat(),
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    state_save_file_type="json",
+    saved_state_path="worker1.json",
+)
+
+
+# Initialize worker 2
+worker2 = Agent(
+    agent_name="Worker2",
+    system_prompt="Summarizes the transcript generated by Worker1",
+    llm=OpenAIChat(),
+    max_loops=1,
+    dashboard=False,
+    streaming_on=True,
+    verbose=True,
+    stopping_token="<DONE>",
+    state_save_file_type="json",
+    saved_state_path="worker2.json",
+)
+
+
+# Create a list of agents
+agents = [director, worker1, worker2]
 
 @pytest.fixture
 def collaboration():