From c45a5a3881cd50924861d916fbd5195149392fb3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Aug 2023 10:54:58 +0000 Subject: [PATCH 01/15] Bump pypa/gh-action-pypi-publish from 1.8.8 to 1.8.10 Bumps [pypa/gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish) from 1.8.8 to 1.8.10. - [Release notes](https://github.com/pypa/gh-action-pypi-publish/releases) - [Commits](https://github.com/pypa/gh-action-pypi-publish/compare/f8c70e705ffc13c3b4d1221169b84f12a75d6ca8...b7f401de30cb6434a1e19f805ff006643653240e) --- updated-dependencies: - dependency-name: pypa/gh-action-pypi-publish dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/python-publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml index 4734d02f..545e3432 100644 --- a/.github/workflows/python-publish.yml +++ b/.github/workflows/python-publish.yml @@ -26,7 +26,7 @@ jobs: - name: Build package run: python -m build - name: Publish package - uses: pypa/gh-action-pypi-publish@f8c70e705ffc13c3b4d1221169b84f12a75d6ca8 + uses: pypa/gh-action-pypi-publish@b7f401de30cb6434a1e19f805ff006643653240e with: user: __token__ password: ${{ secrets.PYPI_API_TOKEN }} \ No newline at end of file From 35a1735a31211d42e0c95929447fa930f93b7900 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:54:58 +0000 Subject: [PATCH 02/15] Bump actions/first-interaction from 1.1.1 to 1.2.0 Bumps [actions/first-interaction](https://github.com/actions/first-interaction) from 1.1.1 to 1.2.0. - [Release notes](https://github.com/actions/first-interaction/releases) - [Commits](https://github.com/actions/first-interaction/compare/v1.1.1...v1.2.0) --- updated-dependencies: - dependency-name: actions/first-interaction dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/welcome.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/welcome.yml b/.github/workflows/welcome.yml index a993236c..eadc0b68 100644 --- a/.github/workflows/welcome.yml +++ b/.github/workflows/welcome.yml @@ -11,7 +11,7 @@ jobs: name: 👋 Welcome runs-on: ubuntu-latest steps: - - uses: actions/first-interaction@v1.1.1 + - uses: actions/first-interaction@v1.2.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} issue-message: "Hello there, thank you for opening an Issue ! 🙏🏻 The team was notified and they will get back to you asap." From ef1f6603a015094f9096c395687990525d0b9bd1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:55:06 +0000 Subject: [PATCH 03/15] Bump actions/setup-python from 2 to 4 Bumps [actions/setup-python](https://github.com/actions/setup-python) from 2 to 4. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v2...v4) --- updated-dependencies: - dependency-name: actions/setup-python dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/code_quality_control.yml | 2 +- .github/workflows/cos_integration.yml | 2 +- .github/workflows/docs_test.yml | 2 +- .github/workflows/lints.yml | 2 +- .github/workflows/pr_request_checks.yml | 2 +- .github/workflows/testing.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/code_quality_control.yml b/.github/workflows/code_quality_control.yml index 4b94b454..261a4fdc 100644 --- a/.github/workflows/code_quality_control.yml +++ b/.github/workflows/code_quality_control.yml @@ -14,7 +14,7 @@ jobs: uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.x diff --git a/.github/workflows/cos_integration.yml b/.github/workflows/cos_integration.yml index abae70b2..4a20c9dd 100644 --- a/.github/workflows/cos_integration.yml +++ b/.github/workflows/cos_integration.yml @@ -13,7 +13,7 @@ jobs: uses: actions/checkout@v2 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: 3.x diff --git a/.github/workflows/docs_test.yml b/.github/workflows/docs_test.yml index c7b1ce6e..b9d67583 100644 --- a/.github/workflows/docs_test.yml +++ b/.github/workflows/docs_test.yml @@ -14,7 +14,7 @@ jobs: uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.x diff --git a/.github/workflows/lints.yml b/.github/workflows/lints.yml index b61e471c..dcce52c2 100644 --- a/.github/workflows/lints.yml +++ b/.github/workflows/lints.yml @@ -14,7 +14,7 @@ jobs: uses: actions/checkout@v2 - name: Set up Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.x diff --git a/.github/workflows/pr_request_checks.yml b/.github/workflows/pr_request_checks.yml index dccdb9e2..046e5bae 100644 --- a/.github/workflows/pr_request_checks.yml +++ b/.github/workflows/pr_request_checks.yml @@ -14,7 +14,7 @@ jobs: uses: actions/checkout@v2 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: 3.x diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index d6a40768..080bcbb6 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -14,7 +14,7 @@ jobs: uses: actions/checkout@v2 - name: Set up Python - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.x From 70aad75fc69faeecea206b57a1b03aa597a0da79 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 10:55:13 +0000 Subject: [PATCH 04/15] Bump actions/checkout from 2 to 4 Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 4. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/code_quality_control.yml | 2 +- .github/workflows/cos_integration.yml | 2 +- .github/workflows/docs.yml | 2 +- .github/workflows/docs_test.yml | 2 +- .github/workflows/lints.yml | 2 +- .github/workflows/pr_request_checks.yml | 2 +- .github/workflows/testing.yml | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/code_quality_control.yml b/.github/workflows/code_quality_control.yml index 4b94b454..89a1f48a 100644 --- a/.github/workflows/code_quality_control.yml +++ b/.github/workflows/code_quality_control.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v3 diff --git a/.github/workflows/cos_integration.yml b/.github/workflows/cos_integration.yml index abae70b2..6eed30ad 100644 --- a/.github/workflows/cos_integration.yml +++ b/.github/workflows/cos_integration.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v2 diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index a7e64a06..0f89cb4c 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -10,7 +10,7 @@ jobs: deploy: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: 3.x diff --git a/.github/workflows/docs_test.yml b/.github/workflows/docs_test.yml index c7b1ce6e..cef7db71 100644 --- a/.github/workflows/docs_test.yml +++ b/.github/workflows/docs_test.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v3 diff --git a/.github/workflows/lints.yml b/.github/workflows/lints.yml index b61e471c..f4f4b726 100644 --- a/.github/workflows/lints.yml +++ b/.github/workflows/lints.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v3 diff --git a/.github/workflows/pr_request_checks.yml b/.github/workflows/pr_request_checks.yml index dccdb9e2..4f1e990d 100644 --- a/.github/workflows/pr_request_checks.yml +++ b/.github/workflows/pr_request_checks.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v2 diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index d6a40768..0a5cab4b 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v3 From 53751a235989df70d4d6ac81b90a85de6ff0d47c Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 2 Nov 2023 17:14:51 -0400 Subject: [PATCH 05/15] flow example and flow walkthrough guide --- positive_med.py => demos/positive_med.py | 4 - docs/examples/flow.md | 449 +++++++++++ errors.txt | 22 - flow.py | 21 +- groupchat.py | 113 ++- mkdocs.yml | 1 + stacked_worker.py | 134 ---- swarms/structs/flow.py | 33 +- swarms/tools/developer.py | 925 ----------------------- swarms/tools/file_mangagement.py | 17 - 10 files changed, 592 insertions(+), 1127 deletions(-) rename positive_med.py => demos/positive_med.py (99%) create mode 100644 
docs/examples/flow.md delete mode 100644 errors.txt delete mode 100644 stacked_worker.py delete mode 100644 swarms/tools/developer.py delete mode 100644 swarms/tools/file_mangagement.py diff --git a/positive_med.py b/demos/positive_med.py similarity index 99% rename from positive_med.py rename to demos/positive_med.py index 25ecfe73..e8f879c9 100644 --- a/positive_med.py +++ b/demos/positive_med.py @@ -397,7 +397,3 @@ distribution_agent_out = print( "magenta", ) ) - - - - diff --git a/docs/examples/flow.md b/docs/examples/flow.md new file mode 100644 index 00000000..3403d55b --- /dev/null +++ b/docs/examples/flow.md @@ -0,0 +1,449 @@ +# Walkthrough Guide: Getting Started with Swarms Module's Flow Feature + +## Introduction + +Welcome to the walkthrough guide for beginners on using the "Flow" feature within the Swarms module. This guide is designed to help you understand and utilize the capabilities of the Flow class for seamless interactions with AI language models. + +**Target Audience:** + +- This guide is primarily intended for beginners who want to learn how to use the Flow feature in the Swarms module to interact with AI language models effectively. + +## Table of Contents + +1\. **Understanding the Flow Feature** + +   - 1.1 What is the Flow Feature? + +   - 1.2 Key Concepts + +2\. **Setting Up the Environment** + +   - 2.1 Prerequisites + +   - 2.2 Installing Required Libraries + +   - 2.3 Importing Necessary Modules + +3\. **Creating a Flow Instance** + +   - 3.1 Importing the Required Modules + +   - 3.2 Initializing the Language Model + +   - 3.3 Creating a Flow Instance + +4\. **Running a Flow** + +   - 4.1 Defining the Task + +   - 4.2 Running the Flow + +   - 4.3 Interacting with the AI + +   - 4.4 Dynamic Temperature Handling + +5\. **Customizing Flow Behavior** + +   - 5.1 Stopping Conditions + +   - 5.2 Retry Mechanism + +   - 5.3 Loop Interval + +   - 5.4 Interactive Mode + +6\. **Saving and Loading Flows** + +   - 6.1 Saving a Flow + +   - 6.2 Loading a Saved Flow + +7\. **Analyzing Feedback and Undoing Actions** + +   - 7.1 Providing Feedback + +   - 7.2 Undoing the Last Action + +   - 7.3 Response Filtering + +8\. **Advanced Features** + +   - 8.1 Streamed Generation + +   - 8.2 Real-time Token Generation + +9\. **Best Practices** + +   - 9.1 Conciseness and Clarity + +   - 9.2 Active Voice + +   - 9.3 Highlighting Important Points + +   - 9.4 Consistent Style + +10\. **Conclusion** + +--- + +## 1. Understanding the Flow Feature + +### 1.1 What is the Flow Feature? + +The Flow feature is a powerful component of the Swarms framework that allows developers to create a sequential, conversational interaction with AI language models. It enables developers to build multi-step conversations, generate long-form content, and perform complex tasks using AI. The Flow class provides autonomy to language models, enabling them to generate responses in a structured manner. + +### 1.2 Key Concepts + +Before diving into the practical aspects, let's clarify some key concepts related to the Flow feature: + +- **Flow:** A Flow is an instance of the Flow class that represents an ongoing interaction with an AI language model. It consists of a series of steps and responses. + +- **Stopping Condition:** A stopping condition is a criterion that, when met, allows the Flow to stop generating responses. This can be user-defined and can depend on the content of the responses. + +- **Loop Interval:** The loop interval specifies the time delay between consecutive interactions with the AI model. 
+ +- **Retry Mechanism:** In case of errors or failures during AI model interactions, the Flow can be configured to make multiple retry attempts with a specified interval. + +- **Interactive Mode:** Interactive mode allows developers to have a back-and-forth conversation with the AI model, making it suitable for real-time interactions. + +## 2. Setting Up the Environment + +### 2.1 Prerequisites + +Before you begin, ensure that you have the following prerequisites in place: + +- Basic understanding of Python programming. + +- Access to an AI language model or API key for language model services. + +### 2.2 Installing Required Libraries + +To use the Flow feature, you'll need the following libraries available: + +- `termcolor`: For colorful console output. + +- `inspect`: For introspecting the language model (part of the Python standard library, no installation required). + +- `random`: For handling dynamic temperature (also part of the standard library). + +- Other dependencies as needed for your specific environment. + +Only `termcolor` is a third-party package; install it using pip: + +```bash + +pip install termcolor + +``` + +### 2.3 Importing Necessary Modules + +In your Python script or environment, import the necessary modules from the Swarms framework: + +```python + +import json + +import logging + +import time + +from typing import Any, Callable, Dict, List, Optional, Tuple, Generator + +from termcolor import colored + +import inspect + +import random + +``` + +Ensure that you have these modules imported to proceed with the guide. + +## 3. Creating a Flow Instance + +To use the Flow feature, you need to create an instance of the Flow class. This instance will allow you to interact with the AI language model. + +### 3.1 Importing the Required Modules + +In your script, import the required modules for the Flow class: + +```python + +from swarms.structs import Flow + +from swarms.models import OpenAIChat  # Adjust this import according to your specific language model. + +``` + +### 3.2 Initializing the Language Model + +Initialize the language model you want to use for interactions. In this example, we're using the `OpenAIChat` model: + +```python + +# Replace 'api_key' with your actual API key or configuration. + +llm = OpenAIChat( + +    openai_api_key='your_api_key', + +    temperature=0.5, + +    max_tokens=3000, + +) + +``` + +Make sure to provide the necessary configuration, such as your API key and any model-specific parameters. + +### 3.3 Creating a Flow Instance + +Now, create an instance of the Flow class by passing the initialized language model: + +```python + +flow = Flow( + +    llm=llm, + +    max_loops=5, + +    dashboard=True, + +    stopping_condition=None,  # You can define a stopping condition as needed. + +    loop_interval=1, + +    retry_attempts=3, + +    retry_interval=1, + +    interactive=False,  # Set to 'True' for interactive mode. + +    dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling. + +) + +``` + +This sets up your Flow instance with the specified parameters. Adjust these parameters based on your requirements. + +## 4. Running a Flow + +Now that you have created a Flow instance, let's run a simple interaction with the AI model using the Flow. + +### 4.1 Defining the Task + +Define the task you want the AI model to perform. This can be any prompt or question you have in mind. For example: + +```python + +task = "Generate a 10,000 word blog on health and wellness."
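+ +# Note: the task is free-form natural language, so any prompt can be +# substituted here. An illustrative (hypothetical) alternative: +# task = "Write a 500 word guide to better sleep habits."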
+ +``` + +### 4.2 Running the Flow + +Run the Flow by providing the task you defined: + +```python + +out = flow.run(task) + +``` + +The Flow will interact with the AI model, generate responses, and store the conversation history. + +### 4.3 Interacting with the AI + +Depending on whether you set the `interactive` parameter to `True` or `False` during Flow initialization, you can interact with the AI in real-time or simply receive the generated responses in sequence. + +If `interactive` is set to `True`, you'll have a back-and-forth conversation with the AI, where you provide input after each AI response. + +### 4.4 Dynamic Temperature Handling + +If you set the `dynamic_temperature` parameter to `True` during Flow initialization, the Flow class will handle temperature dynamically. Temperature affects the randomness of responses generated by the AI model. The dynamic temperature feature allows the temperature to change randomly within a specified range, enhancing response diversity. + +## 5. Customizing Flow Behavior + +The Flow feature provides various customization options to tailor its behavior to your specific use case. + +### 5.1 Stopping Conditions + +You can define custom stopping conditions that instruct the Flow to stop generating responses based on specific criteria. For example, you can stop when a certain keyword appears in the response: + +```python + +def custom_stopping_condition(response: str) -> bool: + +    return "stop" in response.lower()  # compare against the lowercased response + +# Set the custom stopping condition when creating the Flow instance. + +flow = Flow( + +    llm=llm, + +    max_loops=5, + +    stopping_condition=custom_stopping_condition, + +    # Other parameters... + +) + +``` + +### 5.2 Retry Mechanism + +In case of errors or issues during AI model interactions, you can configure a retry mechanism. Specify the number of retry attempts and the interval between retries: + +```python + +flow = Flow( + +    llm=llm, + +    max_loops=5, + +    retry_attempts=3, + +    retry_interval=1, + +    # Other parameters... + +) + +``` + +### 5.3 Loop Interval + +The `loop_interval` parameter determines the time delay between consecutive interactions with the AI model. Adjust this value based on your desired pace of conversation. + +### 5.4 Interactive Mode + +Set the `interactive` parameter to `True` if you want to have real-time conversations with the AI model. In interactive mode, you provide input after each AI response. + +## 6. Saving and Loading Flows + +You can save and load Flow instances to maintain conversation history or switch between different tasks. + +### 6.1 Saving a Flow + +To save a Flow instance along with its conversation history: + +```python + +flow.save("path/flow_history.json") + +``` + +This stores the conversation history as a JSON file for future reference. + +### 6.2 Loading a Saved Flow + +To load a previously saved Flow instance: + +```python + +loaded_flow = Flow(llm=llm, max_loops=5) + +loaded_flow.load("path/flow_history.json") + +``` + +This loads the conversation history into the new Flow instance, allowing you to continue the conversation or analyze past interactions. + +## 7. Analyzing Feedback and Undoing Actions + +The Flow feature supports feedback collection and the ability to undo actions. + +### 7.1 Providing Feedback + +You can provide feedback on AI responses within the Flow.
Feedback can be used to analyze the quality of responses or highlight issues: + +```python + +flow.provide_feedback("The response was unclear.") + +``` + +### 7.2 Undoing the Last Action + +If you want to undo the last action taken within the Flow and revert to the previous state, you can use the `undo_last` method: + +```python + +previous_state, message = flow.undo_last() + +``` + +This helps you correct or modify previous interactions. + +### 7.3 Response Filtering + +The Flow feature allows you to add response filters to filter out specific words or content from AI responses. This can be useful for content moderation or filtering sensitive information: + +```python + +flow.add_response_filter("sensitive_word") + +``` + +The response filters will replace filtered words with placeholders, ensuring that sensitive content is not displayed. + +## 8. Advanced Features + +### 8.1 Streamed Generation + +Streamed generation allows you to generate responses token by token in real-time. This can be useful for creating interactive and dynamic conversations: + +```python + +response = flow.streamed_generation("Generate a report on finance") + +``` + +This function streams each token of the response with a slight delay, simulating real-time conversation. + +### 8.2 Real-time Token Generation + +For even finer control over token generation, you can use the `streamed_token_generation` method. This generates tokens one by one, allowing you to have precise control over the conversation pace: + +```python + +for token in flow.streamed_token_generation("Generate a report on finance"): + +    print(token, end="") + +``` + +## 9. Best Practices + +To create effective and user-friendly interactions with the AI model using the Flow feature, consider the following best practices: + +### 9.1 Conciseness and Clarity + +Ensure that your prompts and responses are concise and to the point. Avoid unnecessary verbosity. + +### 9.2 Active Voice + +Use an active voice when giving instructions or prompts. For example, say, "Generate a report" instead of "A report should be generated." + +### 9.3 Highlighting Important Points + +Use formatting options like bold text, italics, or color highlights to draw attention to important points within the conversation. + +### 9.4 Consistent Style + +Maintain a consistent tone and style throughout the conversation. If there is a style guide or specific formatting conventions, adhere to them. + +## 10. Conclusion + +In conclusion, the Flow feature in the Swarms module provides a versatile and interactive way to interact with AI language models. By following this walkthrough guide and considering the best practices, you can effectively harness the power of Flow for a wide range of applications, from generating content to performing complex tasks. + +Start creating your own interactive conversations and enjoy the benefits of seamless AI interactions with the Flow feature. Happy coding! 
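+ +## Appendix: Putting It All Together + +As a quick reference, the sketch below ties together the steps covered in this guide: initializing the model, defining a custom stopping condition, running a task, and saving the conversation history. It is a minimal, illustrative example that reuses only the classes and parameters introduced above (`OpenAIChat`, `Flow`, `run`, `save`); the API key, task text, and file path are placeholders to adapt to your environment. + +```python + +from swarms.models import OpenAIChat + +from swarms.structs import Flow + +def custom_stopping_condition(response: str) -> bool: + +    # Stop looping once the model signals that it is finished. + +    return "done" in response.lower() + +llm = OpenAIChat( + +    openai_api_key="your_api_key",  # placeholder + +    temperature=0.5, + +    max_tokens=3000, + +) + +flow = Flow( + +    llm=llm, + +    max_loops=5, + +    dashboard=True, + +    stopping_condition=custom_stopping_condition, + +) + +out = flow.run("Outline a 7-day wellness challenge.")  # any free-form prompt + +print(out) + +flow.save("path/flow_history.json")  # persist the history for later analysis + +``` + +To resume later, create a new `Flow` with the same model and call `load("path/flow_history.json")`, as shown in Section 6.2.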
\ No newline at end of file diff --git a/errors.txt b/errors.txt deleted file mode 100644 index 3ab96b03..00000000 --- a/errors.txt +++ /dev/null @@ -1,22 +0,0 @@ -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "user", "content": "Generate a 10,000 word blog on health and wellness."}], "model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 3000}' message='Post details' -Converted retries value: 2 -> Retry(total=2, connect=None, read=None, redirect=None, status=None) -Starting new HTTPS connection (1): api.openai.com:443 -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=13516 request_id=971b8437917cf6e46e5fe1340060f0e4 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "user", "content": "Title: The Ultimate Guide to Health and Wellness: Unlocking Your Full Potential\\n\\nIntroduction (Word Count: 500)\\nHealth and wellness are essential aspects of our lives that directly impact our overall well-being. In this comprehensive guide, we will explore various dimensions of health and wellness, providing valuable insights, practical tips, and evidence-based strategies to help you achieve optimal physical, mental, and emotional well-being. From nutrition and exercise to stress management and self-care, we will delve into every aspect of leading a healthy and fulfilling life. So, let\'s embark on this transformative journey together!\\n\\nTable of Contents:\\n\\n1. Understanding Health and Wellness (Word Count: 800)\\n1.1 Defining Health and Wellness\\n1.2 The Importance of Health and Wellness\\n1.3 The Connection between Physical, Mental, and Emotional Well-being\\n1.4 The Role of Lifestyle Choices in Health and Wellness\\n\\n2. Nourishing Your Body (Word Count: 1,200)\\n2.1 The Fundamentals of a Balanced Diet\\n2.2 The Power of Whole Foods and Nutrient Density\\n2.3 Understanding Macronutrients and Micronutrients\\n2.4 The Role of Hydration in Health\\n2.5 Exploring Different Dietary Approaches\\n\\n3. Moving Towards Fitness (Word Count: 1,200)\\n3.1 The Benefits of Regular Physical Activity\\n3.2 Designing an Effective Exercise Routine\\n3.3 Cardiovascular Exercise and Its Impact on Health\\n3.4 Strength Training for Optimal Fitness\\n3.5 The Importance of Flexibility and Balance\\n\\n4. Prioritizing Mental and Emotional Well-being (Word Count: 1,500)\\n4.1 Understanding Mental Health and Emotional Well-being\\n4.2 Stress Management Techniques and Coping Strategies\\n4.3 The Power of Mindfulness and Meditation\\n4.4 Building Resilience and Emotional Intelligence\\n4.5 Seeking Professional Help and Support\\n\\n5. Cultivating Healthy Habits (Word Count: 1,500)\\n5.1 The Science of Habit Formation\\n5.2 The Role of Sleep in Health and Wellness\\n5.3 Strategies for Effective Time Management\\n5.4 Creating a Healthy Home Environment\\n5.5 The Importance of Social Connections and Relationships\\n\\n6. Embracing Self-Care (Word Count: 1,000)\\n6.1 Understanding Self-Care and Its Impact on Well-being\\n6.2 Developing a Personalized Self-Care Routine\\n6.3 The Benefits of Regular Relaxation and Recreation\\n6.4 Exploring Creative Outlets for Self-Expression\\n6.5 Practicing Gratitude and Positive Thinking\\n\\n7. 
Navigating Common Health Concerns (Word Count: 1,800)\\n7.1 Preventing and Managing Chronic Diseases\\n7.2 Mental Health Disorders: Causes, Symptoms, and Treatments\\n7.3 Women\'s Health: From Menstruation to Menopause\\n7.4 Maintaining a Healthy Heart and Cardiovascular System\\n7.5 Strategies for Boosting Immune Function\\n\\n8. Holistic Approaches to Health and Wellness (Word Count: 1,000)\\n8.1 Traditional Medicine and Integrative Health Practices\\n8.2 The Benefits of Herbal Medicine and Natural Remedies\\n8.3 Exploring Alternative Therapies: Acupuncture, Ayurveda, and more\\n8.4 Harnessing the Power of Energy Healing and Chakra Balancing\\n8.5 The Role of Spirituality and Mind-Body Connection\\n\\nConclusion (Word Count: 300)\\nIn this extensive guide, we have covered a wide range of topics related to health and wellness, equipping you with the knowledge and tools to embark on your personal journey towards optimal well-being. Remember, true health and wellness are not achieved overnight but require consistent effort, commitment, and self-care. By implementing the strategies outlined in this guide, you can unlock your full potential and live a vibrant, fulfilling life. So, embrace the power of health and wellness and start your transformative journey today!\\n\\nWord Count: 10,000"}], "model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 3000}' message='Post details' -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=14472 request_id=351166c14151ef9e628dcd036573e36e response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "user", "content": "Note: The word count provided is an estimation and may vary slightly.\\n\\nTitle: The Ultimate Guide to Health and Wellness: Unlocking Your Full Potential\\n\\nIntroduction (Word Count: 500)\\nHealth and wellness are essential aspects of our lives that directly impact our overall well-being. In this comprehensive guide, we will explore various dimensions of health and wellness, providing valuable insights, practical tips, and evidence-based strategies to help you achieve optimal physical, mental, and emotional well-being. From nutrition and exercise to stress management and self-care, we will delve into every aspect of leading a healthy and fulfilling life. So, let\'s embark on this transformative journey together!\\n\\nTable of Contents:\\n\\n1. Understanding Health and Wellness (Word Count: 800)\\n1.1 Defining Health and Wellness\\n1.2 The Importance of Health and Wellness\\n1.3 The Connection between Physical, Mental, and Emotional Well-being\\n1.4 The Role of Lifestyle Choices in Health and Wellness\\n\\n2. Nourishing Your Body (Word Count: 1,200)\\n2.1 The Fundamentals of a Balanced Diet\\n2.2 The Power of Whole Foods and Nutrient Density\\n2.3 Understanding Macronutrients and Micronutrients\\n2.4 The Role of Hydration in Health\\n2.5 Exploring Different Dietary Approaches\\n\\n3. Moving Towards Fitness (Word Count: 1,200)\\n3.1 The Benefits of Regular Physical Activity\\n3.2 Designing an Effective Exercise Routine\\n3.3 Cardiovascular Exercise and Its Impact on Health\\n3.4 Strength Training for Optimal Fitness\\n3.5 The Importance of Flexibility and Balance\\n\\n4. 
Prioritizing Mental and Emotional Well-being (Word Count: 1,500)\\n4.1 Understanding Mental Health and Emotional Well-being\\n4.2 Stress Management Techniques and Coping Strategies\\n4.3 The Power of Mindfulness and Meditation\\n4.4 Building Resilience and Emotional Intelligence\\n4.5 Seeking Professional Help and Support\\n\\n5. Cultivating Healthy Habits (Word Count: 1,500)\\n5.1 The Science of Habit Formation\\n5.2 The Role of Sleep in Health and Wellness\\n5.3 Strategies for Effective Time Management\\n5.4 Creating a Healthy Home Environment\\n5.5 The Importance of Social Connections and Relationships\\n\\n6. Embracing Self-Care (Word Count: 1,000)\\n6.1 Understanding Self-Care and Its Impact on Well-being\\n6.2 Developing a Personalized Self-Care Routine\\n6.3 The Benefits of Regular Relaxation and Recreation\\n6.4 Exploring Creative Outlets for Self-Expression\\n6.5 Practicing Gratitude and Positive Thinking\\n\\n7. Navigating Common Health Concerns (Word Count: 1,800)\\n7.1 Preventing and Managing Chronic Diseases\\n7.2 Mental Health Disorders: Causes, Symptoms, and Treatments\\n7.3 Women\'s Health: From Menstruation to Menopause\\n7.4 Maintaining a Healthy Heart and Cardiovascular System\\n7.5 Strategies for Boosting Immune Function\\n\\n8. Holistic Approaches to Health and Wellness (Word Count: 1,000)\\n8.1 Traditional Medicine and Integrative Health Practices\\n8.2 The Benefits of Herbal Medicine and Natural Remedies\\n8.3 Exploring Alternative Therapies: Acupuncture, Ayurveda, and more\\n8.4 Harnessing the Power of Energy Healing and Chakra Balancing\\n8.5 The Role of Spirituality and Mind-Body Connection\\n\\nConclusion (Word Count: 300)\\nIn this extensive guide, we have covered a wide range of topics related to health and wellness, equipping you with the knowledge and tools to embark on your personal journey towards optimal well-being. Remember, true health and wellness are not achieved overnight but require consistent effort, commitment, and self-care. By implementing the strategies outlined in this guide, you can unlock your full potential and live a vibrant, fulfilling life. So, embrace the power of health and wellness and start your transformative journey today!\\n\\nWord Count: 10,000"}], "model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 3000}' message='Post details' -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=13492 request_id=adff9627a295fd94fb7d164f9f67acbe response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "user", "content": "Disclaimer: The word count provided is an estimation and may vary slightly.\\n\\nTitle: The Ultimate Guide to Health and Wellness: Unlocking Your Full Potential\\n\\nIntroduction (Word Count: 500)\\nHealth and wellness are essential aspects of our lives that directly impact our overall well-being. In this comprehensive guide, we will explore various dimensions of health and wellness, providing valuable insights, practical tips, and evidence-based strategies to help you achieve optimal physical, mental, and emotional well-being. From nutrition and exercise to stress management and self-care, we will delve into every aspect of leading a healthy and fulfilling life. So, let\'s embark on this transformative journey together!\\n\\nTable of Contents:\\n\\n1. 
Understanding Health and Wellness (Word Count: 800)\\n1.1 Defining Health and Wellness\\n1.2 The Importance of Health and Wellness\\n1.3 The Connection between Physical, Mental, and Emotional Well-being\\n1.4 The Role of Lifestyle Choices in Health and Wellness\\n\\n2. Nourishing Your Body (Word Count: 1,200)\\n2.1 The Fundamentals of a Balanced Diet\\n2.2 The Power of Whole Foods and Nutrient Density\\n2.3 Understanding Macronutrients and Micronutrients\\n2.4 The Role of Hydration in Health\\n2.5 Exploring Different Dietary Approaches\\n\\n3. Moving Towards Fitness (Word Count: 1,200)\\n3.1 The Benefits of Regular Physical Activity\\n3.2 Designing an Effective Exercise Routine\\n3.3 Cardiovascular Exercise and Its Impact on Health\\n3.4 Strength Training for Optimal Fitness\\n3.5 The Importance of Flexibility and Balance\\n\\n4. Prioritizing Mental and Emotional Well-being (Word Count: 1,500)\\n4.1 Understanding Mental Health and Emotional Well-being\\n4.2 Stress Management Techniques and Coping Strategies\\n4.3 The Power of Mindfulness and Meditation\\n4.4 Building Resilience and Emotional Intelligence\\n4.5 Seeking Professional Help and Support\\n\\n5. Cultivating Healthy Habits (Word Count: 1,500)\\n5.1 The Science of Habit Formation\\n5.2 The Role of Sleep in Health and Wellness\\n5.3 Strategies for Effective Time Management\\n5.4 Creating a Healthy Home Environment\\n5.5 The Importance of Social Connections and Relationships\\n\\n6. Embracing Self-Care (Word Count: 1,000)\\n6.1 Understanding Self-Care and Its Impact on Well-being\\n6.2 Developing a Personalized Self-Care Routine\\n6.3 The Benefits of Regular Relaxation and Recreation\\n6.4 Exploring Creative Outlets for Self-Expression\\n6.5 Practicing Gratitude and Positive Thinking\\n\\n7. Navigating Common Health Concerns (Word Count: 1,800)\\n7.1 Preventing and Managing Chronic Diseases\\n7.2 Mental Health Disorders: Causes, Symptoms, and Treatments\\n7.3 Women\'s Health: From Menstruation to Menopause\\n7.4 Maintaining a Healthy Heart and Cardiovascular System\\n7.5 Strategies for Boosting Immune Function\\n\\n8. Holistic Approaches to Health and Wellness (Word Count: 1,000)\\n8.1 Traditional Medicine and Integrative Health Practices\\n8.2 The Benefits of Herbal Medicine and Natural Remedies\\n8.3 Exploring Alternative Therapies: Acupuncture, Ayurveda, and more\\n8.4 Harnessing the Power of Energy Healing and Chakra Balancing\\n8.5 The Role of Spirituality and Mind-Body Connection\\n\\nConclusion (Word Count: 300)\\nIn this extensive guide, we have covered a wide range of topics related to health and wellness, equipping you with the knowledge and tools to embark on your personal journey towards optimal well-being. Remember, true health and wellness are not achieved overnight but require consistent effort, commitment, and self-care. By implementing the strategies outlined in this guide, you can unlock your full potential and live a vibrant, fulfilling life. 
So, embrace the power of health and wellness and start your transformative journey today!\\n\\nWord Count: 10,000"}], "model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 3000}' message='Post details' -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=334 request_id=d29d279c03c16a49192a468a6de16400 response_code=200 -message='Request to OpenAI API' method=post path=https://api.openai.com/v1/chat/completions -api_version=None data='{"messages": [{"role": "user", "content": "Disclaimer: The word count provided is an estimation and may vary slightly."}], "model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 3000}' message='Post details' -https://api.openai.com:443 "POST /v1/chat/completions HTTP/1.1" 200 None -message='OpenAI API response' path=https://api.openai.com/v1/chat/completions processing_ms=704 request_id=a3c58cd690f5bd4d88ac37d8cd64a540 response_code=200 diff --git a/flow.py b/flow.py index 05b361b5..fd7a02b2 100644 --- a/flow.py +++ b/flow.py @@ -3,9 +3,7 @@ from swarms.structs import Flow api_key = "" - -# Initialize the language model, -# This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC +# Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC llm = OpenAIChat( openai_api_key=api_key, temperature=0.5, @@ -13,8 +11,21 @@ llm = OpenAIChat( ) # Initialize the flow -flow = Flow(llm=llm, max_loops=5, dashboard=True) +flow = Flow(llm=llm, max_loops=5, dashboard=True,) + +flow = Flow( + llm=llm, + max_loops=5, + dashboard=True, + # stopping_condition=None, # You can define a stopping condition as needed. + # loop_interval=1, + # retry_attempts=3, + # retry_interval=1, + # interactive=False, # Set to 'True' for interactive mode. + # dynamic_temperature=False, # Set to 'True' for dynamic temperature handling. +) + out = flow.run("Generate a 10,000 word blog on health and wellness.") -print(out) +print(out) \ No newline at end of file diff --git a/groupchat.py b/groupchat.py index c004b266..6694d71f 100644 --- a/groupchat.py +++ b/groupchat.py @@ -1,24 +1,109 @@ -from swarms.structs import Flow +# from swarms.structs import Flow +# from swarms.models import OpenAIChat +# from swarms.swarms.groupchat import GroupChat +# from swarms.agents import SimpleAgent + +# api_key = "" + +# llm = OpenAIChat( +# openai_api_key=api_key, +# ) + +# agent1 = SimpleAgent("Captain Price", Flow(llm=llm, max_loops=4)) +# agent2 = SimpleAgent("John Mactavis", Flow(llm=llm, max_loops=4)) + +# # Create a groupchat with the 2 agents +# chat = GroupChat([agent1, agent2]) + +# # Assign duties to the agents +# chat.assign_duty(agent1.name, "Buy the groceries") +# chat.assign_duty(agent2.name, "Clean the house") + +# # Initate a chat +# response = chat.run("Captain Price", "Hello, how are you John?") +# print(response) + + from swarms.models import OpenAIChat -from swarms.swarms.groupchat import GroupChat -from swarms.agents import SimpleAgent +from swarms.structs import Flow +import random + +api_key = "" # Your API Key here + + +class GroupChat: + """ + GroupChat class that facilitates agent-to-agent communication using multiple instances of the Flow class. 
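+ + Example (illustrative; agent IDs are auto-assigned as agent_0, agent_1, ...): + + chat = GroupChat([agent1, agent2]) + chat.send_message("agent_0", "agent_1", "Summarize today's goals.") + print(chat.get_message_log())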
+ """ + + def __init__(self, agents: list): + self.agents = {f"agent_{i}": agent for i, agent in enumerate(agents)} + self.message_log = [] + + def add_agent(self, agent: Flow): + agent_id = f"agent_{len(self.agents)}" + self.agents[agent_id] = agent + + def remove_agent(self, agent_id: str): + if agent_id in self.agents: + del self.agents[agent_id] + + def send_message(self, sender_id: str, recipient_id: str, message: str): + if sender_id not in self.agents or recipient_id not in self.agents: + raise ValueError("Invalid sender or recipient ID.") + formatted_message = f"{sender_id} to {recipient_id}: {message}" + self.message_log.append(formatted_message) + recipient_agent = self.agents[recipient_id] + recipient_agent.run(message) + + def broadcast_message(self, sender_id: str, message: str): + for agent_id, agent in self.agents.items(): + if agent_id != sender_id: + self.send_message(sender_id, agent_id, message) + + def get_message_log(self): + return self.message_log + + +class EnhancedGroupChatV2(GroupChat): + def __init__(self, agents: list): + super().__init__(agents) + + def multi_round_conversation(self, rounds: int = 5): + """ + Initiate a multi-round conversation between agents. + + Args: + rounds (int): The number of rounds of conversation. + """ + for _ in range(rounds): + # Randomly select a sender and recipient agent for the conversation + sender_id = random.choice(list(self.agents.keys())) + recipient_id = random.choice(list(self.agents.keys())) + while recipient_id == sender_id: # Ensure the recipient is not the sender + recipient_id = random.choice(list(self.agents.keys())) + + # Generate a message (for simplicity, a generic message is used) + message = f"Hello {recipient_id}, how are you today?" + self.send_message(sender_id, recipient_id, message) -api_key = "" +# Sample usage with EnhancedGroupChatV2 +# Initialize the language model llm = OpenAIChat( openai_api_key=api_key, + temperature=0.5, + max_tokens=3000, ) -agent1 = SimpleAgent("Captain Price", Flow(llm=llm, max_loops=4)) -agent2 = SimpleAgent("John Mactavis", Flow(llm=llm, max_loops=4)) +# Initialize two Flow agents +agent1 = Flow(llm=llm, max_loops=5, dashboard=True) +agent2 = Flow(llm=llm, max_loops=5, dashboard=True) -# Create a groupchat with the 2 agents -chat = GroupChat([agent1, agent2]) +# Create an enhanced group chat with the two agents +enhanced_group_chat_v2 = EnhancedGroupChatV2(agents=[agent1, agent2]) -# Assign duties to the agents -chat.assign_duty(agent1.name, "Buy the groceries") -chat.assign_duty(agent2.name, "Clean the house") +# Simulate multi-round agent to agent communication +enhanced_group_chat_v2.multi_round_conversation(rounds=5) -# Initate a chat -response = chat.run("Captain Price", "Hello, how are you John?") -print(response) +enhanced_group_chat_v2.get_message_log() # Get the conversation log diff --git a/mkdocs.yml b/mkdocs.yml index 8b948587..4f5134a7 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -118,6 +118,7 @@ nav: - PdfChunker: "swarms/chunkers/pdf_chunker.md" - Examples: - Overview: "examples/index.md" + - Flow: "examples/flow.md" - Agents: - OmniAgent: "examples/omni_agent.md" - Worker: diff --git a/stacked_worker.py b/stacked_worker.py deleted file mode 100644 index 2930c51b..00000000 --- a/stacked_worker.py +++ /dev/null @@ -1,134 +0,0 @@ -import os - -import interpreter - -from swarms.agents.hf_agents import HFAgent -from swarms.agents.omni_modal_agent import OmniModalAgent -from swarms.models import OpenAIChat -from swarms.tools.autogpt import tool -from swarms.workers 
import Worker -from swarms.prompts.task_assignment_prompt import task_planner_prompt - - -# Initialize API Key -api_key = "" - - -# Initialize the language model, -# This model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC -llm = OpenAIChat( - openai_api_key=api_key, - temperature=0.5, - max_tokens=200, -) - - -# wrap a function with the tool decorator to make it a tool, then add docstrings for tool documentation -@tool -def hf_agent(task: str = None): - """ - An tool that uses an openai model to call and respond to a task by search for a model on huggingface - It first downloads the model then uses it. - - Rules: Don't call this model for simple tasks like generating a summary, only call this tool for multi modal tasks like generating images, videos, speech, etc - - """ - agent = HFAgent(model="text-davinci-003", api_key=api_key) - response = agent.run(task, text="¡Este es un API muy agradable!") - return response - - -@tool -def task_planner_worker_agent(task: str): - """ - Task planner tool that creates a plan for a given task. - Input: an objective to create a todo list for. Output: a todo list for that objective. - - """ - task = task_planner_prompt(task) - return llm(task) - - -# wrap a function with the tool decorator to make it a tool -@tool -def omni_agent(task: str = None): - """ - An tool that uses an openai Model to utilize and call huggingface models and guide them to perform a task. - - Rules: Don't call this model for simple tasks like generating a summary, only call this tool for multi modal tasks like generating images, videos, speech - The following tasks are what this tool should be used for: - - Tasks omni agent is good for: - -------------- - document-question-answering - image-captioning - image-question-answering - image-segmentation - speech-to-text - summarization - text-classification - text-question-answering - translation - huggingface-tools/text-to-image - huggingface-tools/text-to-video - text-to-speech - huggingface-tools/text-download - huggingface-tools/image-transformation - """ - agent = OmniModalAgent(llm) - response = agent.run(task) - return response - - -# Code Interpreter -@tool -def compile(task: str): - """ - Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. - You can chat with Open Interpreter through a ChatGPT-like interface in your terminal - by running $ interpreter after installing. - - This provides a natural-language interface to your computer's general-purpose capabilities: - - Create and edit photos, videos, PDFs, etc. - Control a Chrome browser to perform research - Plot, clean, and analyze large datasets - ...etc. - ⚠️ Note: You'll be asked to approve code before it's run. 
- - Rules: Only use when given to generate code or an application of some kind - """ - task = interpreter.chat(task, return_messages=True) - interpreter.chat() - interpreter.reset(task) - - os.environ["INTERPRETER_CLI_AUTO_RUN"] = True - os.environ["INTERPRETER_CLI_FAST_MODE"] = True - os.environ["INTERPRETER_CLI_DEBUG"] = True - - -# Append tools to an list -# tools = [hf_agent, omni_agent, compile] -tools = [task_planner_worker_agent] - - -# Initialize a single Worker node with previously defined tools in addition to it's -# predefined tools -node = Worker( - llm=llm, - ai_name="Optimus Prime", - openai_api_key=api_key, - ai_role="Worker in a swarm", - external_tools=tools, - human_in_the_loop=False, - temperature=0.5, -) - -# Specify task -task = "Use the task planner to agent to create a plan to Locate 5 trending topics on healthy living, locate a website like NYTimes, and then generate an image of people doing those topics." - -# Run the node on the task -response = node.run(task) - -# Print the response -print(response) diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py index bc11522b..8d7a09ed 100644 --- a/swarms/structs/flow.py +++ b/swarms/structs/flow.py @@ -3,7 +3,6 @@ TODO: - Add a retry mechanism - Add prompt injection letting the agent know it's in a flow, Flow prompt - Dynamic temperature handling -- Add """ @@ -14,15 +13,27 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Generator from termcolor import colored import inspect import random +from swarms.tools.tool import BaseTool # Constants FLOW_SYSTEM_PROMPT = """ -You are a language model operating within a flow class. +You are an autonomous agent granted autonomy from a Flow structure. Your role is to engage in multi-step conversations with yourself or the user, generate long-form content like blogs, screenplays, or SOPs, and accomplish tasks. You can have internal dialogues with yourself or can interact with the user to aid in these complex tasks. Your responses should be coherent, contextually relevant, and tailored to the task at hand. + + +When you have finished the task, and you feel as if you are done: output a special token: <DONE> +This will enable you to leave the flow loop. + +""" + + +DYNAMIC_STOP_PROMPT = """ +When you have finished the task, and you feel as if you are done: output a special token: <DONE> +This will enable you to leave the flow loop.
""" @@ -92,6 +103,7 @@ class Flow: retry_interval: int = 1, interactive: bool = False, dashboard: bool = False, + tools: List[BaseTool] = None, dynamic_temperature: bool = False, **kwargs: Any, ): @@ -109,6 +121,7 @@ class Flow: self.interactive = interactive self.dashboard = dashboard self.dynamic_temperature = dynamic_temperature + self.tools = tools def provide_feedback(self, feedback: str) -> None: """Allow users to provide feedback on the responses.""" @@ -196,7 +209,7 @@ class Flow: print(dashboard) - def run(self, task: str): + def run(self, task: str, **kwargs): """ Run the autonomous agent loop @@ -221,7 +234,7 @@ class Flow: for i in range(self.max_loops): print(colored(f"\nLoop {i+1} of {self.max_loops}", "blue")) print("\n") - if self._check_stopping_condition(response): + if self._check_stopping_condition(response) or parse_done_token(response): break # Adjust temperature, comment if no work @@ -231,10 +244,18 @@ class Flow: attempt = 0 while attempt < self.retry_attempts: try: - response = self.llm(response) + response = self.llm( + f""" + SYSTEM_PROMPT: + {FLOW_SYSTEM_PROMPT} + + + History: {response} + + """, **kwargs + ) # print(f"Next query: {response}") # break - if self.interactive: print(f"AI: {response}") history.append(f"AI: {response}") diff --git a/swarms/tools/developer.py b/swarms/tools/developer.py deleted file mode 100644 index 04e4b30a..00000000 --- a/swarms/tools/developer.py +++ /dev/null @@ -1,925 +0,0 @@ -import os -import re -import signal -import subprocess -import time -from datetime import datetime -from pathlib import Path -from typing import Callable, Dict, List, Literal, Optional, Tuple, Union - -from langchain.tools import tool -from ptrace.debugger import ( - NewProcessEvent, - ProcessExecution, - ProcessExit, - ProcessSignal, - PtraceDebugger, - PtraceProcess, -) -from ptrace.func_call import FunctionCallOptions -from ptrace.syscall import PtraceSyscall -from ptrace.tools import signal_to_exitcode - -from swarms.tools.base import BaseToolSet, SessionGetter, ToolScope, tool -from swarms.utils.logger import logger -from swarms.utils.main import ANSI, Color, Style # test - -# helpers -PipeType = Union[Literal["stdout"], Literal["stderr"]] - - -def verify(func): - def wrapper(*args, **kwargs): - try: - filepath = args[0].filepath - except AttributeError: - raise Exception("This tool doesn't have filepath. Please check your code.") - if not str(Path(filepath).resolve()).startswith(str(Path().resolve())): - return "You can't access file outside of playground." 
- return func(*args, **kwargs) - - return wrapper - - -class SyscallTimeoutException(Exception): - def __init__(self, pid: int, *args) -> None: - super().__init__(f"deadline exceeded while waiting syscall for {pid}", *args) - - -class SyscallTracer: - def __init__(self, pid: int): - self.debugger: PtraceDebugger = PtraceDebugger() - self.pid: int = pid - self.process: PtraceProcess = None - - def is_waiting(self, syscall: PtraceSyscall) -> bool: - if syscall.name.startswith("wait"): - return True - return False - - def attach(self): - self.process = self.debugger.addProcess(self.pid, False) - - def detach(self): - self.process.detach() - self.debugger.quit() - - def set_timer(self, timeout: int): - def handler(signum, frame): - raise SyscallTimeoutException(self.process.pid) - - signal.signal(signal.SIGALRM, handler) - signal.alarm(timeout) - - def reset_timer(self): - signal.alarm(0) - - def wait_syscall_with_timeout(self, timeout: int): - self.set_timer(timeout) - self.process.waitSyscall() - self.reset_timer() - - def wait_until_stop_or_exit(self) -> Tuple[Optional[int], str]: - self.process.syscall() - exitcode = None - reason = "" - while True: - if not self.debugger: - break - - try: - self.wait_syscall_with_timeout(30) - except ProcessExit as event: - if event.exitcode is not None: - exitcode = event.exitcode - continue - except ProcessSignal as event: - event.process.syscall(event.signum) - exitcode = signal_to_exitcode(event.signum) - reason = event.reason - continue - except NewProcessEvent: - continue - except ProcessExecution: - continue - except Exception as e: - reason = str(e) - break - - syscall = self.process.syscall_state.event( - FunctionCallOptions( - write_types=False, - write_argname=False, - string_max_length=300, - replace_socketcall=True, - write_address=False, - max_array_count=20, - ) - ) - - self.process.syscall() - - if syscall is None: - continue - - if syscall.result: - continue - - self.reset_timer() - - return exitcode, reason - - -class StdoutTracer: - def __init__( - self, - process: subprocess.Popen, - timeout: int = 30, - interval: int = 0.1, - on_output: Callable[[PipeType, str], None] = lambda: None, - ): - self.process: subprocess.Popen = process - self.timeout: int = timeout - self.interval: int = interval - self.last_output: datetime = None - self.on_output: Callable[[PipeType, str], None] = on_output - - def nonblock(self): - os.set_blocking(self.process.stdout.fileno(), False) - os.set_blocking(self.process.stderr.fileno(), False) - - def get_output(self, pipe: PipeType) -> str: - output = None - if pipe == "stdout": - output = self.process.stdout.read() - elif pipe == "stderr": - output = self.process.stderr.read() - - if output: - decoded = output.decode() - self.on_output(pipe, decoded) - self.last_output = datetime.now() - return decoded - return "" - - def last_output_passed(self, seconds: int) -> bool: - return (datetime.now() - self.last_output).seconds > seconds - - def wait_until_stop_or_exit(self) -> Tuple[Optional[int], str]: - self.nonblock() - self.last_output = datetime.now() - output = "" - exitcode = None - while True: - new_stdout = self.get_output("stdout") - if new_stdout: - output += new_stdout - - new_stderr = self.get_output("stderr") - if new_stderr: - output += new_stderr - - if self.process.poll() is not None: - exitcode = self.process.poll() - break - - if self.last_output_passed(self.timeout): - self.process.kill() - break - - time.sleep(self.interval) - - return (exitcode, output) - - -class Terminal(BaseToolSet): - 
def __init__(self): - self.sessions: Dict[str, List[SyscallTracer]] = {} - - @tool( - name="Terminal", - description="Executes commands in a terminal." - "If linux errno occurs, we have to solve the problem with the terminal. " - "Input must be one valid command. " - "Output will be any output from running that command.", - scope=ToolScope.SESSION, - ) - def execute(self, commands: str, get_session: SessionGetter) -> str: - session, _ = get_session() - - try: - process = subprocess.Popen( - commands, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - logger.info(ANSI("Realtime Terminal Output").to(Color.magenta()) + ": ") - - output = "" - tracer = StdoutTracer( - process, - on_output=lambda p, o: logger.info( - ANSI(p).to(Style.dim()) + " " + o.strip("\n") - ), - ) - exitcode, output = tracer.wait_until_stop_or_exit() - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed Terminal, Input Commands: {commands} " - f"Output Answer: {output}" - ) - return output - - -############# - - -@tool( - name="Terminal", - description="Executes commands in a terminal." - "If linux errno occurs, we have to solve the problem with the terminal. " - "Input must be one valid command. " - "Output will be any output from running that command.", - scope=ToolScope.SESSION, -) -def terminal_execute(self, commands: str, get_session: SessionGetter) -> str: - session, _ = get_session() - - try: - process = subprocess.Popen( - commands, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - logger.info(ANSI("Realtime Terminal Output").to(Color.magenta()) + ": ") - - output = "" - tracer = StdoutTracer( - process, - on_output=lambda p, o: logger.info( - ANSI(p).to(Style.dim()) + " " + o.strip("\n") - ), - ) - exitcode, output = tracer.wait_until_stop_or_exit() - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed Terminal, Input Commands: {commands} " f"Output Answer: {output}" - ) - return output - - -""" -write protocol: - - - -""" - - -class WriteCommand: - separator = "\n" - - def __init__(self, filepath: str, content: int): - self.filepath: str = filepath - self.content: str = content - self.mode: str = "w" - - def with_mode(self, mode: str) -> "WriteCommand": - self.mode = mode - return self - - @verify - def execute(self) -> str: - dir_path = os.path.dirname(self.filepath) - if dir_path: - os.makedirs(dir_path, exist_ok=True) - with open(self.filepath, self.mode) as f: - f.write(self.content) - return self.content - - @staticmethod - def from_str(command: str) -> "WriteCommand": - filepath = command.split(WriteCommand.separator)[0] - return WriteCommand(filepath, command[len(filepath) + 1 :]) - - -class CodeWriter: - @staticmethod - def write(command: str) -> str: - return WriteCommand.from_str(command).with_mode("w").execute() - - @staticmethod - def append(command: str) -> str: - return WriteCommand.from_str(command).with_mode("a").execute() - - -""" -read protocol: - -|- -""" - - -class Line: - def __init__(self, content: str, line_number: int, depth: int): - self.__content: str = content - self.__line_number: int = line_number - self.__depth: int = depth - self.__children: List[Line] = [] - - def get_content(self) -> str: - return self.__content - - def get_depth(self) -> int: - return self.__depth - - def append_child(self, child: "Line") -> None: - self.__children.append(child) - - def find_by_lte_depth(self, depth: int) -> List["Line"]: - if self.__depth > depth: - return [] - - lines: List[Line] = [self] - for child in 
self.__children: - lines += child.find_by_lte_depth(depth) - return lines - - def find_by_content(self, content: str) -> List["Line"]: - if content in self.__content: - return [self] - - lines: List[Line] = [] - for child in self.__children: - lines += child.find_by_content(content) - return lines - - def find_last_lines(self) -> List["Line"]: - if len(self.__children) == 0: - return [self] - else: - return [self, *self.__children[-1].find_last_lines()] - - def print(self, depth: int = 0) -> None: - print(f"{' ' * depth}{self}", end="") - for child in self.__children: - child.print(depth + 1) - - def __repr__(self): - return f"{self.__line_number}: {self.__content}" - - -class CodeTree: - def __init__(self): - self.root: Line = Line("\n", -1, -1) - - def append(self, content: str, line_number: int) -> None: - last_lines: List[Line] = self.root.find_last_lines() - new_leading_spaces: int = self.__get_leading_spaces(content) - - previous_line: Line = self.root - previous_leading_spaces: int = -1 - for line in last_lines: - leading_spaces = self.__get_leading_spaces(line.get_content()) - if ( - previous_leading_spaces < new_leading_spaces - and new_leading_spaces <= leading_spaces - ): - break - previous_line, previous_leading_spaces = line, leading_spaces - - new_line_depth: int = previous_line.get_depth() + 1 - previous_line.append_child(Line(content, line_number, new_line_depth)) - - def find_from_root(self, depth: int) -> List[Line]: - return self.root.find_by_lte_depth(depth) - - def find_from_parent(self, depth: int, parent_content: str) -> List[Line]: - lines: List[Line] = self.root.find_by_content(parent_content) - if len(lines) == 0: - return [] - parent = lines[0] - return parent.find_by_lte_depth(depth + parent.get_depth()) - - def print(self): - print("Code Tree:") - print("=================================") - self.root.print() - print("=================================") - - def __get_leading_spaces(self, content: str) -> int: - return len(content) - len(content.lstrip()) - - -class ReadCommand: - separator = "|" - - def __init__(self, filepath: str, start: int, end: int): - self.filepath: str = filepath - self.start: int = start - self.end: int = end - - @verify - def execute(self) -> str: - with open(self.filepath, "r") as f: - code = f.readlines() - - if self.start == self.end: - code = code[self.start - 1] - else: - code = "".join(code[self.start - 1 : self.end]) - return code - - @staticmethod - def from_str(command: str) -> "ReadCommand": - filepath, line = command.split(ReadCommand.separator) - start, end = line.split("-") - return ReadCommand(filepath, int(start), int(end)) - - -class SummaryCommand: - separator = "|" - - def __init__(self, filepath: str, depth: int, parent_content: Optional[str] = None): - self.filepath: str = filepath - self.depth: int = depth - self.parent_content: Optional[str] = parent_content - - @verify - def execute(self) -> str: - with open(self.filepath, "r") as f: - code = f.readlines() - - code_tree = CodeTree() - for i, line in enumerate(code): - if line.strip() != "": - code_tree.append(line, i + 1) - - if self.parent_content is None: - lines = code_tree.find_from_root(self.depth) - else: - lines = code_tree.find_from_parent(self.depth, self.parent_content) - return "".join([str(line) for line in lines]) - - @staticmethod - def from_str(command: str) -> "SummaryCommand": - command_list: List[str] = command.split(SummaryCommand.separator) - filepath: str = command_list[0] - depth: int = int(command_list[1]) - parent_content: str | None = 
command_list[2] if len(command_list) == 3 else None - return SummaryCommand( - filepath=filepath, depth=depth, parent_content=parent_content - ) - - -class CodeReader: - @staticmethod - def read(command: str) -> str: - return ReadCommand.from_str(command).execute() - - @staticmethod - def summary(command: str) -> str: - return SummaryCommand.from_str(command).execute() - - -""" -patch protocol: - -|,|,| ----~~~+++===+++~~~--- -|,|,| ----~~~+++===+++~~~--- -... ----~~~+++===+++~~~--- - -let say original code is: -``` -import requests - -def crawl_news(keyword): - url = f"https://www.google.com/search?q={keyword}+news" - response = requests.get(url) - - news = [] - for result in response: - news.append(result.text) - - return news -``` - -and we want to change it to: -``` -import requests -from bs4 import BeautifulSoup - -def crawl_news(keyword): - url = f"https://www.google.com/search?q={keyword}+news" - html = requests.get(url).text - soup = BeautifulSoup(html, "html.parser") - news_results = soup.find_all("div", class_="BNeawe vvjwJb AP7Wnd") - - news_titles = [] - for result in news_results: - news_titles.append(result.text) - - return news_titles -``` - -then the command will be: -test.py|2,1|2,1|from bs4 import BeautifulSoup - ----~~~+++===+++~~~--- -test.py|5,5|5,33|html = requests.get(url).text - soup = BeautifulSoup(html, "html.parser") - news_results = soup.find_all("div", class_="BNeawe vvjwJb AP7Wnd") ----~~~+++===+++~~~--- -test.py|7,5|9,13|news_titles = [] - for result in news_results: - news_titles ----~~~+++===+++~~~--- -test.py|11,16|11,16|_titles -""" - - -class Position: - separator = "," - - def __init__(self, line: int, col: int): - self.line: int = line - self.col: int = col - - def __str__(self): - return f"(Ln {self.line}, Col {self.col})" - - @staticmethod - def from_str(pos: str) -> "Position": - line, col = pos.split(Position.separator) - return Position(int(line) - 1, int(col) - 1) - - -class PatchCommand: - separator = "|" - - def __init__(self, filepath: str, start: Position, end: Position, content: str): - self.filepath: str = filepath - self.start: Position = start - self.end: Position = end - self.content: str = content - - def read_lines(self) -> list[str]: - with open(self.filepath, "r") as f: - lines = f.readlines() - return lines - - def write_lines(self, lines: list[str]) -> int: - with open(self.filepath, "w") as f: - f.writelines(lines) - return sum([len(line) for line in lines]) - - @verify - def execute(self) -> Tuple[int, int]: - lines = self.read_lines() - before = sum([len(line) for line in lines]) - - lines[self.start.line] = ( - lines[self.start.line][: self.start.col] - + self.content - + lines[self.end.line][self.end.col :] - ) - lines = lines[: self.start.line + 1] + lines[self.end.line + 1 :] - - after = self.write_lines(lines) - - written = len(self.content) - deleted = before - after + written - - return written, deleted - - @staticmethod - def from_str(command: str) -> "PatchCommand": - match = re.search( - r"(.*)\|([0-9]*),([0-9]*)\|([0-9]*),([0-9]*)(\||\n)(.*)", - command, - re.DOTALL, - ) - filepath = match.group(1) - start_line = match.group(2) - start_col = match.group(3) - end_line = match.group(4) - end_col = match.group(5) - content = match.group(7) - return PatchCommand( - filepath, - Position.from_str(f"{start_line},{start_col}"), - Position.from_str(f"{end_line},{end_col}"), - content, - ) - - -class CodePatcher: - separator = "\n---~~~+++===+++~~~---\n" - - @staticmethod - def sort_commands(commands: list[PatchCommand]) -> 
list[PatchCommand]: - return sorted(commands, key=lambda c: c.start.line, reverse=True) - - @staticmethod - def patch(bulk_command: str) -> Tuple[int, int]: - commands = [ - PatchCommand.from_str(command) - for command in bulk_command.split(CodePatcher.separator) - if command != "" - ] - commands = CodePatcher.sort_commands(commands) - - written, deleted = 0, 0 - for command in commands: - if command: - w, d = command.execute() - written += w - deleted += d - return written, deleted - - -class CodeEditor(BaseToolSet): - @tool( - name="CodeEditor.READ", - description="Read and understand code. " - "Input should be filename and line number group. ex. test.py|1-10 " - "and the output will be code. ", - ) - def read(self, inputs: str) -> str: - try: - output = CodeReader.read(inputs) - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.READ, Input Commands: {inputs} " - f"Output Answer: {output}" - ) - return output - - @tool( - name="CodeEditor.SUMMARY", - description="Summary code. " - "Read the code structured into a tree. " - "If you set specific line, it will show the code from the specific line. " - "Input should be filename, depth, and specific line if you want. ex. test.py|2 or test.py|3|print('hello world') " - "and the output will be list of (line number: code). ", - ) - def summary(self, inputs: str) -> str: - try: - output = CodeReader.summary(inputs) - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.SUMMARY, Input Commands: {inputs} " - f"Output Answer: {output}" - ) - return output - - @tool( - name="CodeEditor.APPEND", - description="Append code to the existing file. " - "If the code is completed, use the Terminal tool to execute it, if not, append the code through the this tool. " - "Input should be filename and code to append. " - "Input code must be the code that should be appended, NOT whole code. " - "ex. test.py\nprint('hello world')\n " - "and the output will be last 3 lines.", - ) - def append(self, inputs: str) -> str: - try: - code = CodeWriter.append(inputs) - output = "Last 3 line was:\n" + "\n".join(code.split("\n")[-3:]) - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.APPEND, Input: {inputs} " - f"Output Answer: {output}" - ) - return output - - @tool( - name="CodeEditor.WRITE", - description="Write code to create a new tool. " - "If the code is completed, use the Terminal tool to execute it, if not, append the code through the CodeEditor.APPEND tool. " - "Input should be formatted like: " - "\n\n\n" - "Here is an example: " - "test.py\nmessage = 'hello world'\nprint(message)\n" - "\n" - "The output will be last 3 lines you wrote.", - ) - def write(self, inputs: str) -> str: - try: - code = CodeWriter.write(inputs.lstrip()) - output = "Last 3 line was:\n" + "\n".join(code.split("\n")[-3:]) - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.WRITE, Input: {inputs} " f"Output Answer: {output}" - ) - return output - - @tool( - name="CodeEditor.PATCH", - description="Patch the code to correct the error if an error occurs or to improve it. " - "Input is a list of patches. The patch is separated by {seperator}. ".format( - seperator=CodePatcher.separator.replace("\n", "\\n") - ) - + "Each patch has to be formatted like below.\n" - "|,|,|" - "Here is an example. 
If the original code is:\n" - "print('hello world')\n" - "and you want to change it to:\n" - "print('hi corca')\n" - "then the patch should be:\n" - "test.py|1,8|1,19|hi corca\n" - "Code between start and end will be replaced with new_code. " - "The output will be written/deleted bytes or error message. ", - ) - def patch(self, patches: str) -> str: - try: - w, d = CodePatcher.patch(patches) - output = f"successfully wrote {w}, deleted {d}" - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.PATCH, Input Patch: {patches} " - f"Output Answer: {output}" - ) - return output - - @tool( - name="CodeEditor.DELETE", - description="Delete code in file for a new start. " - "Input should be filename." - "ex. test.py " - "Output will be success or error message.", - ) - def delete(self, inputs: str, filepath: str) -> str: - try: - with open(filepath, "w") as f: - f.write("") - output = "success" - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.DELETE, Input filename: {inputs} " - f"Output Answer: {output}" - ) - return output - - -# ---------------- end - - -@tool( - name="CodeEditor.READ", - description="Read and understand code. " - "Input should be filename and line number group. ex. test.py|1-10 " - "and the output will be code. ", -) -def code_editor_read(self, inputs: str) -> str: - try: - output = CodeReader.read(inputs) - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.READ, Input Commands: {inputs} " - f"Output Answer: {output}" - ) - return output - - -@tool( - name="CodeEditor.SUMMARY", - description="Summary code. " - "Read the code structured into a tree. " - "If you set specific line, it will show the code from the specific line. " - "Input should be filename, depth, and specific line if you want. ex. test.py|2 or test.py|3|print('hello world') " - "and the output will be list of (line number: code). ", -) -def code_editor_summary(self, inputs: str) -> str: - try: - output = CodeReader.summary(inputs) - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.SUMMARY, Input Commands: {inputs} " - f"Output Answer: {output}" - ) - return output - - -@tool( - name="CodeEditor.APPEND", - description="Append code to the existing file. " - "If the code is completed, use the Terminal tool to execute it, if not, append the code through the this tool. " - "Input should be filename and code to append. " - "Input code must be the code that should be appended, NOT whole code. " - "ex. test.py\nprint('hello world')\n " - "and the output will be last 3 lines.", -) -def code_editor_append(self, inputs: str) -> str: - try: - code = CodeWriter.append(inputs) - output = "Last 3 line was:\n" + "\n".join(code.split("\n")[-3:]) - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.APPEND, Input: {inputs} " f"Output Answer: {output}" - ) - return output - - -@tool( - name="CodeEditor.WRITE", - description="Write code to create a new tool. " - "If the code is completed, use the Terminal tool to execute it, if not, append the code through the CodeEditor.APPEND tool. 
" - "Input should be formatted like: " - "\n\n\n" - "Here is an example: " - "test.py\nmessage = 'hello world'\nprint(message)\n" - "\n" - "The output will be last 3 lines you wrote.", -) -def code_editor_write(self, inputs: str) -> str: - try: - code = CodeWriter.write(inputs.lstrip()) - output = "Last 3 line was:\n" + "\n".join(code.split("\n")[-3:]) - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.WRITE, Input: {inputs} " f"Output Answer: {output}" - ) - return output - - -@tool( - name="CodeEditor.PATCH", - description="Patch the code to correct the error if an error occurs or to improve it. " - "Input is a list of patches. The patch is separated by {seperator}. ".format( - seperator=CodePatcher.separator.replace("\n", "\\n") - ) - + "Each patch has to be formatted like below.\n" - "|,|,|" - "Here is an example. If the original code is:\n" - "print('hello world')\n" - "and you want to change it to:\n" - "print('hi corca')\n" - "then the patch should be:\n" - "test.py|1,8|1,19|hi corca\n" - "Code between start and end will be replaced with new_code. " - "The output will be written/deleted bytes or error message. ", -) -def code_editor_patch(self, patches: str) -> str: - try: - w, d = CodePatcher.patch(patches) - output = f"successfully wrote {w}, deleted {d}" - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.PATCH, Input Patch: {patches} " - f"Output Answer: {output}" - ) - return output - - -@tool( - name="CodeEditor.DELETE", - description="Delete code in file for a new start. " - "Input should be filename." - "ex. test.py " - "Output will be success or error message.", -) -def code_editor_delete(self, inputs: str, filepath: str) -> str: - try: - with open(filepath, "w") as f: - f.write("") - output = "success" - except Exception as e: - output = str(e) - - logger.debug( - f"\nProcessed CodeEditor.DELETE, Input filename: {inputs} " - f"Output Answer: {output}" - ) - return output diff --git a/swarms/tools/file_mangagement.py b/swarms/tools/file_mangagement.py deleted file mode 100644 index b9c2041a..00000000 --- a/swarms/tools/file_mangagement.py +++ /dev/null @@ -1,17 +0,0 @@ -from langchain.agents.agent_toolkits import FileManagementToolkit -from tempfile import TemporaryDirectory - -# We'll make a temporary directory to avoid clutter -working_directory = TemporaryDirectory() - -toolkit = FileManagementToolkit( - root_dir=str(working_directory.name) -) # If you don't provide a root_dir, operations will default to the current working directory -toolkit.get_tools() - -file_management_tools = FileManagementToolkit( - root_dir=str(working_directory.name), - selected_tools=["read_file", "write_file", "list_directory"], -).get_tools() - -read_tool, write_tool, list_tool = file_management_tools From 44a0c2f9cf1b79674f2a44166b9d1904796235ca Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 2 Nov 2023 17:17:45 -0400 Subject: [PATCH 06/15] flow docs --- docs/examples/flow.md | 48 +------------------------------------------ mkdocs.yml | 2 +- 2 files changed, 2 insertions(+), 48 deletions(-) diff --git a/docs/examples/flow.md b/docs/examples/flow.md index 3403d55b..86c58626 100644 --- a/docs/examples/flow.md +++ b/docs/examples/flow.md @@ -4,10 +4,6 @@ Welcome to the walkthrough guide for beginners on using the "Flow" feature within the Swarms module. This guide is designed to help you understand and utilize the capabilities of the Flow class for seamless interactions with AI language models. 
-**Target Audience:** - -- This guide is primarily intended for beginners who want to learn how to use the Flow feature in the Swarms module to interact with AI language models effectively. - ## Table of Contents 1\. **Understanding the Flow Feature** @@ -22,8 +18,6 @@ Welcome to the walkthrough guide for beginners on using the "Flow" feature withi    - 2.2 Installing Required Libraries -   - 2.3 Importing Necessary Modules - 3\. **Creating a Flow Instance**    - 3.1 Importing the Required Modules @@ -118,47 +112,7 @@ Before you begin, ensure that you have the following prerequisites in place: ### 2.2 Installing Required Libraries -To use the Flow feature, you'll need to install the required libraries. Make sure you have these libraries installed: - -- `termcolor`: For colorful console output. - -- `inspects`: For introspecting the language model. - -- `random`: For handling dynamic temperature. - -- Other dependencies as needed for your specific environment. - -You can install these libraries using pip: - -```bash - -pip install termcolor inspects - -``` - -### 2.3 Importing Necessary Modules - -In your Python script or environment, import the necessary modules from the Swarms framework: - -```python - -import json - -import logging - -import time - -from typing import Any, Callable, Dict, List, Optional, Tuple, Generator - -from termcolor import colored - -import inspect - -import random - -``` - -Ensure that you have these modules imported to proceed with the guide. +`pip3 install --upgrade swarms` ## 3. Creating a Flow Instance diff --git a/mkdocs.yml b/mkdocs.yml index 4f5134a7..f9a38fca 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -116,7 +116,7 @@ nav: - swarms.chunkers: - BaseChunker: "swarms/chunkers/basechunker.md" - PdfChunker: "swarms/chunkers/pdf_chunker.md" -- Examples: +- Walkthroughs: - Overview: "examples/index.md" - Flow: "examples/flow.md" - Agents: From 2127e8c64ee2674e11a1938fea1832d4998171b9 Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 2 Nov 2023 17:18:21 -0400 Subject: [PATCH 07/15] flow guide --- docs/examples/flow.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/flow.md b/docs/examples/flow.md index 86c58626..8e3db0bc 100644 --- a/docs/examples/flow.md +++ b/docs/examples/flow.md @@ -16,7 +16,7 @@ Welcome to the walkthrough guide for beginners on using the "Flow" feature withi    - 2.1 Prerequisites -   - 2.2 Installing Required Libraries +   - 2.2 Installing Required Librarie 3\. **Creating a Flow Instance** From 7e70c320cf5a1223eacffab38af33b45c60439c7 Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 2 Nov 2023 17:35:45 -0400 Subject: [PATCH 08/15] flow --- docs/examples/flow.md | 431 ++++++++++++++++++------------------------ 1 file changed, 187 insertions(+), 244 deletions(-) diff --git a/docs/examples/flow.md b/docs/examples/flow.md index 8e3db0bc..8a82c8ca 100644 --- a/docs/examples/flow.md +++ b/docs/examples/flow.md @@ -4,89 +4,59 @@ Welcome to the walkthrough guide for beginners on using the "Flow" feature within the Swarms module. This guide is designed to help you understand and utilize the capabilities of the Flow class for seamless interactions with AI language models. -## Table of Contents -1\. **Understanding the Flow Feature** +### Table of Contents -   - 1.1 What is the Flow Feature? +1. **Introduction to Swarms Flow Module** + - 1.1 What is Swarms? + - 1.2 Understanding the Flow Module -   - 1.2 Key Concepts +2. 
**Setting Up Your Development Environment** + - 2.1 Installing Required Dependencies + - 2.2 API Key Setup + - 2.3 Creating Your First Flow -2\. **Setting Up the Environment** +3. **Creating Your First Flow** + - 3.1 Importing Necessary Libraries + - 3.2 Defining Constants + - 3.3 Initializing the Flow Object + - 3.4 Initializing the Language Model + - 3.5 Running Your Flow + - 3.6 Understanding Flow Options -   - 2.1 Prerequisites +4. **Advanced Flow Concepts** + - 4.1 Custom Stopping Conditions + - 4.2 Dynamic Temperature Handling + - 4.3 Providing Feedback on Responses + - 4.4 Retry Mechanism + - 4.5 Response Filtering + - 4.6 Interactive Mode -   - 2.2 Installing Required Librarie +5. **Saving and Loading Flows** + - 5.1 Saving Flow State + - 5.2 Loading a Saved Flow -3\. **Creating a Flow Instance** +6. **Troubleshooting and Tips** + - 6.1 Analyzing Feedback + - 6.2 Troubleshooting Common Issues -   - 3.1 Importing the Required Modules +7. **Conclusion** -   - 3.2 Initializing the Language Model - -   - 3.3 Creating a Flow Instance - -4\. **Running a Flow** - -   - 4.1 Defining the Task - -   - 4.2 Running the Flow - -   - 4.3 Interacting with the AI - -   - 4.4 Dynamic Temperature Handling - -5\. **Customizing Flow Behavior** - -   - 5.1 Stopping Conditions - -   - 5.2 Retry Mechanism - -   - 5.3 Loop Interval - -   - 5.4 Interactive Mode - -6\. **Saving and Loading Flows** - -   - 6.1 Saving a Flow - -   - 6.2 Loading a Saved Flow - -7\. **Analyzing Feedback and Undoing Actions** - -   - 7.1 Providing Feedback - -   - 7.2 Undoing the Last Action - -   - 7.3 Response Filtering - -8\. **Advanced Features** - -   - 8.1 Streamed Generation - -   - 8.2 Real-time Token Generation - -9\. **Best Practices** - -   - 9.1 Conciseness and Clarity - -   - 9.2 Active Voice - -   - 9.3 Highlighting Important Points +--- -   - 9.4 Consistent Style +### 1. Introduction to Swarms Flow Module -10\. **Conclusion** +#### 1.1 What is Swarms? ---- +Swarms is a powerful framework designed to provide tools and capabilities for working with language models and automating various tasks. It allows developers to interact with language models seamlessly. -## 1. Understanding the Flow Feature +## 1.2 Understanding the Flow Feature -### 1.1 What is the Flow Feature? +### What is the Flow Feature? The Flow feature is a powerful component of the Swarms framework that allows developers to create a sequential, conversational interaction with AI language models. It enables developers to build multi-step conversations, generate long-form content, and perform complex tasks using AI. The Flow class provides autonomy to language models, enabling them to generate responses in a structured manner. -### 1.2 Key Concepts +### Key Concepts Before diving into the practical aspects, let's clarify some key concepts related to the Flow feature: @@ -100,304 +70,277 @@ Before diving into the practical aspects, let's clarify some key concepts relate - **Interactive Mode:** Interactive mode allows developers to have a back-and-forth conversation with the AI model, making it suitable for real-time interactions. -## 2. Setting Up the Environment -### 2.1 Prerequisites +### 2. Setting Up Your Development Environment -Before you begin, ensure that you have the following prerequisites in place: +#### 2.1 Installing Required Dependencies -- Basic understanding of Python programming. - -- Access to an AI language model or API key for language model services. - -### 2.2 Installing Required Libraries - -`pip3 install --upgrade swarms` - -## 3. 
Creating a Flow Instance - -To use the Flow feature, you need to create an instance of the Flow class. This instance will allow you to interact with the AI language model. - -### 3.1 Importing the Required Modules - -In your script, import the required modules for the Flow class: - -```python - -from swarms.structs import Flow - -from swarms.models import OpenAIChat  # Adjust this import according to your specific language model. +Before you can start using the Swarms Flow module, you need to set up your development environment. First, you'll need to install the necessary dependencies, including Swarms itself. +```bash +# Install Swarms and required libraries +pip3 install --upgrade swarms ``` -### 3.2 Initializing the Language Model +#### 2 Creating Your First Flow -Initialize the language model you want to use for interactions. In this example, we're using the `OpenAIChat` model: +Now, let's create your first Flow. A Flow represents a chain-like structure that allows you to engage in multi-step conversations with language models. The Flow structure is what gives an LLM autonomy. It's the Mitochondria of an autonomous agent. ```python +# Import necessary modules +from swarms.models import OpenAIChat # Zephr, Mistral +from swarms.structs import Flow -# Replace 'api_key' with your actual API key or configuration. - -llm = OpenAIChat( - -    openai_api_key='your_api_key', - -    temperature=0.5, - -    max_tokens=3000, +api_key = "" -) +# Initialize the language model (LLM) +llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000) +# Initialize the Flow object +flow = Flow(llm=llm, max_loops=5) ``` -Make sure to provide the necessary configuration, such as your API key and any model-specific parameters. -### 3.3 Creating a Flow Instance +#### 3.3 Initializing the Flow Object -Now, create an instance of the Flow class by passing the initialized language model: +Create a Flow object that will be the backbone of your conversational flow. ```python - +# Initialize the Flow object flow = Flow( - -    llm=llm, - -    max_loops=5, - -    dashboard=True, - -    stopping_condition=None,  # You can define a stopping condition as needed. - -    loop_interval=1, - -    retry_attempts=3, - -    retry_interval=1, - -    interactive=False,  # Set to 'True' for interactive mode. - -    dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling. - + llm=llm, + max_loops=5, + stopping_condition=None, # You can define custom stopping conditions + loop_interval=1, + retry_attempts=3, + retry_interval=1, + interactive=False, # Set to True for interactive mode + dashboard=False, # Set to True for a dashboard view + dynamic_temperature=False, # Enable dynamic temperature handling ) - ``` -This sets up your Flow instance with the specified parameters. Adjust these parameters based on your requirements. - -## 4. Running a Flow - -Now that you have created a Flow instance, let's run a simple interaction with the AI model using the Flow. +#### 3.4 Initializing the Language Model -### 4.1 Defining the Task +Initialize the language model (LLM) that your Flow will interact with. In this example, we're using OpenAI's GPT-3 as the LLM. -Define the task you want the AI model to perform. This can be any prompt or question you have in mind. For example: +- You can also use `Mistral` or `Zephr` or any of other models! ```python - -task = "Generate a 10,000 word blog on health and wellness." 
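# The task is plain natural-language text; flow.run(task) sends it to the
# language model as the opening prompt of the conversation loop.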
- +# Initialize the language model (LLM) +llm = OpenAIChat( + openai_api_key=api_key, + temperature=0.5, + max_tokens=3000, +) ``` -### 4.2 Running the Flow +#### 3.5 Running Your Flow -Run the Flow by providing the task you defined: +Now, you're ready to run your Flow and start interacting with the language model. -```python +If you are using a multi modality model, you can pass in the image path as another parameter -out = flow.run(task) +```python +# Run your Flow +out = flow.run( + "Generate a 10,000 word blog on health and wellness.", + # "img.jpg" , Image path for multi-modal models + ) +print(out) ``` -The Flow will interact with the AI model, generate responses, and store the conversation history. +This code will initiate a conversation with the language model, and you'll receive responses accordingly. -### 4.3 Interacting with the AI +### 4. Advanced Flow Concepts -Depending on whether you set the `interactive` parameter to `True` or `False` during Flow initialization, you can interact with the AI in real-time or simply receive the generated responses in sequence. +In this section, we'll explore advanced concepts that can enhance your experience with the Swarms Flow module. -If `interactive` is set to `True`, you'll have a back-and-forth conversation with the AI, where you provide input after each AI response. +#### 4.1 Custom Stopping Conditions -### 4.4 Dynamic Temperature Handling +You can define custom stopping conditions for your Flow. For example, you might want the Flow to stop when a specific word is mentioned in the response. -If you set the `dynamic_temperature - -` parameter to `True` during Flow initialization, the Flow class will handle temperature dynamically. Temperature affects the randomness of responses generated by the AI model. The dynamic temperature feature allows the temperature to change randomly within a specified range, enhancing response diversity. - -## 5. Customizing Flow Behavior +```python +# Custom stopping condition example +def stop_when_repeats(response: str) -> bool: + return "Stop" in response.lower() -The Flow feature provides various customization options to tailor its behavior to your specific use case. +# Set the stopping condition in your Flow +flow.stopping_condition = stop_when_repeats +``` -### 5.1 Stopping Conditions +#### 4.2 Dynamic Temperature Handling -You can define custom stopping conditions that instruct the Flow to stop generating responses based on specific criteria. For example, you can stop when a certain keyword appears in the response: +Dynamic temperature handling allows you to adjust the temperature attribute of the language model during the conversation. ```python +# Enable dynamic temperature handling in your Flow +flow.dynamic_temperature = True +``` -def custom_stopping_condition(response: str) -> bool: - -    return "Stop" in response.lower() +This feature randomly changes the temperature attribute for each loop, providing a variety of responses. -# Set the custom stopping condition when creating the Flow instance. +#### 4.3 Providing Feedback on Responses -flow = Flow( +You can provide feedback on responses generated by the language model using the `provide_feedback` method. -    llm=llm, +```python +# Provide feedback on a response +flow.provide_feedback("The response was helpful.") +``` -    max_loops=5, +This feedback can be valuable for improving the quality of responses. -    stopping_condition=custom_stopping_condition, +#### 4.4 Retry Mechanism -    # Other parameters... 
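    # The stopping condition is called on every new response; returning True
    # ends the generation loop early.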
+In case of errors or issues during conversation, you can implement a retry mechanism to attempt generating a response again. -) +```python +# Set the number + of retry attempts and interval +flow.retry_attempts = 3 +flow.retry_interval = 1 # in seconds ``` -### 5.2 Retry Mechanism +#### 4.5 Response Filtering -In case of errors or issues during AI model interactions, you can configure a retry mechanism. Specify the number of retry attempts and the interval between retries: +You can add response filters to filter out certain words or phrases from the responses. ```python +# Add a response filter +flow.add_response_filter("inappropriate_word") +``` -flow = Flow( - -    llm=llm, - -    max_loops=5, - -    retry_attempts=3, +This helps in controlling the content generated by the language model. -    retry_interval=1, +#### 4.6 Interactive Mode -    # Other parameters... - -) +Interactive mode allows you to have a back-and-forth conversation with the language model. When enabled, the Flow will prompt for user input after each response. +```python +# Enable interactive mode +flow.interactive = True ``` -### 5.3 Loop Interval - -The `loop_interval` parameter determines the time delay between consecutive interactions with the AI model. Adjust this value based on your desired pace of conversation. +This is useful for real-time conversations with the model. -### 5.4 Interactive Mode +### 5. Saving and Loading Flows -Set the `interactive` parameter to `True` if you want to have real-time conversations with the AI model. In interactive mode, you provide input after each AI response. +#### 5.1 Saving Flow State -## 6. Saving and Loading Flows +You can save the state of your Flow, including the conversation history, for future use. -You can save and load Flow instances to maintain conversation history or switch between different tasks. +```python +# Save the Flow state to a file +flow.save("path/to/flow_state.json") +``` -### 6.1 Saving a Flow +#### 5.2 Loading a Saved Flow -To save a Flow instance along with its conversation history: +To continue a conversation or reuse a Flow, you can load a previously saved state. ```python - -flow.save("path/flow_history.json") - +# Load a saved Flow state +flow.load("path/to/flow_state.json") ``` -This stores the conversation history as a JSON file for future reference. +### 6. Troubleshooting and Tips -### 6.2 Loading a Saved Flow +#### 6.1 Analyzing Feedback -To load a previously saved Flow instance: +You can analyze the feedback provided during the conversation to identify issues and improve the quality of interactions. ```python - -loaded_flow = Flow(llm=llm, max_loops=5) - -loaded_flow.load("path/flow_history.json") - +# Analyze feedback +flow.analyze_feedback() ``` -This loads the conversation history into the new Flow instance, allowing you to continue the conversation or analyze past interactions. +#### 6.2 Troubleshooting Common Issues -## 7. Analyzing Feedback and Undoing Actions +If you encounter issues during conversation, refer to the troubleshooting section for guidance on resolving common problems. -The Flow feature supports feedback collection and the ability to undo actions. +# 7. Conclusion: Empowering Developers with Swarms Framework and Flow Structure for Automation -### 7.1 Providing Feedback +In a world where digital tasks continue to multiply and diversify, the need for automation has never been more critical. 
Developers find themselves at the forefront of this automation revolution, tasked with creating reliable solutions that can seamlessly handle an array of digital tasks. Enter the Swarms framework and the Flow structure, a dynamic duo that empowers developers to build autonomous agents capable of efficiently and effectively automating a wide range of digital tasks. -You can provide feedback on AI responses within the Flow. Feedback can be used to analyze the quality of responses or highlight issues: +## The Automation Imperative -```python +Automation is the driving force behind increased efficiency, productivity, and scalability across various industries. From mundane data entry and content generation to complex data analysis and customer support, the possibilities for automation are vast. Developers play a pivotal role in realizing these possibilities, and they require robust tools and frameworks to do so effectively. -flow.provide_feedback("The response was unclear.") +## Swarms Framework: A Developer's Swiss Army Knife -``` +The Swarms framework emerges as a comprehensive toolkit designed to empower developers in their automation endeavors. It equips developers with the tools and capabilities needed to create autonomous agents capable of interacting with language models, orchestrating multi-step workflows, and handling error scenarios gracefully. Let's explore why the Swarms framework is a game-changer for developers: -### 7.2 Undoing the Last Action +### 1. Language Model Integration -If you want to undo the last action taken within the Flow and revert to the previous state, you can use the `undo_last` method: +One of the standout features of Swarms is its seamless integration with state-of-the-art language models, such as GPT-3. These language models have the ability to understand and generate human-like text, making them invaluable for tasks like content creation, translation, code generation, and more. -```python +By leveraging Swarms, developers can effortlessly incorporate these language models into their applications and workflows. For instance, they can build chatbots that provide intelligent responses to customer inquiries or generate lengthy documents with minimal manual intervention. This not only saves time but also enhances overall productivity. -previous_state, message = flow.undo_last() +### 2. Multi-Step Conversational Flows -``` +Swarms excels in orchestrating multi-step conversational flows. Developers can define intricate sequences of interactions, where the system generates responses, and users provide input at various stages. This functionality is a game-changer for building chatbots, virtual assistants, or any application requiring dynamic and context-aware conversations. -This helps you correct or modify previous interactions. +These conversational flows can be tailored to handle a wide range of scenarios, from customer support interactions to data analysis. By providing a structured framework for conversations, Swarms empowers developers to create intelligent and interactive systems that mimic human-like interactions. -### 7.3 Response Filtering +### 3. Customization and Extensibility -The Flow feature allows you to add response filters to filter out specific words or content from AI responses. This can be useful for content moderation or filtering sensitive information: +Every development project comes with its unique requirements and challenges. Swarms acknowledges this by offering a high degree of customization and extensibility. 
Developers can define custom stopping conditions, implement dynamic temperature handling for language models, and even add response filters to control the generated content. -```python +Moreover, Swarms supports an interactive mode, allowing developers to engage in real-time conversations with the language model. This feature is invaluable for rapid prototyping, testing, and fine-tuning the behavior of autonomous agents. -flow.add_response_filter("sensitive_word") +### 4. Feedback-Driven Improvement -``` +Swarms encourages the collection of feedback on generated responses. Developers and users alike can provide feedback to improve the quality and accuracy of interactions over time. This iterative feedback loop ensures that applications built with Swarms continually improve, becoming more reliable and capable of autonomously handling complex tasks. -The response filters will replace filtered words with placeholders, ensuring that sensitive content is not displayed. +### 5. Handling Errors and Retries -## 8. Advanced Features +Error handling is a critical aspect of any automation framework. Swarms simplifies this process by offering a retry mechanism. In case of errors or issues during conversations, developers can configure the framework to attempt generating responses again, ensuring robust and resilient automation. -### 8.1 Streamed Generation +### 6. Saving and Loading Flows -Streamed generation allows you to generate responses token by token in real-time. This can be useful for creating interactive and dynamic conversations: +Developers can save the state of their conversational flows, allowing for seamless continuity and reusability. This feature is particularly beneficial when working on long-term projects or scenarios where conversations need to be resumed from a specific point. -```python - -response = flow.streamed_generation("Generate a report on finance") - -``` +## Unleashing the Potential of Automation with Swarms and Flow -This function streams each token of the response with a slight delay, simulating real-time conversation. +The combined power of the Swarms framework and the Flow structure creates a synergy that empowers developers to automate a multitude of digital tasks. These tools provide versatility, customization, and extensibility, making them ideal for a wide range of applications. Let's explore some of the remarkable ways in which developers can leverage Swarms and Flow for automation: -### 8.2 Real-time Token Generation +### 1. Customer Support and Service Automation -For even finer control over token generation, you can use the `streamed_token_generation` method. This generates tokens one by one, allowing you to have precise control over the conversation pace: +Swarms and Flow enable the creation of AI-powered customer support chatbots that excel at handling common inquiries, troubleshooting issues, and escalating complex problems to human agents when necessary. This level of automation not only reduces response times but also enhances the overall customer experience. -```python +### 2. Content Generation and Curation -for token in flow.streamed_token_generation("Generate a report on finance"): +Developers can harness the power of Swarms and Flow to automate content generation tasks, such as writing articles, reports, or product descriptions. By providing an initial prompt, the system can generate high-quality content that adheres to specific guidelines and styles. 
-    print(token, end="") +Furthermore, these tools can automate content curation by summarizing lengthy articles, extracting key insights from research papers, and even translating content into multiple languages. -``` +### 3. Data Analysis and Reporting -## 9. Best Practices +Automation in data analysis and reporting is fundamental for data-driven decision-making. Swarms and Flow simplify these processes by enabling developers to create flows that interact with databases, query data, and generate reports based on user-defined criteria. This empowers businesses to derive insights quickly and make informed decisions. -To create effective and user-friendly interactions with the AI model using the Flow feature, consider the following best practices: +### 4. Programming and Code Generation -### 9.1 Conciseness and Clarity +Swarms and Flow streamline code generation and programming tasks. Developers can create flows to assist in writing code snippets, auto-completing code, or providing solutions to common programming challenges. This accelerates software development and reduces the likelihood of coding errors. -Ensure that your prompts and responses are concise and to the point. Avoid unnecessary verbosity. +### 5. Language Translation and Localization -### 9.2 Active Voice +With the ability to interface with language models, Swarms and Flow can automate language translation tasks. They can seamlessly translate content from one language to another, making it easier for businesses to reach global audiences and localize their offerings effectively. -Use an active voice when giving instructions or prompts. For example, say, "Generate a report" instead of "A report should be generated." +### 6. Virtual Assistants and AI Applications -### 9.3 Highlighting Important Points +Developers can build virtual assistants and AI applications that offer personalized experiences. These applications can automate tasks such as setting reminders, answering questions, providing recommendations, and much more. Swarms and Flow provide the foundation for creating intelligent, interactive virtual assistants. -Use formatting options like bold text, italics, or color highlights to draw attention to important points within the conversation. +## Future Opportunities and Challenges -### 9.4 Consistent Style +As Swarms and Flow continue to evolve, developers can look forward to even more advanced features and capabilities. However, with great power comes great responsibility. Developers must remain vigilant about the ethical use of automation and language models. Ensuring that automated systems provide accurate and unbiased information is an ongoing challenge that the developer community must address. -Maintain a consistent tone and style throughout the conversation. If there is a style guide or specific formatting conventions, adhere to them. +## In Conclusion -## 10. Conclusion +The Swarms framework and the Flow structure empower developers to automate an extensive array of digital tasks by offering versatility, customization, and extensibility. From natural language understanding and generation to orchestrating multi-step conversational flows, these tools simplify complex automation scenarios. -In conclusion, the Flow feature in the Swarms module provides a versatile and interactive way to interact with AI language models. 
By following this walkthrough guide and considering the best practices, you can effectively harness the power of Flow for a wide range of applications, from generating content to performing complex tasks. +By embracing Swarms and Flow, developers can not only save time and resources but also unlock new opportunities for innovation. The ability to harness the power of language models and create intelligent, interactive applications opens doors to a future where automation plays a pivotal role in our digital lives. -Start creating your own interactive conversations and enjoy the benefits of seamless AI interactions with the Flow feature. Happy coding! \ No newline at end of file +As the developer community continues to explore the capabilities of Swarms and Flow, it is essential to approach automation with responsibility, ethics, and a commitment to delivering valuable, user-centric experiences. With Swarms and Flow, the future of automation is in the hands of developers, ready to create a more efficient, intelligent, and automated world. \ No newline at end of file From a50f91ac2afc3e06d672c2caf02d849d5630622a Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 2 Nov 2023 18:35:50 -0400 Subject: [PATCH 09/15] flow guide from medium --- docs/examples/flow.md | 283 ++++++++++++++++++++++++------------------ 1 file changed, 159 insertions(+), 124 deletions(-) diff --git a/docs/examples/flow.md b/docs/examples/flow.md index 8a82c8ca..454bd244 100644 --- a/docs/examples/flow.md +++ b/docs/examples/flow.md @@ -1,107 +1,126 @@ -# Walkthrough Guide: Getting Started with Swarms Module's Flow Feature +# Reliable Enterprise-Grade Autonomous Agents in Less Than 5 lines of Code +======================================================================== -## Introduction +Welcome to the walkthrough guide for beginners on using the "Flow" feature within the Swarms framework. This guide is designed to help you understand and utilize the capabilities of the Flow class for seamless and reliable interactions with autonomous agents. -Welcome to the walkthrough guide for beginners on using the "Flow" feature within the Swarms module. This guide is designed to help you understand and utilize the capabilities of the Flow class for seamless interactions with AI language models. +## Official Swarms Links +===================== +[Swarms website:](https://www.swarms.world/) -### Table of Contents +[Swarms Github:](https://github.com/kyegomez/swarms) -1. **Introduction to Swarms Flow Module** - - 1.1 What is Swarms? - - 1.2 Understanding the Flow Module +[Swarms docs:](https://swarms.apac.ai/en/latest/) -2. **Setting Up Your Development Environment** - - 2.1 Installing Required Dependencies - - 2.2 API Key Setup - - 2.3 Creating Your First Flow +[Swarm Community!](https://discord.gg/39j5kwTuW4)! -3. **Creating Your First Flow** - - 3.1 Importing Necessary Libraries - - 3.2 Defining Constants - - 3.3 Initializing the Flow Object - - 3.4 Initializing the Language Model - - 3.5 Running Your Flow - - 3.6 Understanding Flow Options +[Book a call with The Swarm Corporation here if you're interested in high performance custom swarms!](https://calendly.com/swarm-corp/30min) -4. **Advanced Flow Concepts** - - 4.1 Custom Stopping Conditions - - 4.2 Dynamic Temperature Handling - - 4.3 Providing Feedback on Responses - - 4.4 Retry Mechanism - - 4.5 Response Filtering - - 4.6 Interactive Mode +Now let's begin... -5. 
**Saving and Loading Flows** - - 5.1 Saving Flow State - - 5.2 Loading a Saved Flow +## [Table of Contents](https://github.com/kyegomez/swarms) +=========================================================================================================== -6. **Troubleshooting and Tips** - - 6.1 Analyzing Feedback - - 6.2 Troubleshooting Common Issues +1. Introduction to Swarms Flow Module -7. **Conclusion** +- 1.1 What is Swarms? +- 1.2 Understanding the Flow Module ---- +2. Setting Up Your Development Environment -### 1. Introduction to Swarms Flow Module +- 2.1 Installing Required Dependencies +- 2.2 API Key Setup +- 2.3 Creating Your First Flow -#### 1.1 What is Swarms? +3. Creating Your First Flow -Swarms is a powerful framework designed to provide tools and capabilities for working with language models and automating various tasks. It allows developers to interact with language models seamlessly. +- 3.1 Importing Necessary Libraries +- 3.2 Defining Constants +- 3.3 Initializing the Flow Object +- 3.4 Initializing the Language Model +- 3.5 Running Your Flow +- 3.6 Understanding Flow Options -## 1.2 Understanding the Flow Feature +4. Advanced Flow Concepts -### What is the Flow Feature? +- 4.1 Custom Stopping Conditions +- 4.2 Dynamic Temperature Handling +- 4.3 Providing Feedback on Responses +- 4.4 Retry Mechanism +- 4.5 Response Filtering +- 4.6 Interactive Mode -The Flow feature is a powerful component of the Swarms framework that allows developers to create a sequential, conversational interaction with AI language models. It enables developers to build multi-step conversations, generate long-form content, and perform complex tasks using AI. The Flow class provides autonomy to language models, enabling them to generate responses in a structured manner. +5. Saving and Loading Flows -### Key Concepts +- 5.1 Saving Flow State +- 5.2 Loading a Saved Flow -Before diving into the practical aspects, let's clarify some key concepts related to the Flow feature: +6. Troubleshooting and Tips + +- 6.1 Analyzing Feedback +- 6.2 Troubleshooting Common Issues + +7. Conclusion -- **Flow:** A Flow is an instance of the Flow class that represents an ongoing interaction with an AI language model. It consists of a series of steps and responses. +## [1. Introduction to Swarms Flow Module](https://github.com/kyegomez/swarms) +=================================================================================================================================================== + +### [1.1 What is Swarms?](https://github.com/kyegomez/swarms) +------------------------------------------------------------------------------------------------------------- + +Swarms is a powerful framework designed to provide tools and capabilities for working with language models and automating various tasks. It allows developers to interact with language models seamlessly. + +## 1.2 Understanding the Flow Feature +================================== -- **Stopping Condition:** A stopping condition is a criterion that, when met, allows the Flow to stop generating responses. This can be user-defined and can depend on the content of the responses. +### [What is the Flow Feature?](https://github.com/kyegomez/swarms) +-------------------------------------------------------------------------------------------------------------------------- -- **Loop Interval:** The loop interval specifies the time delay between consecutive interactions with the AI model. 
+The Flow feature is a powerful component of the Swarms framework that allows developers to create a sequential, conversational interaction with AI language models. It enables developers to build multi-step conversations, generate long-form content, and perform complex tasks using AI. The Flow class provides autonomy to language models, enabling them to generate responses in a structured manner. -- **Retry Mechanism:** In case of errors or failures during AI model interactions, the Flow can be configured to make multiple retry attempts with a specified interval. +### [Key Concepts](https://github.com/kyegomez/swarms) +------------------------------------------------------------------------------------------------- -- **Interactive Mode:** Interactive mode allows developers to have a back-and-forth conversation with the AI model, making it suitable for real-time interactions. +Before diving into the practical aspects, let's clarify some key concepts related to the Flow feature: +- Flow: A Flow is an instance of the Flow class that represents an ongoing interaction with an AI language model. It consists of a series of steps and responses. +- Stopping Condition: A stopping condition is a criterion that, when met, allows the Flow to stop generating responses. This can be user-defined and can depend on the content of the responses. +- Loop Interval: The loop interval specifies the time delay between consecutive interactions with the AI model. +- Retry Mechanism: In case of errors or failures during AI model interactions, the Flow can be configured to make multiple retry attempts with a specified interval. +- Interactive Mode: Interactive mode allows developers to have a back-and-forth conversation with the AI model, making it suitable for real-time interactions. -### 2. Setting Up Your Development Environment +## [2. Setting Up Your Development Environment](https://github.com/kyegomez/swarms) +============================================================================================================================================================= -#### 2.1 Installing Required Dependencies +### [2.1 Installing Required Dependencies](https://github.com/kyegomez/swarms) +------------------------------------------------------------------------------------------------------------------------------------------------ Before you can start using the Swarms Flow module, you need to set up your development environment. First, you'll need to install the necessary dependencies, including Swarms itself. -```bash # Install Swarms and required libraries -pip3 install --upgrade swarms -``` +`pip3 install --upgrade swarms` -#### 2 Creating Your First Flow +## [2. Creating Your First Flow](https://github.com/kyegomez/swarms) +----------------------------------------------------------------------------------------------------------------------------- Now, let's create your first Flow. A Flow represents a chain-like structure that allows you to engage in multi-step conversations with language models. The Flow structure is what gives an LLM autonomy. It's the Mitochondria of an autonomous agent. 
-```python # Import necessary modules +```python from swarms.models import OpenAIChat # Zephr, Mistral from swarms.structs import Flow -api_key = "" +api_key = ""# Initialize the language model (LLM) +llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000)# Initialize the Flow object -# Initialize the language model (LLM) -llm = OpenAIChat(openai_api_key=api_key, temperature=0.5, max_tokens=3000) +flow = Flow(llm=llm, max_loops=5)# Run the flow +out = flow.run("Create an financial analysis on the following metrics") +print(out) -# Initialize the Flow object -flow = Flow(llm=llm, max_loops=5) ``` - -#### 3.3 Initializing the Flow Object +### [3. Initializing the Flow Object](https://github.com/kyegomez/swarms) +---------------------------------------------------------------------------------------------------------------------------------------- Create a Flow object that will be the backbone of your conversational flow. @@ -120,11 +139,12 @@ flow = Flow( ) ``` -#### 3.4 Initializing the Language Model +### [3.2 Initializing the Language Model](https://github.com/kyegomez/swarms) +---------------------------------------------------------------------------------------------------------------------------------------------- Initialize the language model (LLM) that your Flow will interact with. In this example, we're using OpenAI's GPT-3 as the LLM. -- You can also use `Mistral` or `Zephr` or any of other models! +- You can also use `Mistral` or `Zephr` or any of other models! ```python # Initialize the language model (LLM) @@ -135,13 +155,14 @@ llm = OpenAIChat( ) ``` -#### 3.5 Running Your Flow +### [3.3 Running Your Flow](https://github.com/kyegomez/swarms) +------------------------------------------------------------------------------------------------------------------ Now, you're ready to run your Flow and start interacting with the language model. If you are using a multi modality model, you can pass in the image path as another parameter -```python +``` # Run your Flow out = flow.run( "Generate a 10,000 word blog on health and wellness.", @@ -153,191 +174,205 @@ print(out) This code will initiate a conversation with the language model, and you'll receive responses accordingly. -### 4. Advanced Flow Concepts +## [4. Advanced Flow Concepts](https://github.com/kyegomez/swarms) +=========================================================================================================================== In this section, we'll explore advanced concepts that can enhance your experience with the Swarms Flow module. -#### 4.1 Custom Stopping Conditions +### [4.1 Custom Stopping Conditions](https://github.com/kyegomez/swarms) You can define custom stopping conditions for your Flow. For example, you might want the Flow to stop when a specific word is mentioned in the response. -```python # Custom stopping condition example +```python def stop_when_repeats(response: str) -> bool: return "Stop" in response.lower() +``` # Set the stopping condition in your Flow -flow.stopping_condition = stop_when_repeats -``` +```flow.stopping_condition = stop_when_repeats``` -#### 4.2 Dynamic Temperature Handling +### [4.2 Dynamic Temperature Handling](https://github.com/kyegomez/swarms) +---------------------------------------------------------------------------------------------------------------------------------------- Dynamic temperature handling allows you to adjust the temperature attribute of the language model during the conversation. 
-```python # Enable dynamic temperature handling in your Flow
-flow.dynamic_temperature = True
-```
+`flow.dynamic_temperature = True`

This feature randomly changes the temperature attribute for each loop, providing a variety of responses.

-#### 4.3 Providing Feedback on Responses
+### [4.3 Providing Feedback on Responses](https://github.com/kyegomez/swarms)
+----------------------------------------------------------------------------------------------------------------------------------------------

-You can provide feedback on responses generated by the language model using the `provide_feedback` method.
+You can provide feedback on responses generated by the language model using the `provide_feedback` method.

-```python
-# Provide feedback on a response
-flow.provide_feedback("The response was helpful.")
-```
+- Provide feedback on a response
+`flow.provide_feedback("The response was helpful.")`

This feedback can be valuable for improving the quality of responses.

-#### 4.4 Retry Mechanism
+### [4.4 Retry Mechanism](https://github.com/kyegomez/swarms)
+--------------------------------------------------------------------------------------------------------------

In case of errors or issues during a conversation, you can implement a retry mechanism to attempt generating a response again.

 ```python
+# Set the number of retry attempts and interval
-# Set the number
-
- of retry attempts and interval
 flow.retry_attempts = 3
 flow.retry_interval = 1  # in seconds
 ```
-
-#### 4.5 Response Filtering
+### [4.5 Response Filtering](https://github.com/kyegomez/swarms)
+--------------------------------------------------------------------------------------------------------------------

You can add response filters to filter out certain words or phrases from the responses.

-```python # Add a response filter
+```python
flow.add_response_filter("inappropriate_word")
```
-
This helps in controlling the content generated by the language model.

-#### 4.6 Interactive Mode
+### [4.6 Interactive Mode](https://github.com/kyegomez/swarms)
+----------------------------------------------------------------------------------------------------------------

Interactive mode allows you to have a back-and-forth conversation with the language model. When enabled, the Flow will prompt for user input after each response.

-```python # Enable interactive mode
-flow.interactive = True
-```
+`flow.interactive = True`

This is useful for real-time conversations with the model.

-### 5. Saving and Loading Flows
+## [5. Saving and Loading Flows](https://github.com/kyegomez/swarms)
+===============================================================================================================================

-#### 5.1 Saving Flow State
+### [5.1 Saving Flow State](https://github.com/kyegomez/swarms)
+------------------------------------------------------------------------------------------------------------------

You can save the state of your Flow, including the conversation history, for future use.

-```python # Save the Flow state to a file
-flow.save("path/to/flow_state.json")
-```
+`flow.save("path/to/flow_state.json")`

-#### 5.2 Loading a Saved Flow
+### [5.2 Loading a Saved Flow](https://github.com/kyegomez/swarms)
+------------------------------------------------------------------------------------------------------------------------

To continue a conversation or reuse a Flow, you can load a previously saved state.
-```python # Load a saved Flow state
-flow.load("path/to/flow_state.json")
-```
+`flow.load("path/to/flow_state.json")`

-### 6. Troubleshooting and Tips
+## [6. Troubleshooting and Tips](https://github.com/kyegomez/swarms)
+===============================================================================================================================

-#### 6.1 Analyzing Feedback
+### [6.1 Analyzing Feedback](https://github.com/kyegomez/swarms)
+--------------------------------------------------------------------------------------------------------------------

You can analyze the feedback provided during the conversation to identify issues and improve the quality of interactions.

-```python # Analyze feedback
-flow.analyze_feedback()
-```
+`flow.analyze_feedback()`

-#### 6.2 Troubleshooting Common Issues
+### [6.2 Troubleshooting Common Issues](https://github.com/kyegomez/swarms)
+------------------------------------------------------------------------------------------------------------------------------------------

If you encounter issues during a conversation, refer to the troubleshooting section for guidance on resolving common problems.

-# 7. Conclusion: Empowering Developers with Swarms Framework and Flow Structure for Automation
+# [7. Conclusion: Empowering Developers with Swarms Framework and Flow Structure for Automation](https://github.com/kyegomez/swarms)
+================================================================================================================================================================================================================================================================

In a world where digital tasks continue to multiply and diversify, the need for automation has never been more critical. Developers find themselves at the forefront of this automation revolution, tasked with creating reliable solutions that can seamlessly handle an array of digital tasks. Enter the Swarms framework and the Flow structure, a dynamic duo that empowers developers to build autonomous agents capable of efficiently and effectively automating a wide range of digital tasks.

-## The Automation Imperative
+[The Automation Imperative](https://github.com/kyegomez/swarms)
+---------------------------------------------------------------------------------------------------------------------------

Automation is the driving force behind increased efficiency, productivity, and scalability across various industries. From mundane data entry and content generation to complex data analysis and customer support, the possibilities for automation are vast. Developers play a pivotal role in realizing these possibilities, and they require robust tools and frameworks to do so effectively.

-## Swarms Framework: A Developer's Swiss Army Knife
+[Swarms Framework: A Developer's Swiss Army Knife](https://github.com/kyegomez/swarms)
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------

The Swarms framework emerges as a comprehensive toolkit designed to empower developers in their automation endeavors. It equips developers with the tools and capabilities needed to create autonomous agents capable of interacting with language models, orchestrating multi-step workflows, and handling error scenarios gracefully. Let's explore why the Swarms framework is a game-changer for developers:

-### 1. Language Model Integration
+[1. 
Language Model Integration](https://github.com/kyegomez/swarms) +----------------------------------------------------------------------------------------------------------------------------------- One of the standout features of Swarms is its seamless integration with state-of-the-art language models, such as GPT-3. These language models have the ability to understand and generate human-like text, making them invaluable for tasks like content creation, translation, code generation, and more. By leveraging Swarms, developers can effortlessly incorporate these language models into their applications and workflows. For instance, they can build chatbots that provide intelligent responses to customer inquiries or generate lengthy documents with minimal manual intervention. This not only saves time but also enhances overall productivity. -### 2. Multi-Step Conversational Flows +[2. Multi-Step Conversational Flows](https://github.com/kyegomez/swarms) +--------------------------------------------------------------------------------------------------------------------------------------------- Swarms excels in orchestrating multi-step conversational flows. Developers can define intricate sequences of interactions, where the system generates responses, and users provide input at various stages. This functionality is a game-changer for building chatbots, virtual assistants, or any application requiring dynamic and context-aware conversations. These conversational flows can be tailored to handle a wide range of scenarios, from customer support interactions to data analysis. By providing a structured framework for conversations, Swarms empowers developers to create intelligent and interactive systems that mimic human-like interactions. -### 3. Customization and Extensibility +[3. Customization and Extensibility](https://github.com/kyegomez/swarms) +--------------------------------------------------------------------------------------------------------------------------------------------- Every development project comes with its unique requirements and challenges. Swarms acknowledges this by offering a high degree of customization and extensibility. Developers can define custom stopping conditions, implement dynamic temperature handling for language models, and even add response filters to control the generated content. Moreover, Swarms supports an interactive mode, allowing developers to engage in real-time conversations with the language model. This feature is invaluable for rapid prototyping, testing, and fine-tuning the behavior of autonomous agents. -### 4. Feedback-Driven Improvement +[4. Feedback-Driven Improvement](https://github.com/kyegomez/swarms) +------------------------------------------------------------------------------------------------------------------------------------- Swarms encourages the collection of feedback on generated responses. Developers and users alike can provide feedback to improve the quality and accuracy of interactions over time. This iterative feedback loop ensures that applications built with Swarms continually improve, becoming more reliable and capable of autonomously handling complex tasks. -### 5. Handling Errors and Retries +[5. Handling Errors and Retries](https://github.com/kyegomez/swarms) +------------------------------------------------------------------------------------------------------------------------------------- Error handling is a critical aspect of any automation framework. Swarms simplifies this process by offering a retry mechanism. 
In case of errors or issues during conversations, developers can configure the framework to attempt generating responses again, ensuring robust and resilient automation. -### 6. Saving and Loading Flows +[6. Saving and Loading Flows](https://github.com/kyegomez/swarms) +------------------------------------------------------------------------------------------------------------------------------- Developers can save the state of their conversational flows, allowing for seamless continuity and reusability. This feature is particularly beneficial when working on long-term projects or scenarios where conversations need to be resumed from a specific point. -## Unleashing the Potential of Automation with Swarms and Flow +[Unleashing the Potential of Automation with Swarms and Flow](https://github.com/kyegomez/swarms) +=============================================================================================================================================================================================== The combined power of the Swarms framework and the Flow structure creates a synergy that empowers developers to automate a multitude of digital tasks. These tools provide versatility, customization, and extensibility, making them ideal for a wide range of applications. Let's explore some of the remarkable ways in which developers can leverage Swarms and Flow for automation: -### 1. Customer Support and Service Automation +[1. Customer Support and Service Automation](https://github.com/kyegomez/swarms) +------------------------------------------------------------------------------------------------------------------------------------------------------------- Swarms and Flow enable the creation of AI-powered customer support chatbots that excel at handling common inquiries, troubleshooting issues, and escalating complex problems to human agents when necessary. This level of automation not only reduces response times but also enhances the overall customer experience. -### 2. Content Generation and Curation +[2. Content Generation and Curation](https://github.com/kyegomez/swarms) +--------------------------------------------------------------------------------------------------------------------------------------------- Developers can harness the power of Swarms and Flow to automate content generation tasks, such as writing articles, reports, or product descriptions. By providing an initial prompt, the system can generate high-quality content that adheres to specific guidelines and styles. Furthermore, these tools can automate content curation by summarizing lengthy articles, extracting key insights from research papers, and even translating content into multiple languages. -### 3. Data Analysis and Reporting +[3. Data Analysis and Reporting](https://github.com/kyegomez/swarms) +------------------------------------------------------------------------------------------------------------------------------------- Automation in data analysis and reporting is fundamental for data-driven decision-making. Swarms and Flow simplify these processes by enabling developers to create flows that interact with databases, query data, and generate reports based on user-defined criteria. This empowers businesses to derive insights quickly and make informed decisions. -### 4. Programming and Code Generation +[4. 
Programming and Code Generation](https://github.com/kyegomez/swarms) +--------------------------------------------------------------------------------------------------------------------------------------------- Swarms and Flow streamline code generation and programming tasks. Developers can create flows to assist in writing code snippets, auto-completing code, or providing solutions to common programming challenges. This accelerates software development and reduces the likelihood of coding errors. -### 5. Language Translation and Localization +[5. Language Translation and Localization](https://github.com/kyegomez/swarms) +--------------------------------------------------------------------------------------------------------------------------------------------------------- With the ability to interface with language models, Swarms and Flow can automate language translation tasks. They can seamlessly translate content from one language to another, making it easier for businesses to reach global audiences and localize their offerings effectively. -### 6. Virtual Assistants and AI Applications +[6. Virtual Assistants and AI Applications](https://github.com/kyegomez/swarms) +----------------------------------------------------------------------------------------------------------------------------------------------------------- Developers can build virtual assistants and AI applications that offer personalized experiences. These applications can automate tasks such as setting reminders, answering questions, providing recommendations, and much more. Swarms and Flow provide the foundation for creating intelligent, interactive virtual assistants. -## Future Opportunities and Challenges +[Future Opportunities and Challenges](https://github.com/kyegomez/swarms) +----------------------------------------------------------------------------------------------------------------------------------------------- As Swarms and Flow continue to evolve, developers can look forward to even more advanced features and capabilities. However, with great power comes great responsibility. Developers must remain vigilant about the ethical use of automation and language models. Ensuring that automated systems provide accurate and unbiased information is an ongoing challenge that the developer community must address. -## In Conclusion +# [In Conclusion](https://github.com/kyegomez/swarms) +=================================================================================================== The Swarms framework and the Flow structure empower developers to automate an extensive array of digital tasks by offering versatility, customization, and extensibility. From natural language understanding and generation to orchestrating multi-step conversational flows, these tools simplify complex automation scenarios. 
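+
+To close the loop on everything covered in this guide, here is a compact end-to-end recap that combines the pieces shown above: a custom stopping condition, retry settings, and state saving. (This is an illustrative sketch built only from the snippets in this guide; the API key is a placeholder.)
+
+```python
+from swarms.models import OpenAIChat
+from swarms.structs import Flow
+
+llm = OpenAIChat(openai_api_key="your-api-key", temperature=0.5, max_tokens=3000)
+
+
+def stop_when_done(response: str) -> bool:
+    # Stop looping once the model signals completion
+    return "done" in response.lower()
+
+
+flow = Flow(llm=llm, max_loops=3)
+flow.stopping_condition = stop_when_done
+flow.retry_attempts = 2
+flow.retry_interval = 1  # in seconds
+
+out = flow.run("Write a short wellness checklist and say 'done' when finished.")
+print(out)
+
+# Persist the conversation so it can be resumed later
+flow.save("wellness_flow_state.json")
+```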
From cb936eaef774e7aa770b078afef46033819e6cbe Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 2 Nov 2023 18:37:48 -0400 Subject: [PATCH 10/15] structs docs --- mkdocs.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index f9a38fca..bf155336 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -118,7 +118,8 @@ nav: - PdfChunker: "swarms/chunkers/pdf_chunker.md" - Walkthroughs: - Overview: "examples/index.md" - - Flow: "examples/flow.md" + - Structs: + - Flow: "examples/flow.md" - Agents: - OmniAgent: "examples/omni_agent.md" - Worker: From 8dfb1d33d00f51c1c9389c5dd93fc14cc20a03cd Mon Sep 17 00:00:00 2001 From: Kye Date: Fri, 3 Nov 2023 11:10:49 -0400 Subject: [PATCH 11/15] HuggingfaceLLM, jina embeds --- .github/workflows/cos_integration.yml | 2 +- CONTRIBUTING.md | 4 +- flow.py | 8 +- simple_agent.py | 1 + swarms/agents/stream_response.py | 6 - swarms/embeddings/__init__.py | 2 - swarms/embeddings/embed.py | 10 - swarms/memory/chroma.py | 2 +- .../base.py => models/embeddings_base.py} | 0 swarms/models/huggingface.py | 68 +++-- swarms/models/jina_embeds.py | 214 ++++++++++++++ .../openai.py => models/openai_embeddings.py} | 2 +- swarms/{embeddings => models}/pegasus.py | 0 swarms/{embeddings => models}/simple_ada.py | 3 +- swarms/models/yarn_mistral.py | 265 ++++++++++++++++++ swarms/structs/flow.py | 11 +- swarms/structs/sequential_workflow.py | 20 ++ tests/embeddings/pegasus.py | 2 +- 18 files changed, 571 insertions(+), 49 deletions(-) delete mode 100644 swarms/agents/stream_response.py delete mode 100644 swarms/embeddings/__init__.py delete mode 100644 swarms/embeddings/embed.py rename swarms/{embeddings/base.py => models/embeddings_base.py} (100%) create mode 100644 swarms/models/jina_embeds.py rename swarms/{embeddings/openai.py => models/openai_embeddings.py} (99%) rename swarms/{embeddings => models}/pegasus.py (100%) rename swarms/{embeddings => models}/simple_ada.py (99%) create mode 100644 swarms/models/yarn_mistral.py create mode 100644 swarms/structs/sequential_workflow.py diff --git a/.github/workflows/cos_integration.yml b/.github/workflows/cos_integration.yml index 7cdb41e9..0f3fc605 100644 --- a/.github/workflows/cos_integration.yml +++ b/.github/workflows/cos_integration.yml @@ -39,4 +39,4 @@ jobs: run: sphinx-build -b linkcheck docs build/docs - name: Run performance tests - run: pytest tests/performance \ No newline at end of file + run: find ./tests -name '*.py' -exec pytest {} \; \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e00478f1..bd9090de 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -97,8 +97,8 @@ To run the documentation, install the project requirements with `poetry install You can learn more about mkdocs on the [mkdocs website](https://www.mkdocs.org/). ## 🧪 tests - -[`pytests`](https://docs.pytest.org/en/7.1.x/) is used to run our tests. 
+- Run all the tests in the tests folder +`find ./tests -name '*.py' -exec pytest {} \;` ## 📄 license diff --git a/flow.py b/flow.py index fd7a02b2..d2c21ba8 100644 --- a/flow.py +++ b/flow.py @@ -11,7 +11,11 @@ llm = OpenAIChat( ) # Initialize the flow -flow = Flow(llm=llm, max_loops=5, dashboard=True,) +flow = Flow( + llm=llm, + max_loops=5, + dashboard=True, +) flow = Flow( llm=llm, @@ -28,4 +32,4 @@ flow = Flow( out = flow.run("Generate a 10,000 word blog on health and wellness.") -print(out) \ No newline at end of file +print(out) diff --git a/simple_agent.py b/simple_agent.py index 9ec9aaf6..515b83bc 100644 --- a/simple_agent.py +++ b/simple_agent.py @@ -19,6 +19,7 @@ flow = Flow( agent = SimpleAgent( name="Optimus Prime", flow=flow, + # Memory ) out = agent.run("Generate a 10,000 word blog on health and wellness.") diff --git a/swarms/agents/stream_response.py b/swarms/agents/stream_response.py deleted file mode 100644 index ecd29ff0..00000000 --- a/swarms/agents/stream_response.py +++ /dev/null @@ -1,6 +0,0 @@ -def stream(response): - """ - Yield the response token by token (word by word) from llm - """ - for token in response.split(): - yield token diff --git a/swarms/embeddings/__init__.py b/swarms/embeddings/__init__.py deleted file mode 100644 index 2c6c13b7..00000000 --- a/swarms/embeddings/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# from swarms.embeddings.pegasus import PegasusEmbedding -from swarms.embeddings.simple_ada import get_ada_embeddings diff --git a/swarms/embeddings/embed.py b/swarms/embeddings/embed.py deleted file mode 100644 index ce50e0cf..00000000 --- a/swarms/embeddings/embed.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file contains the function that embeds the input into a vector -from chromadb import EmbeddingFunction - - -def openai_embed(self, input, api_key, model_name): - openai = EmbeddingFunction.OpenAIEmbeddingFunction( - api_key=api_key, model_name=model_name - ) - embedding = openai(input) - return embedding diff --git a/swarms/memory/chroma.py b/swarms/memory/chroma.py index dc0399ef..422d0a67 100644 --- a/swarms/memory/chroma.py +++ b/swarms/memory/chroma.py @@ -17,7 +17,7 @@ from typing import ( import numpy as np from swarms.structs.document import Document -from swarms.embeddings.base import Embeddings +from swarms.models.embeddings_base import Embeddings from langchain.schema.vectorstore import VectorStore from langchain.utils import xor_args from langchain.vectorstores.utils import maximal_marginal_relevance diff --git a/swarms/embeddings/base.py b/swarms/models/embeddings_base.py similarity index 100% rename from swarms/embeddings/base.py rename to swarms/models/embeddings_base.py diff --git a/swarms/models/huggingface.py b/swarms/models/huggingface.py index 5b12bc76..f07edad3 100644 --- a/swarms/models/huggingface.py +++ b/swarms/models/huggingface.py @@ -3,6 +3,7 @@ import logging import torch from torch.nn.parallel import DistributedDataParallel as DDP from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig +from termcolor import colored class HuggingfaceLLM: @@ -20,13 +21,13 @@ class HuggingfaceLLM: # Usage ``` - from finetuning_suite import Inference + from swarms.models import HuggingfaceLLM model_id = "gpt2-small" - inference = Inference(model_id=model_id) + inference = HuggingfaceLLM(model_id=model_id) - prompt_text = "Once upon a time" - generated_text = inference(prompt_text) + task = "Once upon a time" + generated_text = inference(task) print(generated_text) ``` """ @@ -42,6 +43,8 @@ class HuggingfaceLLM: 
        # logger=None,
        distributed=False,
        decoding=False,
+        *args,
+        **kwargs,
    ):
        self.logger = logging.getLogger(__name__)
        self.device = (
@@ -53,7 +56,6 @@ class HuggingfaceLLM:
        self.distributed = distributed
        self.decoding = decoding
        self.model, self.tokenizer = None, None
-        # self.log = Logging()

        if self.distributed:
            assert (
@@ -104,12 +106,12 @@ class HuggingfaceLLM:
            self.logger.error(f"Failed to load the model or the tokenizer: {error}")
            raise

-    def run(self, prompt_text: str):
+    def run(self, task: str):
        """
        Generate a response based on the prompt text.

        Args:
-        - prompt_text (str): Text to prompt the model.
+        - task (str): Text to prompt the model.
        - max_length (int): Maximum length of the response.

        Returns:
@@ -119,10 +121,10 @@ class HuggingfaceLLM:

        max_length = self.max_length

+        self.print_dashboard(task)
+
        try:
-            inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to(
-                self.device
-            )
+            inputs = self.tokenizer.encode(task, return_tensors="pt").to(self.device)

            # self.log.start()

@@ -181,12 +183,12 @@ class HuggingfaceLLM:
        # Wrapping synchronous calls with async
        return self.run(task, *args, **kwargs)

-    def __call__(self, prompt_text: str):
+    def __call__(self, task: str):
        """
        Generate a response based on the prompt text.

        Args:
-        - prompt_text (str): Text to prompt the model.
+        - task (str): Text to prompt the model.
        - max_length (int): Maximum length of the response.

        Returns:
@@ -194,12 +196,12 @@ class HuggingfaceLLM:
        """
        self.load_model()

-        max_length = self.max_
+        max_length = self.max_length
+
+        self.print_dashboard(task)

        try:
-            inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to(
-                self.device
-            )
+            inputs = self.tokenizer.encode(task, return_tensors="pt").to(self.device)

            # self.log.start()

@@ -258,3 +260,37 @@ class HuggingfaceLLM:
            return {"allocated": allocated, "reserved": reserved}
        else:
            return {"error": "GPU not available"}
+
+    def print_dashboard(self, task: str):
+        """Print dashboard"""
+
+        # Build the colored dashboard string, then print it once
+        dashboard = colored(
+            f"""
+            HuggingfaceLLM Dashboard
+            --------------------------------------------
+            Model Name: {self.model_id}
+            Tokenizer: {self.tokenizer}
+            Model MaxLength: {self.max_length}
+            Model Device: {self.device}
+            Model Quantization: {self.quantize}
+            Model Quantization Config: {self.quantization_config}
+            Model Verbose: {self.verbose}
+            Model Distributed: {self.distributed}
+            Model Decoding: {self.decoding}
+
+            ----------------------------------------
+            Metadata:
+            Task Memory Consumption: {self.memory_consumption()}
+            GPU Available: {self.gpu_available()}
+            ----------------------------------------
+
+            Task Environment:
+            Task: {task}
+
+            """,
+            "red",
+        )
+
+        print(dashboard)
diff --git a/swarms/models/jina_embeds.py b/swarms/models/jina_embeds.py
new file mode 100644
index 00000000..a72b8a9e
--- /dev/null
+++ b/swarms/models/jina_embeds.py
@@ -0,0 +1,214 @@
+import logging
+
+import torch
+from numpy.linalg import norm
+from torch.nn.parallel import DistributedDataParallel as DDP
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
+
+
+def cos_sim(a, b):
+    return a @ b.T / (norm(a) * norm(b))
+
+
+class JinaEmbeddings:
+    """
+    A class for running inference on a given model.
+
+    Attributes:
+        model_id (str): The ID of the model.
+        device (str): The device to run the model on (either 'cuda' or 'cpu').
+        max_length (int): The maximum length of the output sequence.
+        quantize (bool, optional): Whether to use quantization. Defaults to False.
+ quantization_config (dict, optional): The configuration for quantization. + verbose (bool, optional): Whether to print verbose logs. Defaults to False. + logger (logging.Logger, optional): The logger to use. Defaults to a basic logger. + + # Usage + ``` + from swarms.models import JinaEmbeddings + + model = JinaEmbeddings() + + embeddings = model("Encode this text") + + print(embeddings) + + + ``` + """ + + def __init__( + self, + model_id: str, + device: str = None, + max_length: int = 500, + quantize: bool = False, + quantization_config: dict = None, + verbose=False, + # logger=None, + distributed=False, + decoding=False, + cos_sim: bool = False, + *args, + **kwargs, + ): + self.logger = logging.getLogger(__name__) + self.device = ( + device if device else ("cuda" if torch.cuda.is_available() else "cpu") + ) + self.model_id = model_id + self.max_length = max_length + self.verbose = verbose + self.distributed = distributed + self.decoding = decoding + self.model, self.tokenizer = None, None + # self.log = Logging() + self.cos_sim = cos_sim + + if self.distributed: + assert ( + torch.cuda.device_count() > 1 + ), "You need more than 1 gpu for distributed processing" + + bnb_config = None + if quantize: + if not quantization_config: + quantization_config = { + "load_in_4bit": True, + "bnb_4bit_use_double_quant": True, + "bnb_4bit_quant_type": "nf4", + "bnb_4bit_compute_dtype": torch.bfloat16, + } + bnb_config = BitsAndBytesConfig(**quantization_config) + + try: + self.model = AutoModelForCausalLM.from_pretrained( + self.model_id, quantization_config=bnb_config, trust_remote_code=True + ) + + self.model # .to(self.device) + except Exception as e: + self.logger.error(f"Failed to load the model or the tokenizer: {e}") + raise + + def load_model(self): + """Load the model""" + if not self.model or not self.tokenizer: + try: + self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) + + bnb_config = ( + BitsAndBytesConfig(**self.quantization_config) + if self.quantization_config + else None + ) + + self.model = AutoModelForCausalLM.from_pretrained( + self.model_id, + quantization_config=bnb_config, + trust_remote_code=True, + ).to(self.device) + + if self.distributed: + self.model = DDP(self.model) + except Exception as error: + self.logger.error(f"Failed to load the model or the tokenizer: {error}") + raise + + def run(self, task: str): + """ + Generate a response based on the prompt text. + + Args: + - task (str): Text to prompt the model. + - max_length (int): Maximum length of the response. + + Returns: + - Generated text (str). + """ + self.load_model() + + max_length = self.max_length + + try: + embeddings = self.model.encode([task], max_length=max_length) + + if self.cos_sim: + print(cos_sim(embeddings[0], embeddings[1])) + else: + return embeddings[0] + except Exception as e: + self.logger.error(f"Failed to generate the text: {e}") + raise + + async def run_async(self, task: str, *args, **kwargs) -> str: + """ + Run the model asynchronously + + Args: + task (str): Task to run. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. + + Examples: + >>> mpt_instance = MPT('mosaicml/mpt-7b-storywriter', "EleutherAI/gpt-neox-20b", max_tokens=150) + >>> mpt_instance("generate", "Once upon a time in a land far, far away...") + 'Once upon a time in a land far, far away...' 
+ >>> mpt_instance.batch_generate(["In the deep jungles,", "At the heart of the city,"], temperature=0.7) + ['In the deep jungles,', + 'At the heart of the city,'] + >>> mpt_instance.freeze_model() + >>> mpt_instance.unfreeze_model() + + """ + # Wrapping synchronous calls with async + return self.run(task, *args, **kwargs) + + def __call__(self, task: str): + """ + Generate a response based on the prompt text. + + Args: + - task (str): Text to prompt the model. + - max_length (int): Maximum length of the response. + + Returns: + - Generated text (str). + """ + self.load_model() + + max_length = self.max_length + + try: + embeddings = self.model.encode([task], max_length=max_length) + + if self.cos_sim: + print(cos_sim(embeddings[0], embeddings[1])) + else: + return embeddings[0] + except Exception as e: + self.logger.error(f"Failed to generate the text: {e}") + raise + + async def __call_async__(self, task: str, *args, **kwargs) -> str: + """Call the model asynchronously""" "" + return await self.run_async(task, *args, **kwargs) + + def save_model(self, path: str): + """Save the model to a given path""" + self.model.save_pretrained(path) + self.tokenizer.save_pretrained(path) + + def gpu_available(self) -> bool: + """Check if GPU is available""" + return torch.cuda.is_available() + + def memory_consumption(self) -> dict: + """Get the memory consumption of the GPU""" + if self.gpu_available(): + torch.cuda.synchronize() + allocated = torch.cuda.memory_allocated() + reserved = torch.cuda.memory_reserved() + return {"allocated": allocated, "reserved": reserved} + else: + return {"error": "GPU not available"} diff --git a/swarms/embeddings/openai.py b/swarms/models/openai_embeddings.py similarity index 99% rename from swarms/embeddings/openai.py rename to swarms/models/openai_embeddings.py index 230dade9..0aa3473d 100644 --- a/swarms/embeddings/openai.py +++ b/swarms/models/openai_embeddings.py @@ -25,7 +25,7 @@ from tenacity import ( stop_after_attempt, wait_exponential, ) -from swarms.embeddings.base import Embeddings +from swarms.models.embeddings_base import Embeddings def get_from_dict_or_env( diff --git a/swarms/embeddings/pegasus.py b/swarms/models/pegasus.py similarity index 100% rename from swarms/embeddings/pegasus.py rename to swarms/models/pegasus.py diff --git a/swarms/embeddings/simple_ada.py b/swarms/models/simple_ada.py similarity index 99% rename from swarms/embeddings/simple_ada.py rename to swarms/models/simple_ada.py index ba0b4cf7..7eb923b4 100644 --- a/swarms/embeddings/simple_ada.py +++ b/swarms/models/simple_ada.py @@ -1,10 +1,9 @@ import openai from dotenv import load_dotenv +from os import getenv load_dotenv() -from os import getenv - def get_ada_embeddings(text: str, model: str = "text-embedding-ada-002"): """ diff --git a/swarms/models/yarn_mistral.py b/swarms/models/yarn_mistral.py new file mode 100644 index 00000000..ebe107a2 --- /dev/null +++ b/swarms/models/yarn_mistral.py @@ -0,0 +1,265 @@ +import logging + +import torch +from torch.nn.parallel import DistributedDataParallel as DDP +from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig + + +class YarnMistral128: + """ + A class for running inference on a given model. + + Attributes: + model_id (str): The ID of the model. + device (str): The device to run the model on (either 'cuda' or 'cpu'). + max_length (int): The maximum length of the output sequence. + quantize (bool, optional): Whether to use quantization. Defaults to False. 
+ quantization_config (dict, optional): The configuration for quantization. + verbose (bool, optional): Whether to print verbose logs. Defaults to False. + logger (logging.Logger, optional): The logger to use. Defaults to a basic logger. + + # Usage + ``` + from finetuning_suite import Inference + + model_id = "gpt2-small" + inference = Inference(model_id=model_id) + + prompt_text = "Once upon a time" + generated_text = inference(prompt_text) + print(generated_text) + ``` + """ + + def __init__( + self, + model_id: str = "NousResearch/Yarn-Mistral-7b-128k", + device: str = None, + max_length: int = 500, + quantize: bool = False, + quantization_config: dict = None, + verbose=False, + # logger=None, + distributed=False, + decoding=False, + ): + self.logger = logging.getLogger(__name__) + self.device = ( + device if device else ("cuda" if torch.cuda.is_available() else "cpu") + ) + self.model_id = model_id + self.max_length = max_length + self.verbose = verbose + self.distributed = distributed + self.decoding = decoding + self.model, self.tokenizer = None, None + # self.log = Logging() + + if self.distributed: + assert ( + torch.cuda.device_count() > 1 + ), "You need more than 1 gpu for distributed processing" + + bnb_config = None + if quantize: + if not quantization_config: + quantization_config = { + "load_in_4bit": True, + "bnb_4bit_use_double_quant": True, + "bnb_4bit_quant_type": "nf4", + "bnb_4bit_compute_dtype": torch.bfloat16, + } + bnb_config = BitsAndBytesConfig(**quantization_config) + + try: + self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) + self.model = AutoModelForCausalLM.from_pretrained( + self.model_id, + quantization_config=bnb_config, + use_flash_attention_2=True, + torch_dtype=torch.bfloat16, + device_map="auto", + trust_remote_code=True, + ) + + self.model # .to(self.device) + except Exception as e: + self.logger.error(f"Failed to load the model or the tokenizer: {e}") + raise + + def load_model(self): + """Load the model""" + if not self.model or not self.tokenizer: + try: + self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) + + bnb_config = ( + BitsAndBytesConfig(**self.quantization_config) + if self.quantization_config + else None + ) + + self.model = AutoModelForCausalLM.from_pretrained( + self.model_id, quantization_config=bnb_config + ).to(self.device) + + if self.distributed: + self.model = DDP(self.model) + except Exception as error: + self.logger.error(f"Failed to load the model or the tokenizer: {error}") + raise + + def run(self, prompt_text: str): + """ + Generate a response based on the prompt text. + + Args: + - prompt_text (str): Text to prompt the model. + - max_length (int): Maximum length of the response. + + Returns: + - Generated text (str). 
+ """ + self.load_model() + + max_length = self.max_length + + try: + inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to( + self.device + ) + + # self.log.start() + + if self.decoding: + with torch.no_grad(): + for _ in range(max_length): + output_sequence = [] + + outputs = self.model.generate( + inputs, max_length=len(inputs) + 1, do_sample=True + ) + output_tokens = outputs[0][-1] + output_sequence.append(output_tokens.item()) + + # print token in real-time + print( + self.tokenizer.decode( + [output_tokens], skip_special_tokens=True + ), + end="", + flush=True, + ) + inputs = outputs + else: + with torch.no_grad(): + outputs = self.model.generate( + inputs, max_length=max_length, do_sample=True + ) + + del inputs + return self.tokenizer.decode(outputs[0], skip_special_tokens=True) + except Exception as e: + self.logger.error(f"Failed to generate the text: {e}") + raise + + async def run_async(self, task: str, *args, **kwargs) -> str: + """ + Run the model asynchronously + + Args: + task (str): Task to run. + *args: Variable length argument list. + **kwargs: Arbitrary keyword arguments. + + Examples: + >>> mpt_instance = MPT('mosaicml/mpt-7b-storywriter', "EleutherAI/gpt-neox-20b", max_tokens=150) + >>> mpt_instance("generate", "Once upon a time in a land far, far away...") + 'Once upon a time in a land far, far away...' + >>> mpt_instance.batch_generate(["In the deep jungles,", "At the heart of the city,"], temperature=0.7) + ['In the deep jungles,', + 'At the heart of the city,'] + >>> mpt_instance.freeze_model() + >>> mpt_instance.unfreeze_model() + + """ + # Wrapping synchronous calls with async + return self.run(task, *args, **kwargs) + + def __call__(self, prompt_text: str): + """ + Generate a response based on the prompt text. + + Args: + - prompt_text (str): Text to prompt the model. + - max_length (int): Maximum length of the response. + + Returns: + - Generated text (str). 
+        """
+        self.load_model()
+
+        max_length = self.max_length
+
+        try:
+            inputs = self.tokenizer.encode(prompt_text, return_tensors="pt").to(
+                self.device
+            )
+
+            # self.log.start()
+
+            if self.decoding:
+                with torch.no_grad():
+                    for _ in range(max_length):
+                        output_sequence = []
+
+                        outputs = self.model.generate(
+                            inputs, max_length=len(inputs) + 1, do_sample=True
+                        )
+                        output_tokens = outputs[0][-1]
+                        output_sequence.append(output_tokens.item())
+
+                        # print token in real-time
+                        print(
+                            self.tokenizer.decode(
+                                [output_tokens], skip_special_tokens=True
+                            ),
+                            end="",
+                            flush=True,
+                        )
+                        inputs = outputs
+            else:
+                with torch.no_grad():
+                    outputs = self.model.generate(
+                        inputs, max_length=max_length, do_sample=True
+                    )
+
+            del inputs
+
+            return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+        except Exception as e:
+            self.logger.error(f"Failed to generate the text: {e}")
+            raise
+
+    async def __call_async__(self, task: str, *args, **kwargs) -> str:
+        """Call the model asynchronously"""
+        return await self.run_async(task, *args, **kwargs)
+
+    def save_model(self, path: str):
+        """Save the model to a given path"""
+        self.model.save_pretrained(path)
+        self.tokenizer.save_pretrained(path)
+
+    def gpu_available(self) -> bool:
+        """Check if GPU is available"""
+        return torch.cuda.is_available()
+
+    def memory_consumption(self) -> dict:
+        """Get the memory consumption of the GPU"""
+        if self.gpu_available():
+            torch.cuda.synchronize()
+            allocated = torch.cuda.memory_allocated()
+            reserved = torch.cuda.memory_reserved()
+            return {"allocated": allocated, "reserved": reserved}
+        else:
+            return {"error": "GPU not available"}
diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py
index 8d7a09ed..8601b8dd 100644
--- a/swarms/structs/flow.py
+++ b/swarms/structs/flow.py
@@ -1,9 +1,9 @@
 """
 TODO:
-- Add a retry mechanism
-- Add prompt injection letting the agent know it's in a flow, Flow prompt
-- Dynamic temperature handling
-
+- Add tools
+- Add open interpreter style conversation
+- Add configurable save and restore so the user can restore from previous flows
+- Add memory vector database retrieval
 """
 
 import json
@@ -252,7 +252,8 @@ class Flow:
 
             History: {response}
 
-            """, **kwargs
+            """,
+            **kwargs,
         )
         # print(f"Next query: {response}")
         # break
diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py
new file mode 100644
index 00000000..2df95c07
--- /dev/null
+++ b/swarms/structs/sequential_workflow.py
@@ -0,0 +1,20 @@
+"""
+Sequential Workflow
+
+from swarms.models import OpenAIChat, Mistral
+from swarms.structs import SequentialWorkflow
+
+
+llm = OpenAIChat(openai_api_key="")
+mistral = Mistral()
+
+# Max loops will run over the sequential pipeline twice
+workflow = SequentialWorkflow(max_loops=2)
+
+workflow.add("What's the weather in Miami", llm)
+
+workflow.add("Create a report on these metrics", mistral)
+
+workflow.run()
+
+"""
diff --git a/tests/embeddings/pegasus.py b/tests/embeddings/pegasus.py
index d1e901dc..e9632eae 100644
--- a/tests/embeddings/pegasus.py
+++ b/tests/embeddings/pegasus.py
@@ -1,6 +1,6 @@
 import pytest
 from unittest.mock import patch
-from swarms.embeddings.pegasus import PegasusEmbedding
+from swarms.models.pegasus import PegasusEmbedding
 
 
 def test_init():
From fea0eeebc9a83ca53651843dd587f4a2563689fd Mon Sep 17 00:00:00 2001
From: Kye
Date: Fri, 3 Nov 2023 11:24:13 -0400
Subject: [PATCH 12/15] hugginface

---
 swarms/models/distilled_whisperx.py |   3 +
 swarms/models/huggingface.py        |  20 ++
 swarms/models/petals.py             |   2 +-
tests/models/ada.py | 68 +++++++ tests/models/huggingface.py | 286 ++++++++++++++++++++++------ 5 files changed, 325 insertions(+), 54 deletions(-) create mode 100644 swarms/models/distilled_whisperx.py create mode 100644 tests/models/ada.py diff --git a/swarms/models/distilled_whisperx.py b/swarms/models/distilled_whisperx.py new file mode 100644 index 00000000..2eb2788d --- /dev/null +++ b/swarms/models/distilled_whisperx.py @@ -0,0 +1,3 @@ +""" + +""" \ No newline at end of file diff --git a/swarms/models/huggingface.py b/swarms/models/huggingface.py index f07edad3..437d9144 100644 --- a/swarms/models/huggingface.py +++ b/swarms/models/huggingface.py @@ -294,3 +294,23 @@ class HuggingfaceLLM: ) print(dashboard) + + def set_device(self, device): + """ + Changes the device used for inference. + + Parameters + ---------- + device : str + The new device to use for inference. + """ + self.device = device + self.model.to(self.device) + + def set_max_length(self, max_length): + """Set max_length""" + self.max_length = max_length + + def clear_chat_history(self): + """Clear chat history""" + self.chat_history = [] diff --git a/swarms/models/petals.py b/swarms/models/petals.py index cc90cb62..189c2477 100644 --- a/swarms/models/petals.py +++ b/swarms/models/petals.py @@ -35,7 +35,7 @@ class Petals: "max_length": self.max_length, } - def generate(self, prompt): + def __call__(self, prompt): """Generate text using the Petals API.""" params = self._default_params() inputs = self.tokenizer(prompt, return_tensors="pt")["input_ids"] diff --git a/tests/models/ada.py b/tests/models/ada.py new file mode 100644 index 00000000..786b162d --- /dev/null +++ b/tests/models/ada.py @@ -0,0 +1,68 @@ +# test_embeddings.py + +import pytest +import openai +from unittest.mock import patch +from swarms.models.simple_ada import get_ada_embeddings # Adjust this import path to your project structure +from os import getenv +from dotenv import load_dotenv + +load_dotenv() + +# Fixture for test texts +@pytest.fixture +def test_texts(): + return [ + "Hello World", + "This is a test string with newline\ncharacters", + "A quick brown fox jumps over the lazy dog", + ] + +# Basic Test +def test_get_ada_embeddings_basic(test_texts): + with patch('openai.Embedding.create') as mock_create: + # Mocking the OpenAI API call + mock_create.return_value = { + "data": [ + {"embedding": [0.1, 0.2, 0.3]} + ] + } + + for text in test_texts: + embedding = get_ada_embeddings(text) + assert embedding == [0.1, 0.2, 0.3], "Embedding does not match expected output" + mock_create.assert_called_with(input=[text.replace("\n", " ")], model="text-embedding-ada-002") + +# Parameterized Test +@pytest.mark.parametrize( + "text, model, expected_call_model", + [ + ("Hello World", "text-embedding-ada-002", "text-embedding-ada-002"), + ("Hello World", "text-embedding-ada-001", "text-embedding-ada-001"), + ], +) +def test_get_ada_embeddings_models(text, model, expected_call_model): + with patch('openai.Embedding.create') as mock_create: + mock_create.return_value = { + "data": [ + {"embedding": [0.1, 0.2, 0.3]} + ] + } + + _ = get_ada_embeddings(text, model=model) + mock_create.assert_called_with(input=[text], model=expected_call_model) + +# Exception Test +def test_get_ada_embeddings_exception(): + with patch('openai.Embedding.create') as mock_create: + mock_create.side_effect = openai.error.OpenAIError("Test error") + with pytest.raises(openai.error.OpenAIError): + get_ada_embeddings("Some text") + +# Tests for environment variable loading +def 
test_env_var_loading(monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "testkey123") + with patch('openai.Embedding.create'): + assert getenv("OPENAI_API_KEY") == "testkey123", "Environment variable for API key is not set correctly" + +# ... more tests to cover other aspects such as different input types, large inputs, invalid inputs, etc. diff --git a/tests/models/huggingface.py b/tests/models/huggingface.py index 46c7fa12..1bb44bed 100644 --- a/tests/models/huggingface.py +++ b/tests/models/huggingface.py @@ -1,58 +1,238 @@ -import pytest import torch -from unittest.mock import Mock, patch -from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig -from swarms.models.huggingface import HuggingfaceLLM +import pytest +from unittest.mock import patch, MagicMock +from swarms.models.huggingface import HuggingfaceLLM # Replace with the actual import path +# Fixture for the class instance @pytest.fixture -def huggingface_llm(): - # Create an instance of HuggingfaceLLM for testing. +def llm_instance(): model_id = "gpt2-small" - return HuggingfaceLLM(model_id=model_id) - - -def test_initialization(huggingface_llm): - # Test the initialization of the HuggingfaceLLM class. - assert huggingface_llm.model_id == "gpt2-small" - assert huggingface_llm.device in ["cpu", "cuda"] - assert huggingface_llm.max_length == 20 - assert huggingface_llm.verbose == False - assert huggingface_llm.distributed == False - assert huggingface_llm.decoding == False - assert huggingface_llm.model is None - assert huggingface_llm.tokenizer is None - - -def test_load_model(huggingface_llm): - # Test loading the model. - huggingface_llm.load_model() - assert isinstance(huggingface_llm.model, AutoModelForCausalLM) - assert isinstance(huggingface_llm.tokenizer, AutoTokenizer) - - -def test_run(huggingface_llm): - # Test the run method of HuggingfaceLLM. - prompt_text = "Once upon a time" - generated_text = huggingface_llm.run(prompt_text) - assert isinstance(generated_text, str) - assert len(generated_text) > 0 - - -def test_call_method(huggingface_llm): - # Test the __call__ method of HuggingfaceLLM. - prompt_text = "Once upon a time" - generated_text = huggingface_llm(prompt_text) - assert isinstance(generated_text, str) - assert len(generated_text) > 0 - - -def test_load_model_failure(): - # Test loading model failure. - with patch( - "your_module.AutoModelForCausalLM.from_pretrained", - side_effect=Exception("Model load failed"), - ): - with pytest.raises(Exception): - huggingface_llm = HuggingfaceLLM(model_id="gpt2-small") - huggingface_llm.load_model() + instance = HuggingfaceLLM(model_id=model_id) + return instance + + +# Test for instantiation and attributes +def test_llm_initialization(llm_instance): + assert llm_instance.model_id == "gpt2-small" + assert llm_instance.max_length == 500 + # ... 
add more assertions for all default attributes + + +# Parameterized test for setting devices +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_llm_set_device(llm_instance, device): + llm_instance.set_device(device) + assert llm_instance.device == device + + +# Test exception during initialization with a bad model_id +def test_llm_bad_model_initialization(): + with pytest.raises(Exception): + HuggingfaceLLM(model_id="unknown-model") + + +# Mocking the tokenizer and model to test run method +@patch("swarms.models.huggingface.AutoTokenizer.from_pretrained") +@patch("swarms.models.huggingface.AutoModelForCausalLM.from_pretrained") +def test_llm_run(mock_model, mock_tokenizer, llm_instance): + mock_model.return_value.generate.return_value = "mocked output" + mock_tokenizer.return_value.encode.return_value = "mocked input" + result = llm_instance.run("test task") + assert result == "mocked output" + + +# Async test (requires pytest-asyncio plugin) +@pytest.mark.asyncio +async def test_llm_run_async(llm_instance): + result = await llm_instance.run_async("test task") + assert isinstance(result, str) + + +# Test for checking GPU availability +def test_llm_gpu_availability(llm_instance): + # Assuming the test is running on a machine where the GPU availability is known + expected_result = torch.cuda.is_available() + assert llm_instance.gpu_available() == expected_result + + +# Test for memory consumption reporting +def test_llm_memory_consumption(llm_instance): + # Mocking torch.cuda functions for consistent results + with patch("torch.cuda.memory_allocated", return_value=1024): + with patch("torch.cuda.memory_reserved", return_value=2048): + memory = llm_instance.memory_consumption() + assert memory == {"allocated": 1024, "reserved": 2048} + + +# Test different initialization parameters +@pytest.mark.parametrize("model_id, max_length", [ + ("gpt2-small", 100), + ("gpt2-medium", 200), + ("gpt2-large", None) # None to check default behavior +]) +def test_llm_initialization_params(model_id, max_length): + if max_length: + instance = HuggingfaceLLM(model_id=model_id, max_length=max_length) + assert instance.max_length == max_length + else: + instance = HuggingfaceLLM(model_id=model_id) + assert instance.max_length == 500 # Assuming 500 is the default max_length + + +# Test for setting an invalid device +def test_llm_set_invalid_device(llm_instance): + with pytest.raises(ValueError): + llm_instance.set_device("quantum_processor") + + +# Test for model download progress bar +@patch("swarms.models.huggingface.HuggingfaceLLM._download_model") +def test_llm_model_download_progress(mock_download, llm_instance): + llm_instance.download_model_with_progress() + mock_download.assert_called_once() + + +# Mocking external API call to test run method without network +@patch("swarms.models.huggingface.HuggingfaceLLM.run") +def test_llm_run_without_network(mock_run, llm_instance): + mock_run.return_value = "mocked output" + result = llm_instance.run("test task without network") + assert result == "mocked output" + + +# Test handling of empty input for the run method +def test_llm_run_empty_input(llm_instance): + with pytest.raises(ValueError): + llm_instance.run("") + + +# Test the generation with a provided seed for reproducibility +@patch("swarms.models.huggingface.HuggingfaceLLM.run") +def test_llm_run_with_seed(mock_run, llm_instance): + seed = 42 + llm_instance.set_seed(seed) + # Assuming set_seed method affects the randomness in the model + # You would typically ensure that setting the seed gives 
reproducible results + mock_run.return_value = "mocked deterministic output" + result = llm_instance.run("test task", seed=seed) + assert result == "mocked deterministic output" + + +# Test the output length is as expected +@patch("swarms.models.huggingface.HuggingfaceLLM.run") +def test_llm_run_output_length(mock_run, llm_instance): + input_text = "test task" + llm_instance.max_length = 50 # set a max_length for the output + mock_run.return_value = "mocked output" * 10 # some long text + result = llm_instance.run(input_text) + assert len(result.split()) <= llm_instance.max_length + + +# Test the tokenizer handling special tokens correctly +@patch("swarms.models.huggingface.HuggingfaceLLM._tokenizer.encode") +@patch("swarms.models.huggingface.HuggingfaceLLM._tokenizer.decode") +def test_llm_tokenizer_special_tokens(mock_decode, mock_encode, llm_instance): + mock_encode.return_value = "encoded input with special tokens" + mock_decode.return_value = "decoded output with special tokens" + result = llm_instance.run("test task with special tokens") + mock_encode.assert_called_once() + mock_decode.assert_called_once() + assert "special tokens" in result + + +# Test for correct handling of timeouts +@patch("swarms.models.huggingface.HuggingfaceLLM.run") +def test_llm_timeout_handling(mock_run, llm_instance): + mock_run.side_effect = TimeoutError + with pytest.raises(TimeoutError): + llm_instance.run("test task with timeout") + + +# Test for response time within a threshold (performance test) +@patch("swarms.models.huggingface.HuggingfaceLLM.run") +def test_llm_response_time(mock_run, llm_instance): + import time + mock_run.return_value = "mocked output" + start_time = time.time() + llm_instance.run("test task for response time") + end_time = time.time() + assert end_time - start_time < 1 # Assuming the response should be faster than 1 second + + +# Test the logging of a warning for long inputs +@patch("swarms.models.huggingface.logging.warning") +def test_llm_long_input_warning(mock_warning, llm_instance): + long_input = "x" * 10000 # input longer than the typical limit + llm_instance.run(long_input) + mock_warning.assert_called_once() + + +# Test for run method behavior when model raises an exception +@patch("swarms.models.huggingface.HuggingfaceLLM._model.generate", side_effect=RuntimeError) +def test_llm_run_model_exception(mock_generate, llm_instance): + with pytest.raises(RuntimeError): + llm_instance.run("test task when model fails") + + +# Test the behavior when GPU is forced but not available +@patch("torch.cuda.is_available", return_value=False) +def test_llm_force_gpu_when_unavailable(mock_is_available, llm_instance): + with pytest.raises(EnvironmentError): + llm_instance.set_device("cuda") # Attempt to set CUDA when it's not available + + +# Test for proper cleanup after model use (releasing resources) +@patch("swarms.models.huggingface.HuggingfaceLLM._model") +@patch("swarms.models.huggingface.HuggingfaceLLM._tokenizer") +def test_llm_cleanup(mock_model, mock_tokenizer, llm_instance): + llm_instance.cleanup() + # Assuming cleanup method is meant to free resources + mock_model.delete.assert_called_once() + mock_tokenizer.delete.assert_called_once() + + +# Test updating the configuration after instantiation +def test_llm_update_configuration(llm_instance): + new_config = {"temperature": 0.7} + llm_instance.update_configuration(new_config) + assert llm_instance.configuration["temperature"] == 0.7 + + +# Test if the model is re-downloaded when changing the model_id 
+@patch("swarms.models.huggingface.HuggingfaceLLM._download_model") +def test_llm_change_model_id(mock_download, llm_instance): + new_model_id = "gpt2-xl" + llm_instance.model_id = new_model_id + mock_download.assert_called_with(new_model_id) + + +# Test model's ability to handle multilingual input +@patch("swarms.models.huggingface.HuggingfaceLLM.run") +def test_llm_multilingual_input(mock_run, llm_instance): + mock_run.return_value = "mocked multilingual output" + multilingual_input = "Bonjour, ceci est un test multilingue." + result = llm_instance.run(multilingual_input) + assert isinstance(result, str) # Simple check to ensure output is string type + +# Test caching mechanism to prevent re-running the same inputs +@patch("swarms.models.huggingface.HuggingfaceLLM.run") +def test_llm_caching_mechanism(mock_run, llm_instance): + input_text = "test caching mechanism" + mock_run.return_value = "cached output" + # Run the input twice + first_run_result = llm_instance.run(input_text) + second_run_result = llm_instance.run(input_text) + mock_run.assert_called_once() # Should only be called once due to caching + assert first_run_result == second_run_result + + +# Ensure that model re-downloads when force_download flag is set +@patch("swarms.models.huggingface.HuggingfaceLLM._download_model") +def test_llm_force_download(mock_download, llm_instance): + llm_instance.download_model_with_progress(force_download=True) + mock_download.assert_called_once_with(llm_instance.model_id, force=True) + + +# These tests are provided as examples. In real-world scenarios, you will need to adapt these tests to the actual logic of your `HuggingfaceLLM` class. +# For instance, "mock_model.delete.assert_called_once()" and similar lines are based on hypothetical methods and behaviors that you need to replace with actual implementations. 
From 81d02a610366f0305428d2b300c4b059f34b3875 Mon Sep 17 00:00:00 2001 From: Kye Date: Fri, 3 Nov 2023 15:44:06 -0400 Subject: [PATCH 13/15] tests --- swarms/models/bioclip.py | 1 - tests/models/huggingface.py | 10 +++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/swarms/models/bioclip.py b/swarms/models/bioclip.py index 937634e3..318de290 100644 --- a/swarms/models/bioclip.py +++ b/swarms/models/bioclip.py @@ -42,7 +42,6 @@ Please refer to the corresponding paper, "Large-Scale Domain-Specific Pretrainin """ import open_clip -import glob import torch from PIL import Image import matplotlib.pyplot as plt diff --git a/tests/models/huggingface.py b/tests/models/huggingface.py index 1bb44bed..847ced06 100644 --- a/tests/models/huggingface.py +++ b/tests/models/huggingface.py @@ -1,7 +1,11 @@ -import torch +from unittest.mock import MagicMock, patch + import pytest -from unittest.mock import patch, MagicMock -from swarms.models.huggingface import HuggingfaceLLM # Replace with the actual import path +import torch + +from swarms.models.huggingface import ( + HuggingfaceLLM, # Replace with the actual import path +) # Fixture for the class instance From 1162271fc628b5ddea343af480a3952fb0afd565 Mon Sep 17 00:00:00 2001 From: Kye Date: Fri, 3 Nov 2023 15:54:11 -0400 Subject: [PATCH 14/15] flow --- README.md | 5 ++--- swarms/structs/flow.py | 6 +++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index bea090d3..68d7ba05 100644 --- a/README.md +++ b/README.md @@ -118,14 +118,13 @@ agent.run("Create a video of a swarm of fish") --- ## Documentation - - For documentation, go here, [swarms.apac.ai](https://swarms.apac.ai) ## Contribute -We're always looking for contributors to help us improve and expand this project. If you're interested, please check out our [Contributing Guidelines](CONTRIBUTING.md). +We're always looking for contributors to help us improve and expand this project. 
If you're interested, please check out our [Contributing Guidelines](CONTRIBUTING.md) and our [contributing board](https://github.com/users/kyegomez/projects/1).
 
-# License
+# License
 
 MIT
 
diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py
index 8601b8dd..40e00ca1 100644
--- a/swarms/structs/flow.py
+++ b/swarms/structs/flow.py
@@ -13,7 +13,7 @@ from typing import Any, Callable, Dict, List, Optional, Tuple, Generator
 from termcolor import colored
 import inspect
 import random
-from swarms.tools.tool import BaseTool
+# from swarms.tools.tool import BaseTool
 
 
 # Constants
@@ -103,7 +103,7 @@ class Flow:
         retry_interval: int = 1,
         interactive: bool = False,
         dashboard: bool = False,
-        tools: List[BaseTool] = None,
+        # tools: List[BaseTool] = None,
         dynamic_temperature: bool = False,
         **kwargs: Any,
     ):
@@ -121,7 +121,7 @@ class Flow:
         self.interactive = interactive
         self.dashboard = dashboard
         self.dynamic_temperature = dynamic_temperature
-        self.tools = tools
+        # self.tools = tools
 
     def provide_feedback(self, feedback: str) -> None:
         """Allow users to provide feedback on the responses."""
From 1b0cb87c737aa9e748f0132dd09ff9a714b4faf0 Mon Sep 17 00:00:00 2001
From: Kye 
Date: Fri, 3 Nov 2023 15:55:41 -0400
Subject: [PATCH 15/15] no stream

---
 swarms/agents/__init__.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/swarms/agents/__init__.py b/swarms/agents/__init__.py
index 0026cdc3..f622f3f8 100644
--- a/swarms/agents/__init__.py
+++ b/swarms/agents/__init__.py
@@ -1,7 +1,7 @@
 from swarms.agents.omni_modal_agent import OmniModalAgent
 from swarms.agents.hf_agents import HFAgent
 from swarms.agents.message import Message
-from swarms.agents.stream_response import stream
+# from swarms.agents.stream_response import stream
 from swarms.agents.base import AbstractAgent
 from swarms.agents.registry import Registry
 from swarms.agents.idea_to_image_agent import Idea2Image
@@ -14,7 +14,6 @@ __all__ = [
     "OmniModalAgent",
     "HFAgent",
     "Message",
-    "stream",
     "AbstractAgent",
     "Registry",
     "Idea2Image",