commit
80187603f6
@ -0,0 +1,43 @@
|
||||
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
#
# This workflow file requires a free account on Bearer.com to manage findings, notifications and more.
# See https://docs.bearer.com/guides/bearer-cloud/
name: Bearer

on:
  push:
    branches: ["master" ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: ["master"]
  schedule:
    - cron: '24 22 * * 6'

permissions:
  contents: read # for actions/checkout to fetch code
  security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
  actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status

jobs:
  bearer:
    runs-on: ubuntu-latest
    steps:
      # Checkout project source
      - uses: actions/checkout@v4
      # Scan code using Bearer CLI (action pinned to a commit SHA for supply-chain safety)
      - name: Run Report
        id: report
        uses: bearer/bearer-action@828eeb928ce2f4a7ca5ed57fb8b59508cb8c79bc
        with:
          api-key: ${{ secrets.BEARER_TOKEN }}
          format: sarif
          output: results.sarif
          # exit-code 0 keeps the job green even when findings exist; results
          # are surfaced via the SARIF upload below instead of failing the build
          exit-code: 0
      # Upload SARIF file generated in previous step
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: results.sarif
|
@ -0,0 +1,39 @@
|
||||
# Dependency Review Action
#
# This Action will scan dependency manifest files that change as part of a Pull Request,
# surfacing known-vulnerable versions of the packages declared or updated in the PR.
# Once installed, if the workflow run is marked as required, PRs introducing known-vulnerable
# packages will be blocked from merging.
#
# Source repository: https://github.com/actions/dependency-review-action
# Public documentation: https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-dependency-review#dependency-review-enforcement
name: 'Dependency review'
on:
  pull_request:
    branches: [ "master" ]

# If using a dependency submission action in this workflow this permission will need to be set to:
#
# permissions:
#   contents: write
#
# https://docs.github.com/en/enterprise-cloud@latest/code-security/supply-chain-security/understanding-your-software-supply-chain/using-the-dependency-submission-api
permissions:
  contents: read
  # Write permissions for pull-requests are required for using the `comment-summary-in-pr` option, comment out if you aren't using this option
  pull-requests: write

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout repository'
        uses: actions/checkout@v4
      - name: 'Dependency Review'
        uses: actions/dependency-review-action@v4
        # Commonly enabled options, see https://github.com/actions/dependency-review-action#configuration-options for all available options.
        with:
          comment-summary-in-pr: always
          # fail-on-severity: moderate
          # deny-licenses: GPL-1.0-or-later, LGPL-2.0-or-later
          # retry-on-snapshot-warnings: true
|
@ -0,0 +1,18 @@
|
||||
# Builds the repository's Docker image on every push/PR to master as a
# smoke test; the image is tagged with a timestamp and not pushed anywhere.
name: Docker Image CI

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

jobs:

  build:

    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - name: Build the Docker image
        # $(date +%s) gives a unique epoch-seconds tag per run
        run: docker build . --file Dockerfile --tag my-image-name:$(date +%s)
|
@ -0,0 +1,50 @@
|
||||
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# This workflow integrates Python Static Analyzer (Pysa) with
# GitHub's Code Scanning feature.
#
# Python Static Analyzer (Pysa) is a security-focused static
# analysis tool that tracks flows of data from where they
# originate to where they terminate in a dangerous location.
#
# See https://pyre-check.org/docs/pysa-basics/

name: Pysa

on:
  workflow_dispatch:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]
  schedule:
    - cron: '43 5 * * 3'

permissions:
  contents: read

jobs:
  pysa:
    permissions:
      actions: read
      contents: read
      security-events: write

    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          # Pysa analyzes submodule sources too, so fetch them
          submodules: true

      # Action pinned to a commit SHA for supply-chain safety
      - name: Run Pysa
        uses: facebook/pysa-action@f46a63777e59268613bd6e2ff4e29f144ca9e88b
        with:
          # To customize these inputs:
          # See https://github.com/facebook/pysa-action#inputs
          repo-directory: './'
          requirements-path: 'requirements.txt'
          infer-types: true
          include-default-sapp-filters: true
|
@ -0,0 +1,34 @@
|
||||
# Lints and tests the project inside the runner's preinstalled Miniconda,
# using environment.yml to install dependencies into the base environment.
name: Python Package using Conda

on: [push]

jobs:
  build-linux:
    runs-on: ubuntu-latest
    strategy:
      max-parallel: 5

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.10
        uses: actions/setup-python@v5
        with:
          # quoted so YAML doesn't parse 3.10 as the float 3.1
          python-version: '3.10'
      - name: Add conda to system path
        run: |
          # $CONDA is an environment variable pointing to the root of the miniconda directory
          echo $CONDA/bin >> $GITHUB_PATH
      - name: Install dependencies
        run: |
          conda env update --file environment.yml --name base
      - name: Lint with flake8
        run: |
          conda install flake8
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
      - name: Test with pytest
        run: |
          conda install pytest
          pytest
|
@ -0,0 +1,49 @@
|
||||
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# This workflow file requires a free account on Semgrep.dev to
# manage rules, file ignores, notifications, and more.
#
# See https://semgrep.dev/docs

name: Semgrep

on:
  push:
    branches: [ "master" ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ "master" ]
  schedule:
    - cron: '19 7 * * 3'

permissions:
  contents: read

jobs:
  semgrep:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
      actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
    name: Scan
    runs-on: ubuntu-latest
    steps:
      # Checkout project source
      - uses: actions/checkout@v4

      # Scan code using project's configuration on https://semgrep.dev/manage
      # NOTE(review): returntocorp/semgrep-action is archived upstream — confirm
      # it still works or migrate to the `semgrep ci` CLI flow.
      - uses: returntocorp/semgrep-action@713efdd345f3035192eaa63f56867b88e63e4e5d
        with:
          publishToken: ${{ secrets.SEMGREP_APP_TOKEN }}
          publishDeployment: ${{ secrets.SEMGREP_DEPLOYMENT_ID }}
          generateSarif: "1"

      # Upload SARIF file generated in previous step
      - name: Upload SARIF file
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: semgrep.sarif
        # upload results even if the scan step failed
        if: always()
|
@ -0,0 +1,48 @@
|
||||
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

name: trivy

on:
  push:
    branches: [ "master" ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ "master" ]
  schedule:
    - cron: '31 0 * * 5'

permissions:
  contents: read

jobs:
  build:
    permissions:
      contents: read # for actions/checkout to fetch code
      security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
      actions: read # only required for a private repository by github/codeql-action/upload-sarif to get the Action run status
    name: Build
    # Fix: the ubuntu-20.04 hosted runner image has been retired by GitHub;
    # jobs pinned to it fail to schedule. Use the supported rolling image.
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Build an image from Dockerfile
        run: |
          docker build -t docker.io/my-organization/my-app:${{ github.sha }} .

      # Scan the freshly built image; action pinned to a commit SHA
      - name: Run Trivy vulnerability scanner
        uses: aquasecurity/trivy-action@18f2510ee396bbf400402947b394f2dd8c87dbb0
        with:
          image-ref: 'docker.io/my-organization/my-app:${{ github.sha }}'
          format: 'template'
          template: '@/contrib/sarif.tpl'
          output: 'trivy-results.sarif'
          severity: 'CRITICAL,HIGH'

      - name: Upload Trivy scan results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: 'trivy-results.sarif'
|
@ -0,0 +1,6 @@
|
||||
fastapi
|
||||
uvicorn
|
||||
pydantic
|
||||
loguru
|
||||
python-dotenv
|
||||
swarms # Specify the version or source if it's not on PyPI
|
@ -0,0 +1,135 @@
|
||||
# OpenAI Assistant
|
||||
|
||||
The OpenAI Assistant class provides a wrapper around OpenAI's Assistants API, integrating it with the swarms framework.
|
||||
|
||||
## Overview
|
||||
|
||||
The `OpenAIAssistant` class allows you to create and interact with OpenAI Assistants, providing a simple interface for:
|
||||
|
||||
- Creating assistants with specific roles and capabilities
|
||||
- Adding custom functions that the assistant can call
|
||||
- Managing conversation threads
|
||||
- Handling tool calls and function execution
|
||||
- Getting responses from the assistant
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
pip install swarms
|
||||
```
|
||||
## Basic Usage
|
||||
|
||||
```python
|
||||
|
||||
from swarms import OpenAIAssistant
|
||||
|
||||
# Create an assistant
|
||||
assistant = OpenAIAssistant(
|
||||
name="Math Tutor",
|
||||
instructions="You are a helpful math tutor.",
|
||||
model="gpt-4o",
|
||||
tools=[{"type": "code_interpreter"}]
|
||||
)
|
||||
|
||||
# Run a task
|
||||
response = assistant.run("Solve the equation: 3x + 11 = 14")
|
||||
print(response)
|
||||
|
||||
# Continue the conversation in the same thread
|
||||
follow_up = assistant.run("Now explain how you solved it")
|
||||
print(follow_up)
|
||||
```
|
||||
|
||||
## Function Calling
|
||||
|
||||
The assistant supports custom function integration:
|
||||
|
||||
```python
|
||||
|
||||
def get_weather(location: str, unit: str = "celsius") -> str:
|
||||
# Mock weather function
|
||||
return f"The weather in {location} is 22 degrees {unit}"
|
||||
|
||||
# Add function to assistant
|
||||
assistant.add_function(
|
||||
description="Get the current weather in a location",
|
||||
parameters={
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "City name"
|
||||
},
|
||||
"unit": {
|
||||
"type": "string",
|
||||
"enum": ["celsius", "fahrenheit"],
|
||||
"default": "celsius"
|
||||
}
|
||||
},
|
||||
"required": ["location"]
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### Constructor
|
||||
|
||||
```python
|
||||
OpenAIAssistant(
|
||||
name: str,
|
||||
instructions: Optional[str] = None,
|
||||
model: str = "gpt-4o",
|
||||
tools: Optional[List[Dict[str, Any]]] = None,
|
||||
file_ids: Optional[List[str]] = None,
|
||||
metadata: Optional[Dict[str, Any]] = None,
|
||||
functions: Optional[List[Dict[str, Any]]] = None,
|
||||
)
|
||||
```
|
||||
|
||||
### Methods
|
||||
|
||||
#### run(task: str) -> str
|
||||
Sends a task to the assistant and returns its response. The conversation thread is maintained between calls.
|
||||
|
||||
#### add_function(func: Callable, description: str, parameters: Dict[str, Any]) -> None
|
||||
Adds a callable function that the assistant can use during conversations.
|
||||
|
||||
#### add_message(content: str, file_ids: Optional[List[str]] = None) -> None
|
||||
Adds a message to the current conversation thread.
|
||||
|
||||
## Error Handling
|
||||
|
||||
The assistant implements robust error handling:
|
||||
- Retries on rate limits
|
||||
- Graceful handling of API errors
|
||||
- Clear error messages for debugging
|
||||
- Status monitoring for runs and completions
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. Thread Management
|
||||
- Use the same assistant instance for related conversations
|
||||
- Create new instances for unrelated tasks
|
||||
- Monitor thread status during long-running operations
|
||||
|
||||
2. Function Integration
|
||||
- Keep functions simple and focused
|
||||
- Provide clear descriptions and parameter schemas
|
||||
- Handle errors gracefully in custom functions
|
||||
- Test functions independently before integration
|
||||
|
||||
3. Performance
|
||||
- Reuse assistant instances when possible
|
||||
- Monitor and handle rate limits appropriately
|
||||
- Use appropriate polling intervals for status checks
|
||||
- Consider implementing timeouts for long-running operations
|
||||
|
||||
## References
|
||||
|
||||
- [OpenAI Assistants API Documentation](https://platform.openai.com/docs/assistants/overview)
|
||||
- [OpenAI Function Calling Guide](https://platform.openai.com/docs/guides/function-calling)
|
||||
- [OpenAI Rate Limits](https://platform.openai.com/docs/guides/rate-limits)
|
||||
|
||||
|
||||
|
@ -0,0 +1,264 @@
|
||||
from typing import Optional, List, Dict, Any, Callable
|
||||
import time
|
||||
from openai import OpenAI
|
||||
from swarms.structs.agent import Agent
|
||||
import json
|
||||
|
||||
class OpenAIAssistant(Agent):
    """
    OpenAI Assistant wrapper for the swarms framework.
    Integrates OpenAI's Assistants API with the swarms architecture.

    Example:
        >>> assistant = OpenAIAssistant(
        ...     name="Math Tutor",
        ...     instructions="You are a personal math tutor.",
        ...     model="gpt-4o",
        ...     tools=[{"type": "code_interpreter"}]
        ... )
        >>> response = assistant.run("Solve 3x + 11 = 14")
    """

    def __init__(
        self,
        name: str,
        instructions: Optional[str] = None,
        model: str = "gpt-4o",
        tools: Optional[List[Dict[str, Any]]] = None,
        file_ids: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        functions: Optional[List[Dict[str, Any]]] = None,
        *args,
        **kwargs
    ):
        """Initialize the OpenAI Assistant.

        Args:
            name: Name of the assistant
            instructions: System instructions for the assistant
            model: Model to use (default: gpt-4o)
            tools: List of tools to enable (code_interpreter, retrieval)
            file_ids: List of file IDs to attach
            metadata: Additional metadata
            functions: List of custom functions to make available
        """
        super().__init__(*args, **kwargs)

        # Fix: run() reads self.instructions and _ensure_thread() reads
        # self.thread, but neither attribute was ever assigned here, which
        # raises AttributeError on first use (unless the Agent base happens
        # to set them). Initialize both explicitly.
        self.instructions = instructions
        self.thread = None

        # Initialize tools list with any provided functions
        self.tools: List[Dict[str, Any]] = tools or []
        if functions:
            for func in functions:
                self.tools.append({
                    "type": "function",
                    "function": func
                })

        # Create the OpenAI Assistant. OpenAI() reads OPENAI_API_KEY from the
        # environment.
        self.client = OpenAI()
        # NOTE(review): `file_ids` was removed from assistants.create in
        # Assistants API v2 (replaced by `tool_resources`) — confirm the
        # pinned openai client version still accepts it.
        self.assistant = self.client.beta.assistants.create(
            name=name,
            instructions=instructions,
            model=model,
            tools=self.tools,
            file_ids=file_ids or [],
            metadata=metadata or {}
        )

        # Maps function name -> Python callable for tool-call dispatch.
        self.available_functions: Dict[str, Callable] = {}

    def add_function(self, func: Callable, description: str, parameters: Dict[str, Any]) -> None:
        """Add a function that the assistant can call.

        Args:
            func: The function to make available to the assistant
            description: Description of what the function does
            parameters: JSON schema describing the function parameters
        """
        func_dict = {
            "name": func.__name__,
            "description": description,
            "parameters": parameters
        }

        # Add to tools list
        self.tools.append({
            "type": "function",
            "function": func_dict
        })

        # Store function reference for dispatch in _handle_tool_calls
        self.available_functions[func.__name__] = func

        # Update assistant with new tools
        self.assistant = self.client.beta.assistants.update(
            assistant_id=self.assistant.id,
            tools=self.tools
        )

    def _handle_tool_calls(self, run, thread_id: str) -> Any:
        """Handle any required tool calls during a run.

        This method processes any tool calls required by the assistant during execution.
        It extracts function calls, executes them with provided arguments, and submits
        the results back to the assistant.

        Args:
            run: The current run object from the OpenAI API
            thread_id: ID of the current conversation thread

        Returns:
            Updated run object after processing tool calls

        Raises:
            Exception: If there are errors executing the tool calls
        """
        while run.status == "requires_action":
            tool_calls = run.required_action.submit_tool_outputs.tool_calls
            tool_outputs = []

            for tool_call in tool_calls:
                if tool_call.type == "function":
                    # Get function details
                    function_name = tool_call.function.name
                    function_args = json.loads(tool_call.function.arguments)

                    # Call function if available; unknown functions are
                    # skipped (no output submitted for them)
                    if function_name in self.available_functions:
                        function_response = self.available_functions[function_name](**function_args)
                        tool_outputs.append({
                            "tool_call_id": tool_call.id,
                            "output": str(function_response)
                        })

            # Submit outputs back to the run
            run = self.client.beta.threads.runs.submit_tool_outputs(
                thread_id=thread_id,
                run_id=run.id,
                tool_outputs=tool_outputs
            )

            # Wait for processing
            run = self._wait_for_run(run)

        return run

    def _wait_for_run(self, run) -> Any:
        """Wait for a run to complete and handle any required actions.

        This method polls the OpenAI API to check the status of a run until it
        completes or fails, handling intermediate "requires_action" states by
        delegating to _handle_tool_calls.

        Args:
            run: The run object to monitor

        Returns:
            The completed run object

        Raises:
            Exception: If the run fails or expires
        """
        while True:
            run = self.client.beta.threads.runs.retrieve(
                thread_id=run.thread_id,
                run_id=run.id
            )

            if run.status == "completed":
                break
            elif run.status == "requires_action":
                run = self._handle_tool_calls(run, run.thread_id)
                if run.status == "completed":
                    break
            elif run.status in ["failed", "expired"]:
                raise Exception(f"Run failed with status: {run.status}")

            time.sleep(3)  # Wait 3 seconds before checking again

        return run

    def _ensure_thread(self) -> None:
        """Ensure a thread exists for the conversation.

        Creates a new thread on first use so conversation context is
        maintained across multiple interactions.

        Side Effects:
            Sets self.thread if it doesn't exist
        """
        if not self.thread:
            self.thread = self.client.beta.threads.create()

    def add_message(self, content: str, file_ids: Optional[List[str]] = None) -> None:
        """Add a user message to the conversation thread.

        Args:
            content: The text content of the message to add
            file_ids: Optional list of file IDs to attach to the message. These must be
                files that have been previously uploaded to OpenAI.

        Side Effects:
            Creates a new thread if none exists
            Adds the message to the thread in OpenAI's system
        """
        self._ensure_thread()
        self.client.beta.threads.messages.create(
            thread_id=self.thread.id,
            role="user",
            content=content,
            file_ids=file_ids or []
        )

    def _get_response(self) -> str:
        """Get the latest assistant response from the thread.

        Returns the text of the newest message if it is from the assistant,
        otherwise an empty string.
        """
        messages = self.client.beta.threads.messages.list(
            thread_id=self.thread.id,
            order="desc",
            limit=1
        )

        if not messages.data:
            return ""

        message = messages.data[0]
        if message.role == "assistant":
            return message.content[0].text.value
        return ""

    def run(self, task: str, *args, **kwargs) -> str:
        """Run a task using the OpenAI Assistant.

        Args:
            task: The task or prompt to send to the assistant

        Returns:
            The assistant's response as a string (empty string if the run
            did not complete successfully)
        """
        self._ensure_thread()

        # Add the user message
        self.add_message(task)

        # Create and run the assistant
        run = self.client.beta.threads.runs.create(
            thread_id=self.thread.id,
            assistant_id=self.assistant.id,
            instructions=self.instructions
        )

        # Wait for completion
        run = self._wait_for_run(run)

        # Only get and return the response if run completed successfully
        if run.status == "completed":
            return self._get_response()
        return ""

    def call(self, task: str, *args, **kwargs) -> str:
        """Alias for run() to maintain compatibility with different agent interfaces."""
        return self.run(task, *args, **kwargs)
|
@ -0,0 +1,62 @@
|
||||
import asyncio
|
||||
from typing import Any, Callable, List, Optional
|
||||
from swarms.structs.base_workflow import BaseWorkflow
|
||||
from swarms.structs.agent import Agent
|
||||
from swarms.utils.loguru_logger import logger
|
||||
|
||||
class AsyncWorkflow(BaseWorkflow):
    """Fan a single task out to every agent concurrently via asyncio."""

    def __init__(
        self,
        name: str = "AsyncWorkflow",
        agents: List[Agent] = None,
        max_workers: int = 5,
        dashboard: bool = False,
        autosave: bool = False,
        verbose: bool = False,
        **kwargs
    ):
        """Store workflow configuration and start with empty task/result state."""
        super().__init__(agents=agents, **kwargs)
        self.name = name
        self.agents = agents or []
        self.max_workers = max_workers
        self.dashboard = dashboard
        self.autosave = autosave
        self.verbose = verbose
        self.task_pool = []
        self.results = []
        self.loop = None

    async def _execute_agent_task(self, agent: Agent, task: str) -> Any:
        """Run *task* on one agent; on failure, log and return the error text."""
        try:
            if self.verbose:
                logger.info(f"Agent {agent.agent_name} processing task: {task}")
            outcome = await agent.arun(task)
            if self.verbose:
                logger.info(f"Agent {agent.agent_name} completed task")
            return outcome
        except Exception as e:
            logger.error(f"Error in agent {agent.agent_name}: {str(e)}")
            return str(e)

    async def run(self, task: str) -> List[Any]:
        """Dispatch *task* to all agents at once and return their results in order."""
        if not self.agents:
            raise ValueError("No agents provided to the workflow")

        try:
            # One coroutine per agent, all awaited together; exceptions are
            # returned in-place rather than cancelling the batch.
            pending = [
                self._execute_agent_task(worker, task)
                for worker in self.agents
            ]
            self.results = await asyncio.gather(*pending, return_exceptions=True)

            if self.autosave:
                # TODO: Implement autosave logic here
                pass

            return self.results
        except Exception as e:
            logger.error(f"Error in workflow execution: {str(e)}")
            raise
|
Loading…
Reference in new issue