Merge branch 'master' of https://github.com/kyegomez/swarms into memory
commit 6cf5cdf39f
@@ -1,2 +1,2 @@
 [flake8]
-extend-ignore = E501, W292, W291
+extend-ignore = E501, W292, W291, W293
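For reference, the ignored codes are `E501` (line too long), `W291` (trailing whitespace), `W292` (no newline at end of file), and the newly added `W293` (whitespace on a blank line). A minimal illustration of the kind of line `W293` would otherwise flag:

```python
def greet(name: str) -> str:
    message = f"Hello, {name}"
    # W293 fires when the blank line below contains stray spaces or tabs;
    # with the new config, flake8 stays quiet about it.

    return message
```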
@@ -1,13 +1,14 @@
+---
 # These are supported funding model platforms

 github: [kyegomez]
-patreon: # Replace with a single Patreon username
-open_collective: # Replace with a single Open Collective username
-ko_fi: # Replace with a single Ko-fi username
-tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
-community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
-liberapay: # Replace with a single Liberapay username
-issuehunt: # Replace with a single IssueHunt username
-otechie: # Replace with a single Otechie username
-lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
-custom: #Nothing
+# patreon: # Replace with a single Patreon username
+# open_collective: # Replace with a single Open Collective username
+# ko_fi: # Replace with a single Ko-fi username
+# tidelift: # Replace with a single Tidelift platform-name/package-name
+# community_bridge: # Replace with a single Community Bridge project-name
+# liberapay: # Replace with a single Liberapay username
+# issuehunt: # Replace with a single IssueHunt username
+# otechie: # Replace with a single Otechie username
+# lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name
+# custom: #Nothing
@@ -1,12 +1,14 @@
+---
 # this is a config file for the github action labeler

 # Add 'label1' to any changes within 'example' folder or any subfolders
 example_change:
-- example/**
+  - example/**

 # Add 'label2' to any file changes within 'example2' folder
 example2_change: example2/*

-# Add label3 to any change to .txt files within the entire repository. Quotation marks are required for the leading asterisk
+# Add label3 to any change to .txt files within the entire repository.
+# Quotation marks are required for the leading asterisk
 text_files:
-- '**/*.txt'
+  - '**/*.txt'
@@ -1,30 +0,0 @@
name: Linting and Formatting

on:
  push:
    branches:
      - master

jobs:
  lint_and_format:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.x

      - name: Install dependencies
        run: pip install -r requirements.txt

      - name: Find Python files
        run: find swarms -name "*.py" -type f -exec autopep8 --in-place --aggressive --aggressive {} +

      - name: Push changes
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
@@ -1,42 +0,0 @@
name: Continuous Integration

on:
  push:
    branches:
      - master

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.x

      - name: Install dependencies
        run: pip install -r requirements.txt

      - name: Run unit tests
        run: pytest tests/unit

      - name: Run integration tests
        run: pytest tests/integration

      - name: Run code coverage
        run: pytest --cov=swarms tests/

      - name: Run linters
        run: pylint swarms

      - name: Build documentation
        run: make docs

      - name: Validate documentation
        run: sphinx-build -b linkcheck docs build/docs

      - name: Run performance tests
        run: find ./tests -name '*.py' -exec pytest {} \;
@@ -1,18 +1,19 @@
+---
 name: Docker Image CI

-on:
+on:  # yamllint disable-line rule:truthy
   push:
-    branches: [ "master" ]
+    branches: ["master"]
   pull_request:
-    branches: [ "master" ]
+    branches: ["master"]

 jobs:
   build:
     runs-on: ubuntu-latest
     name: Build Docker image
     steps:
-      - uses: actions/checkout@v4
-      - name: Build the Docker image
-        run: docker build . --file Dockerfile --tag my-image-name:$(date +%s)
+      - uses: actions/checkout@v4
+      - name: Build the Docker image
+        run: docker build . --file Dockerfile --tag my-image-name:$(date +%s)
@@ -1,28 +0,0 @@
name: Documentation Tests

on:
  push:
    branches:
      - master

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.x

      - name: Install dependencies
        run: pip install -r requirements.txt

      - name: Build documentation
        run: make docs

      - name: Validate documentation
        run: sphinx-build -b linkcheck docs build/docs
@@ -1,66 +0,0 @@
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# This workflow lets you generate SLSA provenance file for your project.
# The generation satisfies level 3 for the provenance requirements - see https://slsa.dev/spec/v0.1/requirements
# The project is an initiative of the OpenSSF (openssf.org) and is developed at
# https://github.com/slsa-framework/slsa-github-generator.
# The provenance file can be verified using https://github.com/slsa-framework/slsa-verifier.
# For more information about SLSA and how it improves the supply-chain, visit slsa.dev.

name: SLSA generic generator
on:
  workflow_dispatch:
  release:
    types: [created]

jobs:
  build:
    runs-on: ubuntu-latest
    outputs:
      digests: ${{ steps.hash.outputs.digests }}

    steps:
      - uses: actions/checkout@v3

      # ========================================================
      #
      # Step 1: Build your artifacts.
      #
      # ========================================================
      - name: Build artifacts
        run: |
          # These are some amazing artifacts.
          echo "artifact1" > artifact1
          echo "artifact2" > artifact2

      # ========================================================
      #
      # Step 2: Add a step to generate the provenance subjects
      #         as shown below. Update the sha256 sum arguments
      #         to include all binaries that you generate
      #         provenance for.
      #
      # ========================================================
      - name: Generate subject for provenance
        id: hash
        run: |
          set -euo pipefail

          # List the artifacts the provenance will refer to.
          files=$(ls artifact*)
          # Generate the subjects (base64 encoded).
          echo "hashes=$(sha256sum $files | base64 -w0)" >> "${GITHUB_OUTPUT}"

  provenance:
    needs: [build]
    permissions:
      actions: read  # To read the workflow path.
      id-token: write  # To sign the provenance.
      contents: write  # To add assets to a release.
    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.4.0
    with:
      base64-subjects: "${{ needs.build.outputs.digests }}"
      upload-assets: true  # Optional: Upload to a new release
@@ -1,19 +1,29 @@
+---
 # This is a basic workflow to help you get started with Actions

 name: Lint

-on: [push, pull_request]
+on: [push, pull_request]  # yamllint disable-line rule:truthy

 jobs:
+  yaml-lint:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out source repository
+        uses: actions/checkout@v4
+      - name: yaml Lint
+        uses: ibiqlik/action-yamllint@v3
   flake8-lint:
     runs-on: ubuntu-latest
-    name: Lint
+    name: flake8 Lint
     steps:
       - name: Check out source repository
         uses: actions/checkout@v4
       - name: Set up Python environment
         uses: actions/setup-python@v4
         with:
           python-version: "3.11"
       - name: flake8 Lint
-        uses: py-actions/flake8@v2
+        uses: py-actions/flake8@v2
+  ruff-lint:
+    runs-on: ubuntu-latest
+    name: ruff Lint
+    steps:
+      - uses: actions/checkout@v4
+      - uses: chartboost/ruff-action@v1
@@ -1,25 +0,0 @@
name: Linting

on:
  push:
    branches:
      - master

jobs:
  lint:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.x

      - name: Install dependencies
        run: pip install -r requirements.txt

      - name: Run linters
        run: pylint swarms
@@ -1,27 +0,0 @@
name: Makefile CI

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v3

      - name: configure
        run: ./configure

      - name: Install dependencies
        run: make

      - name: Run check
        run: make check

      - name: Run distcheck
        run: make distcheck
@@ -1,23 +0,0 @@
name: Pylint

on: [push]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.8", "3.9", "3.10"]
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pylint
      - name: Analysing the code with pylint
        run: |
          pylint $(git ls-files '*.py')
@@ -1,39 +0,0 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python

name: Python application

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

permissions:
  contents: read

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.10
        uses: actions/setup-python@v4
        with:
          python-version: "3.10"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install flake8 pytest
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Lint with flake8
        run: |
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
      - name: Test with pytest
        run: |
          pytest
@@ -1,23 +0,0 @@
name: Quality

on:
  push:
    branches: [ "master" ]
  pull_request:
    branches: [ "master" ]

jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
    steps:
      - name: Checkout actions
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Init environment
        uses: ./.github/actions/init-environment
      - name: Run linter
        run: |
          pylint `git diff --name-only --diff-filter=d origin/master HEAD | grep -E '\.py$' | tr '\n' ' '`
@@ -1,8 +0,0 @@
name: Ruff
on: [ push, pull_request ]
jobs:
  ruff:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: chartboost/ruff-action@v1
@@ -1,23 +0,0 @@
name: Python application test

on: [push]

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.8
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install pytest
          if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
      - name: Run tests with pytest
        run: |
          find tests/ -name "*.py" | xargs pytest
@@ -0,0 +1,4 @@
rules:
  line-length:
    level: warning
    allow-non-breakable-inline-mappings: true
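This new yamllint config demotes line-length violations to warnings and tolerates long lines caused by non-breakable inline mappings. As a rough sketch of how a file could be checked against an equivalent config programmatically, assuming yamllint's documented Python API (`YamlLintConfig` and `linter.run`):

```python
from yamllint import linter
from yamllint.config import YamlLintConfig

# Mirror the rules added in the diff (line-length demoted to a warning).
config = YamlLintConfig(
    "rules:\n"
    "  line-length:\n"
    "    level: warning\n"
    "    allow-non-breakable-inline-mappings: true\n"
)

with open("example.yml") as f:  # any YAML file in the repo
    for problem in linter.run(f, config):
        print(problem.line, problem.rule, problem.level, problem.desc)
```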
@@ -1,251 +1,201 @@
-# `GPT4Vision` Documentation
+# `GPT4VisionAPI` Documentation

-## Table of Contents
-- [Overview](#overview)
+**Table of Contents**
+- [Introduction](#introduction)
 - [Installation](#installation)
-- [Initialization](#initialization)
-- [Methods](#methods)
-  - [process_img](#process_img)
-  - [__call__](#__call__)
-  - [run](#run)
-  - [arun](#arun)
-- [Configuration Options](#configuration-options)
-- [Usage Examples](#usage-examples)
-- [Additional Tips](#additional-tips)
-- [References and Resources](#references-and-resources)
-
----
-
-## Overview
-
-The GPT4Vision Model API is designed to provide an easy-to-use interface for interacting with the OpenAI GPT-4 Vision model. This model can generate textual descriptions for images and answer questions related to visual content. Whether you want to describe images or perform other vision-related tasks, GPT4Vision makes it simple and efficient.
-
-The library offers a straightforward way to send images and tasks to the GPT-4 Vision model and retrieve the generated responses. It handles API communication, authentication, and retries, making it a powerful tool for developers working with computer vision and natural language processing tasks.
+- [Module Overview](#module-overview)
+- [Class: GPT4VisionAPI](#class-gpt4visionapi)
+  - [Initialization](#initialization)
+  - [Methods](#methods)
+    - [encode_image](#encode_image)
+    - [run](#run)
+    - [__call__](#__call__)
+- [Examples](#examples)
+  - [Example 1: Basic Usage](#example-1-basic-usage)
+  - [Example 2: Custom API Key](#example-2-custom-api-key)
+  - [Example 3: Adjusting Maximum Tokens](#example-3-adjusting-maximum-tokens)
+- [Additional Information](#additional-information)
+- [References](#references)
+
+## Introduction<a name="introduction"></a>
+
+Welcome to the documentation for the `GPT4VisionAPI` module! This module is a powerful wrapper for the OpenAI GPT-4 Vision model. It allows you to interact with the model to generate descriptions or answers related to images. This documentation will provide you with comprehensive information on how to use this module effectively.
+
+## Installation<a name="installation"></a>
+
+Before you start using the `GPT4VisionAPI` module, make sure you have the required dependencies installed. You can install them using the following commands:
+
+```bash
+pip3 install --upgrade swarms
+```

-## Installation
+## Module Overview<a name="module-overview"></a>

-To use the GPT4Vision Model API, you need to install the required dependencies and configure your environment. Follow these steps to get started:
+The `GPT4VisionAPI` module serves as a bridge between your application and the OpenAI GPT-4 Vision model. It allows you to send requests to the model and retrieve responses related to images. Here are some key features and functionality provided by this module:

-1. Install the required Python package:
+- Encoding images to base64 format.
+- Running the GPT-4 Vision model with specified tasks and images.
+- Customization options such as setting the OpenAI API key and maximum token limit.

-```bash
-pip3 install --upgrade swarms
-```
+## Class: GPT4VisionAPI<a name="class-gpt4visionapi"></a>

-2. Make sure you have an OpenAI API key. You can obtain one by signing up on the [OpenAI platform](https://beta.openai.com/signup/).
+The `GPT4VisionAPI` class is the core component of this module. It encapsulates the functionality required to interact with the GPT-4 Vision model. Below, we'll dive into the class in detail.

-3. Set your OpenAI API key as an environment variable. You can do this in your code or your environment configuration. Alternatively, you can provide the API key directly when initializing the `GPT4Vision` class.
+### Initialization<a name="initialization"></a>

-## Initialization
+When initializing the `GPT4VisionAPI` class, you have the option to provide the OpenAI API key and set the maximum token limit. Here are the parameters and their descriptions:

-To start using the GPT4Vision Model API, you need to create an instance of the `GPT4Vision` class. You can customize its behavior by providing various configuration options, but it also comes with sensible defaults.
+| Parameter      | Type | Default Value                                        | Description                                                                                    |
+|----------------|------|------------------------------------------------------|------------------------------------------------------------------------------------------------|
+| openai_api_key | str  | `OPENAI_API_KEY` environment variable (if available) | The OpenAI API key. If not provided, it defaults to the `OPENAI_API_KEY` environment variable. |
+| max_tokens     | int  | 300                                                  | The maximum number of tokens to generate in the model's response.                              |

-Here's how you can initialize the `GPT4Vision` class:
+Here's how you can initialize the `GPT4VisionAPI` class:

 ```python
-from swarms.models.gpt4v import GPT4Vision
-
-gpt4vision = GPT4Vision(
-    api_key="Your Key"
-)
-```
-
-The above code initializes the `GPT4Vision` class with default settings. You can adjust these settings as needed.
-
-## Methods
-
-### `process_img`
+from swarms.models import GPT4VisionAPI

-The `process_img` method is used to preprocess an image before sending it to the GPT-4 Vision model. It takes the image path as input and returns the processed image in a format suitable for API requests.
+# Initialize with default API key and max_tokens
+api = GPT4VisionAPI()

-```python
-processed_img = gpt4vision.process_img(img_path)
+# Initialize with custom API key and max_tokens
+custom_api_key = "your_custom_api_key"
+api = GPT4VisionAPI(openai_api_key=custom_api_key, max_tokens=500)
 ```

-- `img_path` (str): The file path or URL of the image to be processed.
+### Methods<a name="methods"></a>

-### `__call__`
+#### encode_image<a name="encode_image"></a>

-The `__call__` method is the main method for interacting with the GPT-4 Vision model. It sends the image and tasks to the model and returns the generated response.
+This method allows you to encode an image from a URL to base64 format. It's a utility function used internally by the module.

 ```python
-response = gpt4vision(img, tasks)
-```
-
-- `img` (Union[str, List[str]]): Either a single image URL or a list of image URLs to be used for the API request.
-- `tasks` (List[str]): A list of tasks or questions related to the image(s).
-
-This method returns a `GPT4VisionResponse` object, which contains the generated answer.
-
-### `run`
+def encode_image(img: str) -> str:
+    """
+    Encode image to base64.

-The `run` method is an alternative way to interact with the GPT-4 Vision model. It takes a single task and image URL as input and returns the generated response.
+    Parameters:
+    - img (str): URL of the image to encode.

-```python
-response = gpt4vision.run(task, img)
+    Returns:
+    str: Base64 encoded image.
+    """
 ```

-- `task` (str): The task or question related to the image.
-- `img` (str): The image URL to be used for the API request.
-
-This method simplifies interactions when dealing with a single task and image.
-
-### `arun`
+#### run<a name="run"></a>

-The `arun` method is an asynchronous version of the `run` method. It allows for asynchronous processing of API requests, which can be useful in certain scenarios.
+The `run` method is the primary way to interact with the GPT-4 Vision model. It sends a request to the model with a task and an image URL, and it returns the model's response.

 ```python
-import asyncio
-
-async def main():
-    response = await gpt4vision.arun(task, img)
-    print(response)
+def run(task: str, img: str) -> str:
+    """
+    Run the GPT-4 Vision model.

-loop = asyncio.get_event_loop()
-loop.run_until_complete(main())
+    Parameters:
+    - task (str): The task or question related to the image.
+    - img (str): URL of the image to analyze.
+
+    Returns:
+    str: The model's response.
+    """
 ```

-- `task` (str): The task or question related to the image.
-- `img` (str): The image URL to be used for the API request.
-
-## Configuration Options
-
-The `GPT4Vision` class provides several configuration options that allow you to customize its behavior:
+#### __call__<a name="__call__"></a>

-- `max_retries` (int): The maximum number of retries to make to the API. Default: 3
-- `backoff_factor` (float): The backoff factor to use for exponential backoff. Default: 2.0
-- `timeout_seconds` (int): The timeout in seconds for the API request. Default: 10
-- `api_key` (str): The API key to use for the API request. Default: None (set via environment variable)
-- `quality` (str): The quality of the image to generate. Options: 'low' or 'high'. Default: 'low'
-- `max_tokens` (int): The maximum number of tokens to use for the API request. Default: 200
-
-## Usage Examples
-
-### Example 1: Generating Image Descriptions
-
-```python
-gpt4vision = GPT4Vision()
-img = "https://example.com/image.jpg"
-tasks = ["Describe this image."]
-response = gpt4vision(img, tasks)
-print(response.answer)
-```
-
-In this example, we create an instance of `GPT4Vision`, provide an image URL, and ask the model to describe the image. The response contains the generated description.
-
-### Example 2: Custom Configuration
+The `__call__` method is a convenient way to run the GPT-4 Vision model. It has the same functionality as the `run` method.

 ```python
-custom_config = {
-    "max_retries": 5,
-    "timeout_seconds": 20,
-    "quality": "high",
-    "max_tokens": 300,
-}
-gpt4vision = GPT4Vision(**custom_config)
-img = "https://example.com/another_image.jpg"
-tasks = ["What objects can you identify in this image?"]
-response = gpt4vision(img, tasks)
-print(response.answer)
-```
-
-In this example, we create an instance of `GPT4Vision` with custom configuration options. We set a higher timeout, request high-quality images, and allow more tokens in the response.
-
-### Example 3: Using the `run` Method
-
-```python
-gpt4vision = GPT4Vision()
-img = "https://example.com/image.jpg"
-task = "Describe this image in detail."
-response = gpt4vision.run(task, img)
-print(response)
+def __call__(task: str, img: str) -> str:
+    """
+    Run the GPT-4 Vision model (callable).
+
+    Parameters:
+    - task (str): The task or question related to the image.
+    - img (str): URL of the image to analyze.
+
+    Returns:
+    str: The model's response.
+    """
 ```

-In this example, we use the `run` method to simplify the interaction by providing a single task and image URL.
+## Examples<a name="examples"></a>

-# Model Usage and Image Understanding
+Let's explore some usage examples of the `GPT4VisionAPI` module to better understand how to use it effectively.

-The GPT-4 Vision model processes images in a unique way, allowing it to answer questions about both or each of the images independently. Here's an overview:
+### Example 1: Basic Usage<a name="example-1-basic-usage"></a>

-| Purpose             | Description                                                                                                              |
-| ------------------- | ------------------------------------------------------------------------------------------------------------------------ |
-| Image Understanding | The model is shown two copies of the same image and can answer questions about both or each of the images independently. |
+In this example, we'll use the module with the default API key and maximum tokens to analyze an image.

-# Image Detail Control
+```python
+from swarms.models import GPT4VisionAPI

-You have control over how the model processes the image and generates textual understanding by using the `detail` parameter, which has two options: `low` and `high`.
+# Initialize with default API key and max_tokens
+api = GPT4VisionAPI()

-| Detail | Description |
-| ------ | ----------- |
-| low    | Disables the "high-res" model. The model receives a low-res 512 x 512 version of the image and represents the image with a budget of 65 tokens. Ideal for use cases not requiring high detail. |
-| high   | Enables "high-res" mode. The model first sees the low-res image and then creates detailed crops of input images as 512px squares based on the input image size. Uses a total of 129 tokens. |
+# Define the task and image URL
+task = "What is the color of the object?"
+img = "https://i.imgur.com/2M2ZGwC.jpeg"

-# Managing Images
+# Run the GPT-4 Vision model
+response = api.run(task, img)

-To use the Chat Completions API effectively, you must manage the images you pass to the model. Here are some key considerations:
+# Print the model's response
+print(response)
+```

-| Management Aspect       | Description                                                                                             |
-| ----------------------- | --------------------------------------------------------------------------------------------------------- |
-| Image Reuse             | To pass the same image multiple times, include the image with each API request.                            |
-| Image Size Optimization | Improve latency by downsizing images to meet the expected size requirements.                               |
-| Image Deletion          | After processing, images are deleted from OpenAI servers and not retained. No data is used for training.   |
+### Example 2: Custom API Key<a name="example-2-custom-api-key"></a>

-# Limitations
+If you have a custom API key, you can initialize the module with it as shown in this example.

-While GPT-4 with Vision is powerful, it has some limitations:
+```python
+from swarms.models import GPT4VisionAPI

-| Limitation                         | Description                                                                                       |
-| ---------------------------------- | --------------------------------------------------------------------------------------------------- |
-| Medical Images                     | Not suitable for interpreting specialized medical images like CT scans.                              |
-| Non-English Text                   | May not perform optimally when handling non-Latin alphabets, such as Japanese or Korean.             |
-| Large Text in Images               | Enlarge text within images for readability, but avoid cropping important details.                    |
-| Rotated or Upside-Down Text/Images | May misinterpret rotated or upside-down text or images.                                              |
-| Complex Visual Elements            | May struggle to understand complex graphs or text with varying colors or styles.                     |
-| Spatial Reasoning                  | Struggles with tasks requiring precise spatial localization, such as identifying chess positions.    |
-| Accuracy                           | May generate incorrect descriptions or captions in certain scenarios.                                |
-| Panoramic and Fisheye Images       | Struggles with panoramic and fisheye images.                                                         |
+# Initialize with custom API key and max_tokens
+custom_api_key = "your_custom_api_key"
+api = GPT4VisionAPI(openai_api_key=custom_api_key, max_tokens=500)

-# Calculating Costs
+# Define the task and image URL
+task = "What is the object in the image?"
+img = "https://i.imgur.com/3T3ZHwD.jpeg"

-Image inputs are metered and charged in tokens. The token cost depends on the image size and detail option.
+# Run the GPT-4 Vision model
+response = api.run(task, img)

-| Example                                       | Token Cost  |
-| --------------------------------------------- | ----------- |
-| 1024 x 1024 square image in detail: high mode | 765 tokens  |
-| 2048 x 4096 image in detail: high mode        | 1105 tokens |
-| 4096 x 8192 image in detail: low mode         | 85 tokens   |
+# Print the model's response
+print(response)
+```

-# FAQ
+### Example 3: Adjusting Maximum Tokens<a name="example-3-adjusting-maximum-tokens"></a>

-Here are some frequently asked questions about GPT-4 with Vision:
+You can also customize the maximum token limit when initializing the module. In this example, we set it to 1000 tokens.

-| Question                       | Answer                                                                                                                 |
-| ------------------------------ | ------------------------------------------------------------------------------------------------------------------------ |
-| Fine-Tuning Image Capabilities | No, fine-tuning the image capabilities of GPT-4 is not supported at this time.                                             |
-| Generating Images              | GPT-4 is used for understanding images, not generating them.                                                               |
-| Supported Image File Types     | Supported image file types include PNG (.png), JPEG (.jpeg and .jpg), WEBP (.webp), and non-animated GIF (.gif).           |
-| Image Size Limitations         | Image uploads are restricted to 20MB per image.                                                                            |
-| Image Deletion                 | Uploaded images are automatically deleted after processing by the model.                                                   |
-| Learning More                  | For more details about GPT-4 with Vision, refer to the GPT-4 with Vision system card.                                      |
-| CAPTCHA Submission             | CAPTCHAs are blocked for safety reasons.                                                                                   |
-| Rate Limits                    | Image processing counts toward your tokens per minute (TPM) limit. Refer to the calculating costs section for details.     |
-| Image Metadata                 | The model does not receive image metadata.                                                                                 |
-| Handling Unclear Images        | If an image is unclear, the model will do its best to interpret it, but results may be less accurate.                      |
+```python
+from swarms.models import GPT4VisionAPI
+
+# Initialize with default API key and custom max_tokens
+api = GPT4VisionAPI(max_tokens=1000)
+
+# Define the task and image URL
+task = "Describe the scene in the image."
+img = "https://i.imgur.com/4P4ZRxU.jpeg"

-## Additional Tips
+# Run the GPT-4 Vision model
+response = api.run(task, img)

-- Make sure to handle potential exceptions and errors when making API requests. The library includes retries and error handling, but it's essential to handle exceptions gracefully in your code.
-- Experiment with different configuration options to optimize the trade-off between response quality and response time based on your specific requirements.
+# Print the model's response
+print(response)
+```

-## References and Resources
+## Additional Information<a name="additional-information"></a>

-- [OpenAI Platform](https://beta.openai.com/signup/): Sign up for an OpenAI API key.
-- [OpenAI API Documentation](https://platform.openai.com/docs/api-reference/chat/create): Official API documentation for the GPT-4 Vision model.
+- If you encounter any errors or issues with the module, make sure to check your API key and internet connectivity.
+- It's recommended to handle exceptions when using the module to gracefully handle errors.
+- You can further customize the module to fit your specific use case by modifying the code as needed.

-Now you have a comprehensive understanding of the GPT4Vision Model API, its configuration options, and how to use it for various computer vision and natural language processing tasks. Start experimenting and integrating it into your projects to leverage the power of GPT-4 Vision for image-related tasks.
+## References<a name="references"></a>

-# Conclusion
+- [OpenAI API Documentation](https://beta.openai.com/docs/)

-With GPT-4 Vision, you have a powerful tool for understanding and generating textual descriptions for images. By considering its capabilities, limitations, and cost calculations, you can effectively leverage this model for various image-related tasks.
+This documentation provides a comprehensive guide on how to use the `GPT4VisionAPI` module effectively. It covers initialization, methods, usage examples, and additional information to ensure a smooth experience when working with the GPT-4 Vision model.
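The token costs quoted in the removed "Calculating Costs" table above can be reproduced with OpenAI's published tile-based formula (85 base tokens plus 170 per 512 px tile after rescaling). A small sketch, assuming that formula applies:

```python
import math


def vision_token_cost(width: int, height: int, detail: str = "high") -> int:
    """Approximate GPT-4 Vision image input cost, per OpenAI's tile formula."""
    if detail == "low":
        return 85  # low detail costs a flat 85 tokens regardless of size
    # Scale so the longest side fits within 2048 px, then the shortest side to 768 px.
    scale = min(1.0, 2048 / max(width, height))
    width, height = width * scale, height * scale
    scale = min(1.0, 768 / min(width, height))
    width, height = width * scale, height * scale
    # 170 tokens per 512 px tile, plus a fixed 85-token base.
    tiles = math.ceil(width / 512) * math.ceil(height / 512)
    return 85 + 170 * tiles


print(vision_token_cost(1024, 1024))         # 765, matching the removed table
print(vision_token_cost(2048, 4096))         # 1105, matching the removed table
print(vision_token_cost(4096, 8192, "low"))  # 85, matching the removed table
```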
@@ -1,31 +0,0 @@
import os

from dotenv import load_dotenv

from swarms.models import OpenAIChat
from swarms.structs import Flow
from swarms.swarms.multi_agent_collab import MultiAgentCollaboration

load_dotenv()

api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)


## Initialize the workflow
flow = Flow(llm=llm, max_loops=1, dashboard=True)
flow2 = Flow(llm=llm, max_loops=1, dashboard=True)
flow3 = Flow(llm=llm, max_loops=1, dashboard=True)


swarm = MultiAgentCollaboration(
    agents=[flow, flow2, flow3],
    max_iters=4,
)

swarm.run("Generate a 10,000 word blog on health and wellness.")
@@ -0,0 +1,20 @@
from swarms.structs import Flow
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
    MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
)


llm = GPT4VisionAPI()

task = "What is the color of the object?"
img = "images/swarms.jpeg"

## Initialize the workflow
flow = Flow(
    llm=llm,
    sop=MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
    max_loops="auto",
)

flow.run(task=task, img=img)
@@ -1,28 +0,0 @@
import os
from dotenv import load_dotenv
from swarms.models.revgptV4 import RevChatGPTModel
from swarms.workers.worker import Worker

load_dotenv()

config = {
    "model": os.getenv("REVGPT_MODEL"),
    "plugin_ids": [os.getenv("REVGPT_PLUGIN_IDS")],
    "disable_history": os.getenv("REVGPT_DISABLE_HISTORY") == "True",
    "PUID": os.getenv("REVGPT_PUID"),
    "unverified_plugin_domains": [
        os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")
    ],
}

llm = RevChatGPTModel(access_token=os.getenv("ACCESS_TOKEN"), **config)

worker = Worker(ai_name="Optimus Prime", llm=llm)

task = (
    "What were the winning boston marathon times for the past 5 years (ending"
    " in 2022)? Generate a table of the year, name, country of origin, and"
    " times."
)
response = worker.run(task)
print(response)
@@ -0,0 +1,24 @@
from swarms.structs import Flow
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
    MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
)

llm = GPT4VisionAPI()

task = (
    "Analyze this image of an assembly line and identify any issues such as"
    " misaligned parts, defects, or deviations from the standard assembly"
    " process. IF there is anything unsafe in the image, explain why it is"
    " unsafe and how it could be improved."
)
img = "assembly_line.jpg"

## Initialize the workflow
flow = Flow(
    llm=llm,
    max_loops=1,
    dashboard=True,
)

flow.run(task=task, img=img)
(binary image added: 532 KiB)
@@ -1,7 +1,7 @@
 import os
 from termcolor import colored
 from swarms.models import OpenAIChat
-from swarms.models.autotemp import AutoTemp
+from autotemp import AutoTemp
 from swarms.structs import SequentialWorkflow
@@ -1,5 +1,5 @@
 import os
-from swarms.swarms.blog_gen import BlogGen
+from blog_gen import BlogGen


 def main():
|
||||
"""
|
||||
Idea 2 img
|
||||
|
||||
task -> gpt4 text -> dalle3 img -> gpt4vision img + text analyze img -> dalle3 img -> loop
|
||||
|
||||
"""
|
||||
from swarms.models.gpt4_vision_api import GPT4VisionAPI
|
@@ -0,0 +1,20 @@
from swarms.structs import Flow
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
    MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
)


llm = GPT4VisionAPI()

task = "What is the color of the object?"
img = "images/swarms.jpeg"

## Initialize the workflow
flow = Flow(
    llm=llm,
    sop=MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1,
    max_loops="auto",
)

flow.run(task=task, img=img)
@@ -1,33 +1,17 @@
 from swarms.structs import Flow
-from swarms.models import Idefics
+from swarms.models.gpt4_vision_api import GPT4VisionAPI

-# Multi Modality Auto Agent
-llm = Idefics(max_length=2000)
-
-task = (
-    "User: What is in this image?"
-    " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
-)
+llm = GPT4VisionAPI()
+
+task = "What is the color of the object?"
+img = "images/swarms.jpeg"

 ## Initialize the workflow
 flow = Flow(
     llm=llm,
-    max_loops=2,
+    max_loops="auto",
     dashboard=True,
-    # stopping_condition=None,  # You can define a stopping condition as needed.
-    # loop_interval=1,
-    # retry_attempts=3,
-    # retry_interval=1,
-    # interactive=False,  # Set to 'True' for interactive mode.
-    # dynamic_temperature=False,  # Set to 'True' for dynamic temperature handling.
 )

-# out = flow.load_state("flow_state.json")
-# temp = flow.dynamic_temperature()
-# filter = flow.add_response_filter("Trump")
-out = flow.run(task)
-# out = flow.validate_response(out)
-# out = flow.analyze_feedback(out)
-# out = flow.print_history_and_memory()
-# # out = flow.save_state("flow_state.json")
-# print(out)
+flow.run(task=task, img=img)
(binary image added: 41 KiB)
@@ -0,0 +1,127 @@
"""
Swarm of multi modal autonomous agents for manufacturing!
---------------------------------------------------------
Health Security agent: Agent that monitors the health of working conditions: input image of factory output: health safety index 0.0 - 1.0 being the highest
Quality Control agent: Agent that monitors the quality of the product: input image of product output: quality index 0.0 - 1.0 being the highest
Productivity agent: Agent that monitors the productivity of the factory: input image of factory output: productivity index 0.0 - 1.0 being the highest
Safety agent: Agent that monitors the safety of the factory: input image of factory output: safety index 0.0 - 1.0 being the highest
Security agent: Agent that monitors the security of the factory: input image of factory output: security index 0.0 - 1.0 being the highest
Sustainability agent: Agent that monitors the sustainability of the factory: input image of factory output: sustainability index 0.0 - 1.0 being the highest
Efficiency agent: Agent that monitors the efficiency of the factory: input image of factory output: efficiency index 0.0 - 1.0 being the highest


Flow:
health security agent -> quality control agent -> productivity agent -> safety agent -> security agent -> sustainability agent -> efficiency agent
"""
import os

from dotenv import load_dotenv

from swarms.models import GPT4VisionAPI
from swarms.structs import Flow

load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")


llm = GPT4VisionAPI(openai_api_key=api_key)

assembly_line = "playground/demos/swarm_of_mma_manufacturing/assembly_line.jpg"
red_robots = "playground/demos/swarm_of_mma_manufacturing/red_robots.jpg"
robots = "playground/demos/swarm_of_mma_manufacturing/robots.jpg"
tesla_assembly_line = "playground/demos/swarm_of_mma_manufacturing/tesla_assembly.jpg"


# Define detailed prompts for each agent
tasks = {
    "health_safety": (
        "Analyze the factory's working environment for health safety. Focus on"
        " cleanliness, ventilation, spacing between workstations, and personal"
        " protective equipment availability."
    ),
    "productivity": (
        "Review the factory's workflow efficiency, machine utilization, and"
        " employee engagement. Identify operational delays or bottlenecks."
    ),
    "safety": (
        "Analyze the factory's safety measures, including fire exits, safety"
        " signage, and emergency response equipment."
    ),
    "security": (
        "Evaluate the factory's security systems, entry/exit controls, and"
        " potential vulnerabilities."
    ),
    "sustainability": (
        "Inspect the factory's sustainability practices, including waste"
        " management, energy usage, and eco-friendly processes."
    ),
    "efficiency": (
        "Assess the manufacturing process's efficiency, considering the layout,"
        " logistics, and automation level."
    ),
}


# Define prompts for each agent
health_safety_prompt = tasks["health_safety"]
productivity_prompt = tasks["productivity"]
safety_prompt = tasks["safety"]
security_prompt = tasks["security"]
sustainability_prompt = tasks["sustainability"]
efficiency_prompt = tasks["efficiency"]


# Health security agent
health_security_agent = Flow(
    llm=llm,
    sop_list=health_safety_prompt,
    max_loops=2,
    multi_modal=True,
)

# Productivity agent
productivity_check_agent = Flow(
    llm=llm,
    sop=productivity_prompt,
    max_loops=2,
    multi_modal=True,
)

# Security agent
security_check_agent = Flow(
    llm=llm,
    sop=security_prompt,
    max_loops=2,
    multi_modal=True,
)

# Efficiency agent
efficiency_check_agent = Flow(
    llm=llm,
    sop=efficiency_prompt,
    max_loops=2,
    multi_modal=True,
)


# Run the first task on the health_security_agent
health_check = health_security_agent.run(
    "Analyze the safety of this factory", robots
)

# Run the second task on the productivity_check_agent, chaining in the prior output
productivity_check = productivity_check_agent.run(
    health_check, assembly_line
)

# Run the third task on the security_check_agent
security_check = security_check_agent.run(
    productivity_check, red_robots
)

# Run the fourth task on the efficiency_check_agent
efficiency_check = efficiency_check_agent.run(
    security_check, tesla_assembly_line
)
(binary images added: 43 KiB, 39 KiB, 42 KiB)
@@ -0,0 +1,112 @@
import os
import base64
import requests
from dotenv import load_dotenv
from typing import List

load_dotenv()


class StableDiffusion:
    """
    A class to interact with the Stable Diffusion API for image generation.

    Attributes:
    -----------
    api_key : str
        The API key for accessing the Stable Diffusion API.
    api_host : str
        The host URL of the Stable Diffusion API.
    engine_id : str
        The ID of the Stable Diffusion engine.
    headers : dict
        The headers for the API request.
    output_dir : str
        Directory where generated images will be saved.

    Methods:
    --------
    generate_image(prompt: str, cfg_scale: int, height: int, width: int, samples: int, steps: int) -> List[str]:
        Generates images based on a text prompt and returns a list of file paths to the generated images.
    """

    def __init__(self, api_key: str, api_host: str = "https://api.stability.ai"):
        """
        Initializes the StableDiffusion class with the provided API key and host.

        Parameters:
        -----------
        api_key : str
            The API key for accessing the Stable Diffusion API.
        api_host : str
            The host URL of the Stable Diffusion API. Default is "https://api.stability.ai".
        """
        self.api_key = api_key
        self.api_host = api_host
        self.engine_id = "stable-diffusion-v1-6"
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        self.output_dir = "images"
        os.makedirs(self.output_dir, exist_ok=True)

    def generate_image(self, prompt: str, cfg_scale: int = 7, height: int = 1024, width: int = 1024, samples: int = 1, steps: int = 30) -> List[str]:
        """
        Generates images based on a text prompt.

        Parameters:
        -----------
        prompt : str
            The text prompt based on which the image will be generated.
        cfg_scale : int
            CFG scale parameter for image generation. Default is 7.
        height : int
            Height of the generated image. Default is 1024.
        width : int
            Width of the generated image. Default is 1024.
        samples : int
            Number of images to generate. Default is 1.
        steps : int
            Number of steps for the generation process. Default is 30.

        Returns:
        --------
        List[str]:
            A list of paths to the generated images.

        Raises:
        -------
        Exception:
            If the API response is not 200 (OK).
        """
        response = requests.post(
            f"{self.api_host}/v1/generation/{self.engine_id}/text-to-image",
            headers=self.headers,
            json={
                "text_prompts": [{"text": prompt}],
                "cfg_scale": cfg_scale,
                "height": height,
                "width": width,
                "samples": samples,
                "steps": steps,
            },
        )

        if response.status_code != 200:
            raise Exception(f"Non-200 response: {response.text}")

        data = response.json()
        image_paths = []
        for i, image in enumerate(data["artifacts"]):
            image_path = os.path.join(self.output_dir, f"v1_txt2img_{i}.png")
            with open(image_path, "wb") as f:
                f.write(base64.b64decode(image["base64"]))
            image_paths.append(image_path)

        return image_paths


# Usage example:
# sd = StableDiffusion("your-api-key")
# images = sd.generate_image("A scenic landscape with mountains")
# print(images)
@ -0,0 +1,266 @@
|
||||
from abc import abstractmethod
|
||||
import asyncio
|
||||
import base64
|
||||
import concurrent.futures
|
||||
import time
|
||||
from concurrent import ThreadPoolExecutor
|
||||
from io import BytesIO
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
import requests
|
||||
from PIL import Image
|
||||
from termcolor import colored
|
||||
|
||||
|
||||
class BaseMultiModalModel:
|
||||
"""
|
||||
Base class for multimodal models
|
||||
|
||||
|
||||
Args:
|
||||
model_name (Optional[str], optional): Model name. Defaults to None.
|
||||
temperature (Optional[int], optional): Temperature. Defaults to 0.5.
|
||||
max_tokens (Optional[int], optional): Max tokens. Defaults to 500.
|
||||
max_workers (Optional[int], optional): Max workers. Defaults to 10.
|
||||
top_p (Optional[int], optional): Top p. Defaults to 1.
|
||||
top_k (Optional[int], optional): Top k. Defaults to 50.
|
||||
beautify (Optional[bool], optional): Beautify. Defaults to False.
|
||||
device (Optional[str], optional): Device. Defaults to "cuda".
|
||||
max_new_tokens (Optional[int], optional): Max new tokens. Defaults to 500.
|
||||
retries (Optional[int], optional): Retries. Defaults to 3.
|
||||
|
||||
Examples:
|
||||
>>> from swarms.models.base_multimodal_model import BaseMultiModalModel
|
||||
>>> model = BaseMultiModalModel()
|
||||
>>> model.run("Generate a summary of this text")
|
||||
>>> model.run("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")
|
||||
>>> model.run_batch(["Generate a summary of this text", "Generate a summary of this text"])
|
||||
>>> model.run_batch([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
|
||||
>>> model.run_batch_async(["Generate a summary of this text", "Generate a summary of this text"])
|
||||
>>> model.run_batch_async([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
|
||||
>>> model.run_batch_async_with_retries(["Generate a summary of this text", "Generate a summary of this text"])
|
||||
>>> model.run_batch_async_with_retries([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
|
||||
>>> model.generate_summary("Generate a summary of this text")
|
||||
>>> model.set_temperature(0.5)
|
||||
>>> model.set_max_tokens(500)
|
||||
>>> model.get_generation_time()
|
||||
>>> model.get_chat_history()
|
||||
>>> model.get_unique_chat_history()
|
||||
>>> model.get_chat_history_length()
|
||||
>>> model.get_unique_chat_history_length()
|
||||
>>> model.get_chat_history_tokens()
|
||||
>>> model.print_beautiful("Print this beautifully")
|
||||
>>> model.stream("Stream this")
|
||||
>>> model.unique_chat_history()
|
||||
>>> model.clear_chat_history()
|
||||
>>> model.get_img_from_web("https://www.google.com/images/branding/googlelogo/")
|
||||
|
||||
"""
|
||||
def __init__(
|
||||
self,
|
||||
model_name: Optional[str],
|
||||
temperature: Optional[int] = 0.5,
|
||||
max_tokens: Optional[int] = 500,
|
||||
max_workers: Optional[int] = 10,
|
||||
top_p: Optional[int] = 1,
|
||||
top_k: Optional[int] = 50,
|
||||
beautify: Optional[bool] = False,
|
||||
device: Optional[str] = "cuda",
|
||||
max_new_tokens: Optional[int] = 500,
|
||||
retries: Optional[int] = 3,
|
||||
):
|
||||
self.model_name = model_name
|
||||
self.temperature = temperature
|
||||
self.max_tokens = max_tokens
|
||||
self.max_workers = max_workers
|
||||
self.top_p = top_p
|
||||
self.top_k = top_k
|
||||
self.beautify = beautify
|
||||
self.device = device
|
||||
self.max_new_tokens = max_new_tokens
|
||||
self.retries = retries
|
||||
self.chat_history = []
|
||||
|
||||
@abstractmethod
|
||||
def __call__(self, text: str, img: str):
|
||||
"""Run the model"""
|
||||
pass
|
||||
|
||||
def run(self, task: str, img: str):
|
||||
"""Run the model"""
|
||||
pass
|
||||
|
||||
async def arun(self, task: str, img: str):
|
||||
"""Run the model asynchronously"""
|
||||
pass
|
||||
|
||||
def get_img_from_web(self, img: str):
|
||||
"""Get the image from the web"""
|
||||
try:
|
||||
response = requests.get(img)
|
||||
response.raise_for_status()
|
||||
image_pil = Image.open(BytesIO(response.content))
|
||||
return image_pil
|
||||
except requests.RequestException as error:
|
||||
print(f"Error fetching image from {img} and error: {error}")
|
||||
return None
|
||||
|
||||
def encode_img(self, img: str):
|
||||
"""Encode the image to base64"""
|
||||
with open(img, "rb") as image_file:
|
||||
return base64.b64encode(image_file.read()).decode("utf-8")
|
||||
|
||||
def get_img(self, img: str):
|
||||
"""Get the image from the path"""
|
||||
image_pil = Image.open(img)
|
||||
return image_pil
|
||||
|
||||
def clear_chat_history(self):
|
||||
"""Clear the chat history"""
|
||||
self.chat_history = []
|
||||
|
||||
def run_many(
|
||||
self,
|
||||
tasks: List[str],
|
||||
imgs: List[str],
|
||||
):
|
||||
"""
|
||||
Run the model on multiple tasks and images all at once using concurrent
|
||||
|
||||
Args:
|
||||
tasks (List[str]): List of tasks
|
||||
imgs (List[str]): List of image paths
|
||||
|
||||
Returns:
|
||||
List[str]: List of responses
|
||||
|
||||
|
||||
"""
|
||||
# Instantiate the thread pool executor
|
||||
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
|
||||
results = executor.map(self.run, tasks, imgs)
|
||||
|
||||
# Print the results for debugging
|
||||
for result in results:
|
||||
print(result)
|
||||
|
||||
def run_batch(self, tasks_images: List[Tuple[str, str]]) -> List[str]:
|
||||
"""Process a batch of tasks and images"""
|
||||
with concurrent.futures.ThreadPoolExecutor() as executor:
|
||||
futures = [
|
||||
executor.submit(self.run, task, img)
|
||||
for task, img in tasks_images
|
||||
]
|
||||
results = [future.result() for future in futures]
|
||||
return results
|
||||
|
||||
async def run_batch_async(
|
||||
self, tasks_images: List[Tuple[str, str]]
|
||||
) -> List[str]:
|
||||
"""Process a batch of tasks and images asynchronously"""
|
||||
loop = asyncio.get_event_loop()
|
||||
futures = [
|
||||
loop.run_in_executor(None, self.run, task, img)
|
||||
for task, img in tasks_images
|
||||
]
|
||||
return await asyncio.gather(*futures)
|
||||
|
||||
async def run_batch_async_with_retries(
|
||||
self, tasks_images: List[Tuple[str, str]]
|
||||
) -> List[str]:
|
||||
"""Process a batch of tasks and images asynchronously with retries"""
|
||||
loop = asyncio.get_event_loop()
|
||||
futures = [
|
||||
loop.run_in_executor(None, self.run_with_retries, task, img)
|
||||
for task, img in tasks_images
|
||||
]
|
||||
return await asyncio.gather(*futures)
|
||||
|
||||
    def unique_chat_history(self):
        """Get the unique chat history"""
        return list(set(self.chat_history))

    def run_with_retries(self, task: str, img: str):
        """Run the model, retrying up to self.retries times on failure"""
        for _ in range(self.retries):
            try:
                return self.run(task, img)
            except Exception as error:
                print(f"Error with the request: {error}")
                continue

    def run_batch_with_retries(self, tasks_images: List[Tuple[str, str]]):
        """Run a batch, retrying up to self.retries times on failure"""
        for _ in range(self.retries):
            try:
                return self.run_batch(tasks_images)
            except Exception as error:
                print(f"Error with the request: {error}")
                continue

    def _tokens_per_second(self) -> float:
        """Tokens per second"""
        elapsed_time = self.end_time - self.start_time
        if elapsed_time == 0:
            return float("inf")
        return self._num_tokens() / elapsed_time

    def _time_for_generation(self, task: str) -> float:
        """Time for Generation"""
        self.start_time = time.time()
        # run() expects (task, img); pass None when timing a text-only task
        self.run(task, None)
        self.end_time = time.time()
        return self.end_time - self.start_time

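    # Throughput sketch (hedged): assumes the concrete subclass records
    # self.start_time / self.end_time and implements _num_tokens():
    #
    #     model._time_for_generation("Describe the scene")
    #     print(f"{model._tokens_per_second():.1f} tokens/sec")
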
    @abstractmethod
    def generate_summary(self, text: str) -> str:
        """Generate Summary"""
        pass

    def set_temperature(self, value: float):
        """Set Temperature"""
        self.temperature = value

    def set_max_tokens(self, value: int):
        """Set new max tokens"""
        self.max_tokens = value

    def get_generation_time(self) -> float:
        """Get generation time"""
        if self.start_time and self.end_time:
            return self.end_time - self.start_time
        return 0

    def get_chat_history(self):
        """Get the chat history"""
        return self.chat_history

    def get_unique_chat_history(self):
        """Get the unique chat history"""
        return list(set(self.chat_history))

    def get_chat_history_length(self):
        """Get the chat history length"""
        return len(self.chat_history)

    def get_unique_chat_history_length(self):
        """Get the unique chat history length"""
        return len(list(set(self.chat_history)))

    def get_chat_history_tokens(self):
        """Get the chat history tokens"""
        return self._num_tokens()

    def print_beautiful(self, content: str, color: str = "cyan"):
        """Print Beautifully with termcolor"""
        content = colored(content, color)
        print(content)

    def stream(self, content: str):
        """Stream the output chunk by chunk

        Args:
            content (str): The content to stream
        """
        for chunk in content:
            print(chunk)
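
# Minimal sketch (hedged) of a concrete subclass of the abstract multimodal
# base class above; the base class name is assumed here for illustration.
# Only __call__ and generate_summary are abstract, so both must be defined:
#
#     class EchoMultiModal(BaseMultiModalModel):  # base name assumed
#         def __call__(self, text: str, img: str):
#             return f"task={text}, img={img}"
#
#         def generate_summary(self, text: str) -> str:
#             return text[:100]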
@ -0,0 +1,421 @@
import asyncio
import base64
import concurrent.futures
import json
import logging
import os
from concurrent.futures import ThreadPoolExecutor
from typing import List, Optional, Tuple

import aiohttp
import requests
from dotenv import load_dotenv
from termcolor import colored

try:
    import cv2
except ImportError:
    print("OpenCV not installed. Please install OpenCV to use this model.")
    raise

# Load environment variables
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")


class GPT4VisionAPI:
    """
    GPT-4 Vision API

    This class is a wrapper for the OpenAI API. It is used to run the
    GPT-4 Vision model.

    Parameters
    ----------
    openai_api_key : str
        The OpenAI API key. Defaults to the OPENAI_API_KEY environment variable.
    max_tokens : int
        The maximum number of tokens to generate. Defaults to 300.

    Methods
    -------
    encode_image(img: str)
        Encode image to base64.
    run(task: str, img: str)
        Run the model.
    __call__(task: str, img: str)
        Run the model.

    Examples:
    ---------
    >>> from swarms.models import GPT4VisionAPI
    >>> llm = GPT4VisionAPI()
    >>> task = "What is the color of the object?"
    >>> img = "https://i.imgur.com/2M2ZGwC.jpeg"
    >>> llm.run(task, img)
    """

    def __init__(
        self,
        openai_api_key: str = openai_api_key,
        model_name: str = "gpt-4-vision-preview",
        logging_enabled: bool = False,
        max_workers: int = 10,
        max_tokens: int = 300,
        openai_proxy: str = "https://api.openai.com/v1/chat/completions",
        beautify: bool = False,
        streaming_enabled: Optional[bool] = False,
    ):
        super().__init__()
        self.openai_api_key = openai_api_key
        self.logging_enabled = logging_enabled
        self.model_name = model_name
        self.max_workers = max_workers
        self.max_tokens = max_tokens
        self.openai_proxy = openai_proxy
        self.beautify = beautify
        self.streaming_enabled = streaming_enabled

        if self.logging_enabled:
            logging.basicConfig(level=logging.DEBUG)
        else:
            # Disable debug logs for requests and urllib3
            logging.getLogger("requests").setLevel(logging.WARNING)
            logging.getLogger("urllib3").setLevel(logging.WARNING)

    def encode_image(self, img: str):
        """Encode image to base64."""
        with open(img, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")

    def download_img_then_encode(self, img: str):
        """Download image from URL then encode image to base64 using requests"""
        # Fetch the raw image bytes, then base64-encode them
        response = requests.get(img)
        response.raise_for_status()
        return base64.b64encode(response.content).decode("utf-8")

    # Function to handle vision tasks
    def run(
        self,
        task: Optional[str] = None,
        img: Optional[str] = None,
        *args,
        **kwargs,
    ):
        """Run the model and return the generated content."""
        try:
            base64_image = self.encode_image(img)
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.openai_api_key}",
            }
            payload = {
                "model": self.model_name,
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": task},
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:image/jpeg;base64,{base64_image}"
                                },
                            },
                        ],
                    }
                ],
                "max_tokens": self.max_tokens,
            }
            response = requests.post(
                self.openai_proxy,
                headers=headers,
                json=payload,
            )

            out = response.json()
            content = out["choices"][0]["message"]["content"]

            if self.streaming_enabled:
                self.stream_response(content)

            if self.beautify:
                print(colored(content, "cyan"))
            else:
                print(content)

            return content

        except Exception as error:
            print(f"Error with the request: {error}")
            raise error

    def video_prompt(self, frames):
        """
        Generate a prompt asking the model to describe a video, given a
        list of its frames.

        Parameters
        ----------
        frames : list
            A list of base64 frames

        Returns
        -------
        PROMPT : str
            The prompt to send to the model

        Examples
        --------
        >>> from swarms.models import GPT4VisionAPI
        >>> llm = GPT4VisionAPI()
        >>> video = "video.mp4"
        >>> base64_frames = llm.process_video(video)
        >>> prompt = llm.video_prompt(base64_frames)
        >>> print(prompt)
        """
        PROMPT = f"""
        These are frames from a video that I want to upload. Generate a compelling description that I can upload along with the video:

        {frames}
        """
        return PROMPT

    def stream_response(self, content: str):
        """Stream the response chunk by chunk

        Args:
            content (str): The content to stream
        """
        for chunk in content:
            print(chunk)

    def process_video(self, video: str):
        """
        Process a video into a list of base64-encoded frames

        Parameters
        ----------
        video : str
            The path to the video file

        Returns
        -------
        base64_frames : list
            A list of base64 frames

        Examples
        --------
        >>> from swarms.models import GPT4VisionAPI
        >>> llm = GPT4VisionAPI()
        >>> video = "video.mp4"
        >>> base64_frames = llm.process_video(video)
        """
        video = cv2.VideoCapture(video)

        base64_frames = []
        while video.isOpened():
            success, frame = video.read()
            if not success:
                break
            _, buffer = cv2.imencode(".jpg", frame)
            base64_frames.append(base64.b64encode(buffer).decode("utf-8"))

        video.release()
        print(len(base64_frames), "frames read.")
        return base64_frames

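    # End-to-end sketch (hedged): build a video description from sampled
    # frames; "video.mp4" is illustrative, and in practice you would
    # subsample base64_frames rather than embed every frame in the prompt:
    #
    #     llm = GPT4VisionAPI()
    #     frames = llm.process_video("video.mp4")
    #     prompt = llm.video_prompt(frames[::30])  # every 30th frame
    #     print(prompt)
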
    def __call__(self, task: str, img: str):
        """Run the model."""
        return self.run(task, img)

    def run_many(
        self,
        tasks: List[str],
        imgs: List[str],
    ):
        """
        Run the model on multiple tasks and images at once using a
        concurrent thread pool.

        Args:
            tasks (List[str]): List of tasks
            imgs (List[str]): List of image paths

        Returns:
            List[str]: List of responses
        """
        # Instantiate the thread pool executor; materialize the results
        # before iterating so the map generator is not exhausted by printing.
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            results = list(executor.map(self.run, tasks, imgs))

        # Print the results for debugging
        for result in results:
            print(result)

        return results

    async def arun(
        self,
        task: Optional[str] = None,
        img: Optional[str] = None,
    ):
        """
        Asynchronously run the model

        Overview:
        ---------
        This method is used to asynchronously run the model on a single
        task and image.

        Parameters:
        ----------
        task : str
            The task to run the model on.
        img : str
            The image to run the task on

        """
        try:
            base64_image = self.encode_image(img)
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.openai_api_key}",
            }
            payload = {
                "model": self.model_name,
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": task},
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:image/jpeg;base64,{base64_image}"
                                },
                            },
                        ],
                    }
                ],
                "max_tokens": self.max_tokens,
            }
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    self.openai_proxy, headers=headers, data=json.dumps(payload)
                ) as response:
                    out = await response.json()
                    content = out["choices"][0]["message"]["content"]
                    print(content)
                    return content
        except Exception as error:
            print(f"Error with the request: {error}")
            raise error

    def run_batch(self, tasks_images: List[Tuple[str, str]]) -> List[str]:
        """Process a batch of tasks and images"""
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self.run, task, img)
                for task, img in tasks_images
            ]
            results = [future.result() for future in futures]
        return results

    async def run_batch_async(
        self, tasks_images: List[Tuple[str, str]]
    ) -> List[str]:
        """Process a batch of tasks and images asynchronously"""
        loop = asyncio.get_event_loop()
        futures = [
            loop.run_in_executor(None, self.run, task, img)
            for task, img in tasks_images
        ]
        return await asyncio.gather(*futures)

    async def run_batch_async_with_retries(
        self, tasks_images: List[Tuple[str, str]]
    ) -> List[str]:
        """Process a batch of tasks and images asynchronously with retries"""
        loop = asyncio.get_event_loop()
        futures = [
            loop.run_in_executor(None, self.run_with_retries, task, img)
            for task, img in tasks_images
        ]
        return await asyncio.gather(*futures)

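    # Usage sketch (hedged): drive the async batch helpers from synchronous
    # code with asyncio.run:
    #
    #     llm = GPT4VisionAPI()
    #     pairs = [("What is this?", "a.jpg"), ("What is that?", "b.jpg")]
    #     results = asyncio.run(llm.run_batch_async(pairs))
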
    def health_check(self):
        """Health check for the GPT4Vision model"""
        try:
            response = requests.get("https://api.openai.com/v1/engines")
            return response.status_code == 200
        except requests.RequestException as error:
            print(f"Health check failed: {error}")
            return False

    def print_dashboard(self):
        """Print the model configuration and return the rendered text."""
        dashboard = colored(
            f"""
            GPT4Vision Dashboard
            --------------------
            Model: {self.model_name}
            Max Workers: {self.max_workers}
            OpenAIProxy: {self.openai_proxy}
            """,
            "green",
        )
        print(dashboard)
        return dashboard
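
# Quick-start sketch (hedged): print the configuration and ping the API.
#
#     llm = GPT4VisionAPI()
#     llm.print_dashboard()
#     print("healthy:", llm.health_check())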
@ -0,0 +1,30 @@
import logging
import os
import warnings


def disable_logging():
    warnings.filterwarnings("ignore", category=UserWarning)

    # Disable TensorFlow warnings
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Set the logging level for the entire module
    logging.basicConfig(level=logging.WARNING)

    try:
        log = logging.getLogger("pytorch")
        log.propagate = False
        log.setLevel(logging.ERROR)
    except Exception as error:
        print(f"Pytorch logging not disabled: {error}")

    for logger_name in [
        "tensorflow",
        "h5py",
        "numexpr",
        "git",
        "wandb.docker.auth",
    ]:
        logger = logging.getLogger(logger_name)
        logger.setLevel(logging.WARNING)  # Suppress DEBUG and INFO logs
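
# Usage sketch: call once at program startup, ideally before the noisy
# libraries are imported, so TF_CPP_MIN_LOG_LEVEL takes effect:
#
#     disable_logging()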
@ -1,93 +0,0 @@
import unittest
from unittest.mock import patch

from RevChatGPTModelv4 import RevChatGPTModelv4


class TestRevChatGPT(unittest.TestCase):
    def setUp(self):
        self.access_token = "123"
        self.model = RevChatGPTModelv4(access_token=self.access_token)

    def test_run(self):
        prompt = "What is the capital of France?"
        self.model.start_time = 10
        self.model.end_time = 20
        response = self.model.run(prompt)
        self.assertEqual(response, "The capital of France is Paris.")
        self.assertEqual(self.model.start_time, 10)
        self.assertEqual(self.model.end_time, 20)

    def test_generate_summary(self):
        text = "Hello world. This is some text. It has multiple sentences."
        summary = self.model.generate_summary(text)
        self.assertEqual(summary, "")

    @patch("RevChatGPTModelv4.Chatbot.install_plugin")
    def test_enable_plugin(self, mock_install_plugin):
        plugin_id = "plugin123"
        self.model.enable_plugin(plugin_id)
        mock_install_plugin.assert_called_with(plugin_id=plugin_id)

    @patch("RevChatGPTModelv4.Chatbot.get_plugins")
    def test_list_plugins(self, mock_get_plugins):
        mock_get_plugins.return_value = [{"id": "123", "name": "Test Plugin"}]
        plugins = self.model.list_plugins()
        self.assertEqual(len(plugins), 1)
        self.assertEqual(plugins[0]["id"], "123")
        self.assertEqual(plugins[0]["name"], "Test Plugin")

    @patch("RevChatGPTModelv4.Chatbot.get_conversations")
    def test_get_conversations(self, mock_get_conversations):
        self.model.chatbot.get_conversations()
        mock_get_conversations.assert_called()

    @patch("RevChatGPTModelv4.Chatbot.get_msg_history")
    def test_get_msg_history(self, mock_get_msg_history):
        convo_id = "123"
        self.model.chatbot.get_msg_history(convo_id)
        mock_get_msg_history.assert_called_with(convo_id)

    @patch("RevChatGPTModelv4.Chatbot.share_conversation")
    def test_share_conversation(self, mock_share_conversation):
        self.model.chatbot.share_conversation()
        mock_share_conversation.assert_called()

    @patch("RevChatGPTModelv4.Chatbot.gen_title")
    def test_gen_title(self, mock_gen_title):
        convo_id = "123"
        message_id = "456"
        self.model.chatbot.gen_title(convo_id, message_id)
        mock_gen_title.assert_called_with(convo_id, message_id)

    @patch("RevChatGPTModelv4.Chatbot.change_title")
    def test_change_title(self, mock_change_title):
        convo_id = "123"
        title = "New Title"
        self.model.chatbot.change_title(convo_id, title)
        mock_change_title.assert_called_with(convo_id, title)

    @patch("RevChatGPTModelv4.Chatbot.delete_conversation")
    def test_delete_conversation(self, mock_delete_conversation):
        convo_id = "123"
        self.model.chatbot.delete_conversation(convo_id)
        mock_delete_conversation.assert_called_with(convo_id)

    @patch("RevChatGPTModelv4.Chatbot.clear_conversations")
    def test_clear_conversations(self, mock_clear_conversations):
        self.model.chatbot.clear_conversations()
        mock_clear_conversations.assert_called()

    @patch("RevChatGPTModelv4.Chatbot.rollback_conversation")
    def test_rollback_conversation(self, mock_rollback_conversation):
        num = 2
        self.model.chatbot.rollback_conversation(num)
        mock_rollback_conversation.assert_called_with(num)

    @patch("RevChatGPTModelv4.Chatbot.reset_chat")
    def test_reset_chat(self, mock_reset_chat):
        self.model.chatbot.reset_chat()
        mock_reset_chat.assert_called()


if __name__ == "__main__":
    unittest.main()
@ -0,0 +1,238 @@
import asyncio
import os
from unittest.mock import AsyncMock, Mock, mock_open, patch

import pytest
from aiohttp import ClientResponseError
from dotenv import load_dotenv
from requests.exceptions import RequestException

from swarms.models.gpt4_vision_api import GPT4VisionAPI

load_dotenv()

custom_api_key = os.environ.get("OPENAI_API_KEY")
img = "images/swarms.jpeg"


@pytest.fixture
def vision_api():
    return GPT4VisionAPI(openai_api_key="test_api_key")


def test_init(vision_api):
    assert vision_api.openai_api_key == "test_api_key"


def test_encode_image(vision_api):
    with patch(
        "builtins.open", mock_open(read_data=b"test_image_data"), create=True
    ):
        encoded_image = vision_api.encode_image(img)
        assert encoded_image == "dGVzdF9pbWFnZV9kYXRh"

def test_run_success(vision_api):
    expected_response = {
        "choices": [{"message": {"content": "This is the model's response."}}]
    }
    with patch(
        "requests.post", return_value=Mock(json=lambda: expected_response)
    ) as mock_post:
        result = vision_api.run("What is this?", img)
        mock_post.assert_called_once()
        assert result == "This is the model's response."


def test_run_request_error(vision_api):
    with patch(
        "requests.post", side_effect=RequestException("Request Error")
    ):
        with pytest.raises(RequestException):
            vision_api.run("What is this?", img)


def test_run_response_error(vision_api):
    expected_response = {"error": "Model Error"}
    with patch(
        "requests.post", return_value=Mock(json=lambda: expected_response)
    ):
        with pytest.raises(KeyError):
            vision_api.run("What is this?", img)


def test_call(vision_api):
    expected_response = {
        "choices": [{"message": {"content": "This is the model's response."}}]
    }
    with patch(
        "requests.post", return_value=Mock(json=lambda: expected_response)
    ) as mock_post:
        result = vision_api("What is this?", img)
        mock_post.assert_called_once()
        assert result == "This is the model's response."

@pytest.fixture
def gpt_api():
    return GPT4VisionAPI()


def test_initialization_with_default_key():
    api = GPT4VisionAPI()
    assert api.openai_api_key == custom_api_key


def test_initialization_with_custom_key():
    custom_key = custom_api_key
    api = GPT4VisionAPI(openai_api_key=custom_key)
    assert api.openai_api_key == custom_key

def test_run_successful_response(gpt_api):
    task = "What is in the image?"
    img_url = img
    response_json = {
        "choices": [{"message": {"content": "Answer from GPT-4 Vision"}}]
    }
    mock_response = Mock()
    mock_response.json.return_value = response_json
    with patch("requests.post", return_value=mock_response) as mock_post:
        result = gpt_api.run(task, img_url)
        mock_post.assert_called_once()
        assert result == response_json["choices"][0]["message"]["content"]


def test_run_with_exception(gpt_api):
    task = "What is in the image?"
    img_url = img
    with patch("requests.post", side_effect=Exception("Test Exception")):
        with pytest.raises(Exception):
            gpt_api.run(task, img_url)


def test_call_method_successful_response(gpt_api):
    task = "What is in the image?"
    img_url = img
    response_json = {
        "choices": [{"message": {"content": "Answer from GPT-4 Vision"}}]
    }
    mock_response = Mock()
    mock_response.json.return_value = response_json
    with patch("requests.post", return_value=mock_response) as mock_post:
        result = gpt_api(task, img_url)
        mock_post.assert_called_once()
        assert result == response_json["choices"][0]["message"]["content"]


def test_call_method_with_exception(gpt_api):
    task = "What is in the image?"
    img_url = img
    with patch("requests.post", side_effect=Exception("Test Exception")):
        with pytest.raises(Exception):
            gpt_api(task, img_url)

@pytest.mark.asyncio
async def test_arun_success(vision_api):
    expected_response = {
        "choices": [{"message": {"content": "This is the model's response."}}]
    }
    with patch(
        "aiohttp.ClientSession.post",
        new_callable=AsyncMock,
        return_value=AsyncMock(json=AsyncMock(return_value=expected_response)),
    ) as mock_post:
        result = await vision_api.arun("What is this?", img)
        mock_post.assert_called_once()
        assert result == "This is the model's response."


@pytest.mark.asyncio
async def test_arun_request_error(vision_api):
    with patch(
        "aiohttp.ClientSession.post",
        new_callable=AsyncMock,
        side_effect=Exception("Request Error"),
    ):
        with pytest.raises(Exception):
            await vision_api.arun("What is this?", img)


def test_run_many_success(vision_api):
    expected_response = {
        "choices": [{"message": {"content": "This is the model's response."}}]
    }
    with patch(
        "requests.post", return_value=Mock(json=lambda: expected_response)
    ) as mock_post:
        tasks = ["What is this?", "What is that?"]
        imgs = [img, img]
        results = vision_api.run_many(tasks, imgs)
        assert mock_post.call_count == 2
        assert results == [
            "This is the model's response.",
            "This is the model's response.",
        ]


def test_run_many_request_error(vision_api):
    with patch(
        "requests.post", side_effect=RequestException("Request Error")
    ):
        tasks = ["What is this?", "What is that?"]
        imgs = [img, img]
        with pytest.raises(RequestException):
            vision_api.run_many(tasks, imgs)


@pytest.mark.asyncio
async def test_arun_json_decode_error(vision_api):
    with patch(
        "aiohttp.ClientSession.post",
        new_callable=AsyncMock,
        return_value=AsyncMock(json=AsyncMock(side_effect=ValueError)),
    ):
        with pytest.raises(ValueError):
            await vision_api.arun("What is this?", img)


@pytest.mark.asyncio
async def test_arun_api_error(vision_api):
    error_response = {"error": {"message": "API Error"}}
    with patch(
        "aiohttp.ClientSession.post",
        new_callable=AsyncMock,
        return_value=AsyncMock(json=AsyncMock(return_value=error_response)),
    ):
        with pytest.raises(Exception, match="API Error"):
            await vision_api.arun("What is this?", img)


@pytest.mark.asyncio
async def test_arun_unexpected_response(vision_api):
    unexpected_response = {"unexpected": "response"}
    with patch(
        "aiohttp.ClientSession.post",
        new_callable=AsyncMock,
        return_value=AsyncMock(
            json=AsyncMock(return_value=unexpected_response)
        ),
    ):
        with pytest.raises(Exception, match="Unexpected response"):
            await vision_api.arun("What is this?", img)


@pytest.mark.asyncio
async def test_arun_retries(vision_api):
    with patch(
        "aiohttp.ClientSession.post",
        new_callable=AsyncMock,
        side_effect=ClientResponseError(None, None),
    ) as mock_post:
        with pytest.raises(ClientResponseError):
            await vision_api.arun("What is this?", img)
        assert mock_post.call_count == vision_api.retries + 1


@pytest.mark.asyncio
async def test_arun_timeout(vision_api):
    with patch(
        "aiohttp.ClientSession.post",
        new_callable=AsyncMock,
        side_effect=asyncio.TimeoutError,
    ):
        with pytest.raises(asyncio.TimeoutError):
            await vision_api.arun("What is this?", img)