Merge branch 'kyegomez:master' into master

pull/307/head
pliny 1 year ago committed by GitHub
commit e547141baa

@ -6,7 +6,8 @@ COHERE_API_KEY="your_api_key_here"
ALEPHALPHA_API_KEY="your_api_key_here"
HUGGINGFACEHUB_API_KEY="your_api_key_here"
STABILITY_API_KEY="your_api_key_here"
POSTHOG_API_KEY=""
POSTHOG_HOST=""
WOLFRAM_ALPHA_APPID="your_wolfram_alpha_appid_here"
ZAPIER_NLA_API_KEY="your_zapier_nla_api_key_here"

@ -203,7 +203,117 @@ response = agent.run("Generate a video of a swarm of fish and then make an image
print(response)
```
---
### Multi-Agent Swarm for Logistics
- Swarms is a framework designed for real-world deployment. Here is a demo presenting a fully ready-to-use Swarm for a vast array of logistics tasks.
- Swarms is designed to be modular and reliable for real-world deployments.
- Swarms is the first framework that unleashes multi-modal autonomous agents in the real world.
```python
from swarms.structs import Agent
import os
from dotenv import load_dotenv
from swarms.models import GPT4VisionAPI
from swarms.prompts.logistics import (
Health_Security_Agent_Prompt,
Quality_Control_Agent_Prompt,
Productivity_Agent_Prompt,
Safety_Agent_Prompt,
Security_Agent_Prompt,
Sustainability_Agent_Prompt,
Efficiency_Agent_Prompt,
)
# Load ENV
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
# GPT4VisionAPI
llm = GPT4VisionAPI(openai_api_key=api_key)
# Image for analysis
factory_image = "factory_image1.jpg"
# Initialize agents with respective prompts
health_security_agent = Agent(
llm=llm,
sop=Health_Security_Agent_Prompt,
max_loops=1,
multi_modal=True,
)
# Quality control agent
quality_control_agent = Agent(
llm=llm,
sop=Quality_Control_Agent_Prompt,
max_loops=1,
multi_modal=True,
)
# Productivity Agent
productivity_agent = Agent(
llm=llm,
sop=Productivity_Agent_Prompt,
max_loops=1,
multi_modal=True,
)
# Initialize safety agent
safety_agent = Agent(
llm=llm, sop=Safety_Agent_Prompt, max_loops=1, multi_modal=True
)
# Init the security agent
security_agent = Agent(
llm=llm, sop=Security_Agent_Prompt, max_loops=1, multi_modal=True
)
# Initialize sustainability agent
sustainability_agent = Agent(
llm=llm,
sop=Sustainability_Agent_Prompt,
max_loops=1,
multi_modal=True,
)
# Initialize efficiency agent
efficiency_agent = Agent(
llm=llm,
sop=Efficiency_Agent_Prompt,
max_loops=1,
multi_modal=True,
)
# Run agents with respective tasks on the same image
health_analysis = health_security_agent.run(
"Analyze the safety of this factory", factory_image
)
quality_analysis = quality_control_agent.run(
"Examine product quality in the factory", factory_image
)
productivity_analysis = productivity_agent.run(
"Evaluate factory productivity", factory_image
)
safety_analysis = safety_agent.run(
"Inspect the factory's adherence to safety standards",
factory_image,
)
security_analysis = security_agent.run(
"Assess the factory's security measures and systems",
factory_image,
)
sustainability_analysis = sustainability_agent.run(
"Examine the factory's sustainability practices", factory_image
)
efficiency_analysis = efficiency_agent.run(
"Analyze the efficiency of the factory's manufacturing process",
factory_image,
)
```
---
# Features 🤖

@ -0,0 +1,178 @@
## `Gemini` Documentation
### Introduction
The Gemini module is a versatile tool for leveraging the power of multimodal AI models to generate content. It allows users to combine textual and image inputs to generate creative and informative outputs. In this documentation, we will explore the Gemini module in detail, covering its purpose, architecture, methods, and usage examples.
#### Purpose
The Gemini module is designed to bridge the gap between text and image data, enabling users to harness the capabilities of multimodal AI models effectively. By providing both a textual task and an image as input, Gemini generates content that aligns with the specified task and incorporates the visual information from the image.
### Installation
Before using Gemini, ensure that you have the required dependencies installed. You can install them using the following commands:
```bash
pip install swarms
pip install google-generativeai
pip install python-dotenv
```
### Class: Gemini
#### Overview
The `Gemini` class is the central component of the Gemini module. It inherits from the `BaseMultiModalModel` class and provides methods to interact with the Gemini AI model. Let's dive into its architecture and functionality.
##### Class Constructor
```python
class Gemini(BaseMultiModalModel):
def __init__(
self,
model_name: str = "gemini-pro",
gemini_api_key: str = None,
*args,
**kwargs,
):
```
| Parameter | Type | Description | Default Value |
|---------------------|---------|------------------------------------------------------------------|--------------------|
| `model_name` | str | The name of the Gemini model. | "gemini-pro" |
| `gemini_api_key` | str | The Gemini API key. If not provided, it is fetched from the environment. | (None) |
- `model_name`: Specifies the name of the Gemini model to use. By default, it is set to "gemini-pro," but you can specify a different model if needed.
- `gemini_api_key`: This parameter allows you to provide your Gemini API key directly. If not provided, the constructor attempts to fetch it from the environment using the `get_gemini_api_key_env` helper function.
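For instance, a minimal sketch of both initialization styles (the key value below is a placeholder):
```python
from swarms.models.gemini import Gemini

# Pass the key explicitly...
gemini = Gemini(gemini_api_key="your_api_key_here")

# ...or omit it and let the constructor read GEMINI_API_KEY from the environment
gemini = Gemini()
```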
##### Methods
1. **run()**
```python
def run(
self,
task: str = None,
img: str = None,
*args,
**kwargs,
) -> str:
```
| Parameter | Type | Description |
|---------------|----------|--------------------------------------------|
| `task` | str | The textual task for content generation. |
| `img` | str | The path to the image to be processed. |
| `*args` | Variable | Additional positional arguments. |
| `**kwargs` | Variable | Additional keyword arguments. |
- `task`: Specifies the textual task for content generation. It can be a sentence or a phrase that describes the desired content.
- `img`: Provides the path to the image that will be processed along with the textual task. Gemini combines the visual information from the image with the textual task to generate content.
- `*args` and `**kwargs`: Allow for additional, flexible arguments that can be passed to the underlying Gemini model. These arguments can vary based on the specific Gemini model being used.
**Returns**: A string containing the generated content.
**Examples**:
```python
from swarms.models import Gemini
# Initialize the Gemini model
gemini = Gemini()
# Generate content for a textual task with an image
generated_content = gemini.run(
task="Describe this image",
img="image.jpg",
)
# Print the generated content
print(generated_content)
```
In this example, we initialize the Gemini model, provide a textual task, and specify an image for processing. The `run()` method generates content based on the input and returns the result.
2. **process_img()**
```python
def process_img(
self,
img: str = None,
type: str = "image/png",
*args,
**kwargs,
):
```
| Parameter | Type | Description | Default Value |
|---------------|----------|------------------------------------------------------|----------------|
| `img` | str | The path to the image to be processed. | (None) |
| `type` | str | The MIME type of the image (e.g., "image/png"). | "image/png" |
| `*args` | Variable | Additional positional arguments. |
| `**kwargs` | Variable | Additional keyword arguments. |
- `img`: Specifies the path to the image that will be processed. It's essential to provide a valid image path for image-based content generation.
- `type`: Indicates the MIME type of the image. By default, it is set to "image/png," but you can change it based on the image format you're using.
- `*args` and `**kwargs`: Allow for additional, flexible arguments that can be passed to the underlying Gemini model. These arguments can vary based on the specific Gemini model being used.
**Raises**: ValueError if any of the following conditions are met:
- No image is provided.
- The image type is not specified.
- The Gemini API key is missing.
**Examples**:
```python
from swarms.models.gemini import Gemini
# Initialize the Gemini model
gemini = Gemini()
# Process an image
processed_image = gemini.process_img(
img="image.jpg",
type="image/jpeg",
)
# Further use the processed image in content generation
generated_content = gemini.run(
task="Describe this image",
img=processed_image,
)
# Print the generated content
print(generated_content)
```
In this example, we demonstrate how to process an image using the `process_img()` method and then use the processed image in content generation.
#### Additional Information
- Gemini is designed to work seamlessly with various multimodal AI models, making it a powerful tool for content generation tasks.
- The module uses the `google.generativeai` package to access the underlying AI models. Ensure that you have this package installed to leverage the full capabilities of Gemini.
- It's essential to provide a valid Gemini API key for authentication. You can either pass it directly during initialization or store it in the environment variable "GEMINI_API_KEY."
- Gemini's flexibility allows you to experiment with different Gemini models and tailor the content generation process to your specific needs.
- Keep in mind that Gemini is designed to handle both textual and image inputs, making it a valuable asset for various applications, including natural language processing and computer vision tasks.
- If you encounter any issues or have specific requirements, refer to the Gemini documentation for more details and advanced usage.
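For example, a minimal sketch of loading the key from a `.env` file with python-dotenv (assuming the key is stored under `GEMINI_API_KEY`):
```python
import os

from dotenv import load_dotenv

from swarms.models.gemini import Gemini

# Read GEMINI_API_KEY from a local .env file into the environment
load_dotenv()

gemini = Gemini(gemini_api_key=os.getenv("GEMINI_API_KEY"))
```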
### References and Resources
- [Gemini GitHub Repository](https://github.com/swarms/gemini): Explore the Gemini repository for additional information, updates, and examples.
- [Google GenerativeAI Documentation](https://docs.google.com/document/d/1WZSBw6GsOhOCYm0ArydD_9uy6nPPA1KFIbKPhjj43hA): Dive deeper into the capabilities of the Google GenerativeAI package used by Gemini.
- [Gemini API Documentation](https://gemini-api-docs.example.com): Access the official documentation for the Gemini API to explore advanced features and integrations.
## Conclusion
In this comprehensive documentation, we've explored the Gemini module, its purpose, architecture, methods, and usage examples. Gemini empowers developers to generate content by combining textual tasks and images, making it a valuable asset for multimodal AI applications. Whether you're working on natural language processing or computer vision projects, Gemini can help you achieve impressive results.

@ -20,7 +20,6 @@ llm = OpenAIChat(
max_tokens=1000,
)
## Initialize the workflow
agent = Agent(
llm=llm,

@ -0,0 +1,18 @@
import os
from dotenv import load_dotenv
from swarms.models.gemini import Gemini
load_dotenv()
api_key = os.environ["GEMINI_API_KEY"]
# Initialize the model
model = Gemini(gemini_api_key=api_key)
# Establish the prompt and image
task = "What is your name"
img = "images/github-banner-swarms.png"
# Run the model on the task and image
out = model.run(task=task, img=img)
print(out)

@ -97,6 +97,7 @@ nav:
- DistilWhisperModel: "swarms/models/distilled_whisperx.md"
- ElevenLabsText2SpeechTool: "swarms/models/elevenlabs.md"
- OpenAITTS: "swarms/models/openai_tts.md"
- Gemini: "swarms/models/gemini.md"
- swarms.structs:
- Overview: "swarms/structs/overview.md"
- AutoScaler: "swarms/swarms/autoscaler.md"

Binary file not shown.


@ -0,0 +1,35 @@
import os
from dotenv import load_dotenv
from swarms.models.gpt4_vision_api import GPT4VisionAPI
from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT
from swarms.structs import Agent
# Load the environment variables
load_dotenv()
# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")
# Initialize the language model
llm = GPT4VisionAPI(
openai_api_key=api_key,
max_tokens=500,
)
# Initialize the task
task = "This is an eye test. What do you see?"
img = "playground/demos/multi_modal_chain_of_thought/eyetest.jpg"
## Initialize the workflow
agent = Agent(
llm=llm,
max_loops=2,
autosave=True,
sop=VISUAL_CHAIN_OF_THOUGHT,
)
# Run the workflow on a task
out = agent.run(task=task, img=img)
print(out)

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "2.7.7"
version = "2.8.9"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@ -24,48 +24,48 @@ classifiers = [
[tool.poetry.dependencies]
python = "^3.6.1"
torch = "2.1.1"
transformers = "2.10"
transformers = "4.35.0"
openai = "0.28.0"
langchain = "*"
asyncio = "*"
einops = "*"
google-generativeai = "*"
langchain-experimental = "*"
playwright = "*"
weaviate-client = "*"
opencv-python-headless = "*"
faiss-cpu = "*"
backoff = "*"
marshmallow = "*"
datasets = "*"
optimum = "*"
diffusers = "*"
PyPDF2 = "*"
vllm = "*"
accelerate = "*"
sentencepiece = "*"
wget = "*"
tensorflow = "2.15.0"
httpx = "*"
tiktoken = "*"
safetensors = "*"
attrs = "*"
ggl = "*"
ratelimit = "*"
beautifulsoup4 = "*"
cohere = "*"
huggingface-hub = "*"
langchain = "0.0.333"
asyncio = "3.4.3"
einops = "0.7.0"
google-generativeai = "0.3.1"
langchain-experimental = "0.0.10"
playwright = "1.34.0"
weaviate-client = "3.25.3"
opencv-python-headless = "4.8.1.78"
faiss-cpu = "1.7.4"
backoff = "2.2.1"
marshmallow = "3.19.0"
datasets = "2.10.1"
optimum = "1.15.0"
diffusers = "0.17.1"
PyPDF2 = "3.0.1"
accelerate = "0.22.0"
sentencepiece = "0.1.98"
wget = "3.2"
tensorflow = "2.12.0"
httpx = "0.24.1"
tiktoken = "0.4.0"
safetensors = "0.3.3"
attrs = "22.2.0"
ggl = "1.1.0"
ratelimit = "2.2.1"
beautifulsoup4 = "4.11.2"
cohere = "4.24"
huggingface-hub = "0.16.4"
pydantic = "1.10.12"
tenacity = "*"
Pillow = "*"
chromadb = "*"
tabulate = "*"
termcolor = "*"
black = "*"
open_clip_torch = "*"
soundfile = "*"
torchvision = "*"
rich = "*"
tenacity = "8.2.2"
Pillow = "9.4.0"
chromadb = "0.4.14"
tabulate = "0.9.0"
termcolor = "2.2.0"
black = "23.3.0"
open_clip_torch = "2.20.0"
soundfile = "0.12.1"
torchvision = "0.16.1"
rich = "13.5.2"
[tool.poetry.group.lint.dependencies]
ruff = ">=0.0.249,<0.1.7"

@ -1,73 +1,71 @@
torch==2.1.1
transformers>2.10
pandas
langchain
nest_asyncio
langchain-experimental
playwright
transformers==4.35.0
pandas==1.5.3
langchain==0.0.333
nest_asyncio==1.5.6
langchain-experimental==0.0.10
playwright==1.34.0
wget==3.2
simpleaichat
httpx
open_clip_torch
ggl
beautifulsoup4
google-search-results==2.4.2
Pillow
faiss-cpu
simpleaichat==0.2.2
httpx==0.24.1
open_clip_torch==2.20.0
ggl==1.1.0
beautifulsoup4==4.11.2
google-search-results
Pillow==9.4.0
faiss-cpu==1.7.4
openai==0.28.0
attrs
datasets
attrs==22.2.0
datasets==2.10.1
pydantic==1.10.12
soundfile
soundfile==0.12.1
arize-phoenix
weaviate-client
huggingface-hub
google-generativeai
sentencepiece
PyPDF2
accelerate
weaviate-client==3.25.3
huggingface-hub==0.16.4
google-generativeai==0.3.1
sentencepiece==0.1.98
PyPDF2==3.0.1
accelerate==0.22.0
vllm
chromadb
tensorflow
chromadb==0.4.14
tensorflow==2.12.0
optimum
tiktoken
tabulate
tiktoken==0.4.0
tabulate==0.9.0
colored
addict
backoff
ratelimit
backoff==2.2.1
ratelimit==2.2.1
albumentations
basicsr
termcolor
termcolor==2.2.0
controlnet-aux
diffusers
einops
imageio
opencv-python-headless
imageio-ffmpeg
diffusers==0.17.1
einops==0.7.0
imageio==2.25.1
opencv-python-headless==4.8.1.78
imageio-ffmpeg==0.4.9
invisible-watermark
kornia
safetensors
numpy
omegaconf
open_clip_torch
openai
opencv-python
prettytable
safetensors
safetensors==0.3.3
numpy==1.25.2
omegaconf==2.3.0
open_clip_torch==2.20.0
openai==0.28.0
opencv-python==4.7.0.72
prettytable==3.9.0
safetensors==0.3.3
test-tube
timm
timm==0.6.13
torchmetrics
webdataset
marshmallow
marshmallow==3.19.0
yapf
autopep8
cohere
torchvision
rich
cohere==4.24
torchvision==0.16.1
rich==13.5.2
mkdocs
mkdocs-material
mkdocs-glightbox
pre-commit
pre-commit==3.2.2

@ -0,0 +1,39 @@
import pkg_resources
def get_package_versions(requirements_path, output_path):
try:
with open(requirements_path, "r") as file:
requirements = file.readlines()
except FileNotFoundError:
print(f"Error: The file '{requirements_path}' was not found.")
return
package_versions = []
for requirement in requirements:
# Skip empty lines and comments
if (
requirement.strip() == ""
or requirement.strip().startswith("#")
):
continue
# Extract package name
package_name = requirement.split("==")[0].strip()
try:
version = pkg_resources.get_distribution(
package_name
).version
package_versions.append(f"{package_name}=={version}")
except pkg_resources.DistributionNotFound:
package_versions.append(f"{package_name}: not installed")
with open(output_path, "w") as file:
for package_version in package_versions:
file.write(package_version + "\n")
print(f"Versions written to {output_path}")
# Usage
get_package_versions("requirements.txt", "installed_versions.txt")

@ -0,0 +1,40 @@
import toml
import pkg_resources
def update_pyproject_versions(pyproject_path):
try:
with open(pyproject_path, "r") as file:
data = toml.load(file)
except FileNotFoundError:
print(f"Error: The file '{pyproject_path}' was not found.")
return
except toml.TomlDecodeError:
print(
f"Error: The file '{pyproject_path}' is not a valid TOML"
" file."
)
return
dependencies = (
data.get("tool", {}).get("poetry", {}).get("dependencies", {})
)
for package in dependencies:
if package.lower() == "python":
continue # Skip the Python version dependency
try:
version = pkg_resources.get_distribution(package).version
dependencies[package] = version
except pkg_resources.DistributionNotFound:
print(f"Warning: Package '{package}' not installed.")
with open(pyproject_path, "w") as file:
toml.dump(data, file)
print(f"Updated versions written to {pyproject_path}")
# Usage
update_pyproject_versions("pyproject.toml")

@ -6,3 +6,4 @@ from swarms.agents import * # noqa: E402, F403
from swarms.swarms import * # noqa: E402, F403
from swarms.structs import * # noqa: E402, F403
from swarms.models import * # noqa: E402, F403
from swarms.telemetry import * # noqa: E402, F403

@ -1,6 +1,8 @@
import asyncio
import base64
import concurrent.futures
import logging
import os
import time
from abc import abstractmethod
from concurrent.futures import ThreadPoolExecutor
@ -96,10 +98,6 @@ class BaseMultiModalModel:
self.meta_prompt = meta_prompt
self.chat_history = []
def __call__(self, task: str, img: str, *args, **kwargs):
"""Run the model"""
return self.run(task, img, *args, **kwargs)
@abstractmethod
def run(
self, task: Optional[str], img: Optional[str], *args, **kwargs
@ -107,7 +105,21 @@ class BaseMultiModalModel:
"""Run the model"""
pass
async def arun(self, task: str, img: str):
def __call__(
self, task: str = None, img: str = None, *args, **kwargs
):
"""Call the model
Args:
task (str): _description_
img (str): _description_
Returns:
_type_: _description_
"""
return self.run(task, img, *args, **kwargs)
async def arun(self, task: str, img: str, *args, **kwargs):
"""Run the model asynchronously"""
pass
@ -317,13 +329,13 @@ class BaseMultiModalModel:
content = colored(content, color)
print(content)
def stream(self, content: str):
def stream_response(self, text: str):
"""Stream the output
Args:
    text (str): content to print chunk by chunk
"""
for chunk in content:
for chunk in text:
print(chunk)
def meta_prompt(self):

@ -0,0 +1,268 @@
import os
import subprocess as sp
from pathlib import Path
from dotenv import load_dotenv
from PIL import Image
from swarms.models.base_multimodal_model import BaseMultiModalModel
try:
import google.generativeai as genai
from google.generativeai.types import GenerationConfig
except ImportError as error:
print(f"Error importing google.generativeai: {error}")
print("Please install the google.generativeai package")
print("pip install google-generativeai")
sp.run(["pip", "install", "--upgrade", "google-generativeai"])
load_dotenv()
# Helpers
def get_gemini_api_key_env():
"""Get the Gemini API key from the environment
Raises:
    ValueError: if GEMINI_API_KEY is not set
Returns:
    str: the API key
"""
key = os.getenv("GEMINI_API_KEY")
if key is None:
raise ValueError("Please provide a Gemini API key")
return key
# Main class
class Gemini(BaseMultiModalModel):
"""Gemini model
Args:
    model_name (str, optional): name of the Gemini model. Defaults to "gemini-pro-vision".
    gemini_api_key (str, optional): API key. Fetched from the GEMINI_API_KEY environment variable if not provided.
    return_safety (bool, optional): return the safety ratings instead of the text. Defaults to False.
    candidates (bool, optional): return the raw candidates. Defaults to False.
    stream (bool, optional): stream the response. Defaults to False.
    candidate_count (int, optional): number of candidates to generate. Defaults to 1.
    stop_sequence ([type], optional): stop sequences. Defaults to ["x"].
    max_output_tokens (int, optional): maximum number of output tokens. Defaults to 100.
    temperature (float, optional): sampling temperature. Defaults to 0.9.
Methods:
run: Run the Gemini model
process_img: Process the image
chat: Chat with the Gemini model
list_models: List the Gemini models
stream_tokens: Stream the tokens
process_img_pil: Process img
Examples:
>>> from swarms.models import Gemini
>>> gemini = Gemini()
>>> gemini.run(
task="A dog",
img="dog.png",
)
"""
def __init__(
self,
model_name: str = "gemini-pro-vision",
gemini_api_key: str = None,
return_safety: bool = False,
candidates: bool = False,
stream: bool = False,
candidate_count: int = 1,
stop_sequence=["x"],
max_output_tokens: int = 100,
temperature: float = 0.9,
*args,
**kwargs,
):
super().__init__(model_name, *args, **kwargs)
self.model_name = model_name
self.gemini_api_key = gemini_api_key or get_gemini_api_key_env()
self.safety = return_safety
self.candidates = candidates
self.stream = stream
self.candidate_count = candidate_count
self.stop_sequence = stop_sequence
self.max_output_tokens = max_output_tokens
self.temperature = temperature
# Prepare the generation config
self.generation_config = GenerationConfig(
candidate_count=candidate_count,
# stop_sequence=stop_sequence,
max_output_tokens=max_output_tokens,
temperature=temperature,
)
# Initialize the model
self.model = genai.GenerativeModel(
model_name, *args, **kwargs
)
# Check for the key
if self.gemini_api_key is None:
raise ValueError("Please provide a Gemini API key")
def system_prompt(
self,
system_prompt: str = None,
task: str = None,
*args,
**kwargs,
):
"""System prompt
Args:
system_prompt (str, optional): _description_. Defaults to None.
"""
PROMPT = f"""
{system_prompt}
{task}
"""
return PROMPT
def run(
self,
task: str = None,
img: str = None,
*args,
**kwargs,
) -> str:
"""Run the Gemini model
Args:
task (str, optional): textual task. Defaults to None.
img (str, optional): img. Defaults to None.
Returns:
str: output from the model
"""
try:
    if img:
        # Pair the image with the task for multimodal generation
        process_img = self.process_img_pil(img)
        response = self.model.generate_content(
            contents=[task, process_img],
            generation_config=self.generation_config,
            stream=self.stream,
            *args,
            **kwargs,
        )
    else:
        response = self.model.generate_content(
            task, *args, **kwargs
        )
    return response.text
except Exception as error:
print(f"Error running Gemini model: {error}")
print(f"Please check the task and image: {task}, {img}")
raise error
def process_img(
self,
img: str = None,
type: str = "image/png",
*args,
**kwargs,
):
"""Process the image
Args:
    img (str, optional): path to the image to process. Defaults to None.
    type (str, optional): MIME type of the image. Defaults to "image/png".
Raises:
    ValueError: if no image is provided
    ValueError: if no image type is provided
    ValueError: if no Gemini API key is set
"""
try:
    if img is None:
        raise ValueError("Please provide an image to process")
    if type is None:
        raise ValueError("Please provide the image type")
    if self.gemini_api_key is None:
        raise ValueError("Please provide a Gemini API key")
    # Load the raw image bytes with the declared MIME type
    img = [
        {"mime_type": type, "data": Path(img).read_bytes()}
    ]
    return img
except Exception as error:
    print(f"Error processing image: {error}")
    raise error
def chat(
self,
task: str = None,
img: str = None,
*args,
**kwargs,
) -> str:
"""Chat with the Gemini model
Args:
    task (str, optional): textual message to send. Defaults to None.
    img (str, optional): image to send after the task. Defaults to None.
Returns:
    str: the model's reply
"""
chat = self.model.start_chat()
response = chat.send_message(task, *args, **kwargs)
print(response.text)
response = chat.send_message(img, *args, **kwargs)
return response.text
def list_models(self) -> None:
    """Print the Gemini models that support content generation"""
for m in genai.list_models():
if "generateContent" in m.supported_generation_methods:
print(m.name)
def stream_tokens(self, content: str = None):
"""Stream the tokens
Args:
    content (str, optional): streamed response to print token by token. Defaults to None.
"""
for chunk in content:
print(chunk.text)
print("_" * 80)
def process_img_pil(self, img: str = None):
"""Process img
Args:
img (str, optional): _description_. Defaults to None.
Returns:
_type_: _description_
"""
img = Image.open(img)
return img

@ -112,10 +112,15 @@ class GPT4VisionAPI(BaseMultiModalModel):
def download_img_then_encode(self, img: str):
"""Download image from URL then encode image to base64 using requests"""
pass
if not os.path.exists(img):
print(f"Image file not found: {img}")
return None
response = requests.get(img)
return base64.b64encode(response.content).decode("utf-8")
# Function to handle vision tasks
def run(self, img: str, task: str, *args, **kwargs):
def run(self, task: str = None, img: str = None, *args, **kwargs):
"""Run the model."""
try:
base64_image = self.encode_image(img)
@ -156,13 +161,18 @@ class GPT4VisionAPI(BaseMultiModalModel):
.get("message", {})
.get("content", None)
)
if self.streaming_enabled:
content = self.stream_response(content)
return content
else:
print("No valid response in 'choices'")
return None
except Exception as error:
print(f"Error with the request: {error}")
print(
f"Error with the request: {error}, make sure you"
" double check input types and positions"
)
return None
def video_prompt(self, frames):
@ -207,7 +217,7 @@ class GPT4VisionAPI(BaseMultiModalModel):
for chunk in content:
print(chunk)
def process_video(self, video: str):
def process_video(self, video: str = None):
"""
Process a video into a list of base64 frames
@ -252,8 +262,50 @@ class GPT4VisionAPI(BaseMultiModalModel):
*args,
**kwargs,
):
self.video_prompt(self.process_video(video))
pass
prompt = self.video_prompt(self.process_video(video))
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {openai_api_key}",
}
payload = {
"model": self.model_name,
"messages": [
{
"role": "system",
"content": [self.system_prompt],
},
{
"role": "user",
"content": [
(task,), # task
*map(
lambda x: {"image": x, "resize": 768},
prompt[0::50],
),
],
},
],
"max_tokens": self.max_tokens,
}
response = requests.post(
self.openai_proxy,
headers=headers,
json=payload,
)
out = response.json()
content = out["choices"][0]["message"]["content"]
if self.streaming_enabled:
content = self.stream_response(content)
else:
pass
if self.beautify:
content = colored(content, "cyan")
print(content)
else:
print(content)
def __call__(
self,

@ -102,9 +102,13 @@ class Idefics(BaseMultiModalModel):
model_name, torch_dtype=torch_dtype, *args, **kwargs
).to(self.device)
self.processor = AutoProcessor.from_pretrained(model_name)
self.processor = AutoProcessor.from_pretrained(
model_name, *args, **kwargs
)
def run(self, task: str, *args, **kwargs) -> str:
def run(
self, task: str = None, img: str = None, *args, **kwargs
) -> str:
"""
Generates text based on the provided prompts.

@ -61,6 +61,8 @@ class OpenAITTS(AbstractLLM):
chunk_size=1024 * 1024,
autosave: bool = False,
saved_filepath: str = None,
*args,
**kwargs,
):
super().__init__()
self.model_name = model_name

@ -0,0 +1,140 @@
import logging
import os
from typing import Optional
import requests
from dotenv import load_dotenv
from swarms.models.base_llm import AbstractLLM
# Load environment variables
load_dotenv()
def together_api_key_env():
"""Get the API key from the environment."""
return os.getenv("TOGETHER_API_KEY")
class TogetherModel(AbstractLLM):
"""
GPT-4 Vision API
This class is a wrapper for the OpenAI API. It is used to run the GPT-4 Vision model.
Parameters
----------
together_api_key : str
The OpenAI API key. Defaults to the together_api_key environment variable.
max_tokens : int
The maximum number of tokens to generate. Defaults to 300.
Methods
-------
encode_image(img: str)
Encode image to base64.
run(task: str, img: str)
Run the model.
__call__(task: str, img: str)
Run the model.
Examples:
---------
>>> from swarms.models import GPT4VisionAPI
>>> llm = GPT4VisionAPI()
>>> task = "What is the color of the object?"
>>> img = "https://i.imgur.com/2M2ZGwC.jpeg"
>>> llm.run(task, img)
"""
def __init__(
self,
together_api_key: str = None,
model_name: str = "mistralai/Mixtral-8x7B-Instruct-v0.1",
logging_enabled: bool = False,
max_workers: int = 10,
max_tokens: int = 300,
api_endpoint: str = "https://api.together.xyz",
beautify: bool = False,
streaming_enabled: Optional[bool] = False,
meta_prompt: Optional[bool] = False,
system_prompt: Optional[str] = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.together_api_key = together_api_key or together_api_key_env()
self.logging_enabled = logging_enabled
self.model_name = model_name
self.max_workers = max_workers
self.max_tokens = max_tokens
self.api_endpoint = api_endpoint
self.beautify = beautify
self.streaming_enabled = streaming_enabled
self.meta_prompt = meta_prompt
self.system_prompt = system_prompt
if self.logging_enabled:
logging.basicConfig(level=logging.DEBUG)
else:
# Disable debug logs for requests and urllib3
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
if self.meta_prompt:
self.system_prompt = self.meta_prompt_init()
# Function to handle vision tasks
def run(self, task: str = None, *args, **kwargs):
"""Run the model."""
try:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.together_api_key}",
}
payload = {
"model": self.model_name,
"messages": [
{
"role": "system",
"content": [self.system_prompt],
},
{
"role": "user",
"content": task,
},
],
"max_tokens": self.max_tokens,
**kwargs,
}
response = requests.post(
self.api_endpoint,
headers=headers,
json=payload,
*args,
**kwargs,
)
out = response.json()
if "choices" in out and out["choices"]:
content = (
out["choices"][0]
.get("message", {})
.get("content", None)
)
if self.streaming_enabled:
content = self.stream_response(content)
return content
else:
print("No valid response in 'choices'")
return None
except Exception as error:
print(
f"Error with the request: {error}, make sure you"
" double check input types and positions"
)
return None
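As a quick illustration, a minimal usage sketch (assuming a valid TOGETHER_API_KEY is set in the environment; the import path is assumed):
```python
from swarms.models.together import TogetherModel  # import path assumed

model = TogetherModel(max_tokens=256)
out = model.run("Explain mixture-of-experts models in one paragraph.")
print(out)
```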

@ -1,9 +1,11 @@
from transformers import ViltProcessor, ViltForQuestionAnswering
import requests
from PIL import Image
from transformers import ViltForQuestionAnswering, ViltProcessor
from swarms.models.base_multimodal_model import BaseMultiModalModel
class Vilt:
class Vilt(BaseMultiModalModel):
"""
Vision-and-Language Transformer (ViLT) model fine-tuned on VQAv2.
It was introduced in the paper ViLT: Vision-and-Language Transformer Without
@ -21,15 +23,21 @@ class Vilt:
"""
def __init__(self):
def __init__(
self,
model_name: str = "dandelin/vilt-b32-finetuned-vqa",
*args,
**kwargs,
):
super().__init__(model_name, *args, **kwargs)
self.processor = ViltProcessor.from_pretrained(
"dandelin/vilt-b32-finetuned-vqa"
model_name, *args, **kwargs
)
self.model = ViltForQuestionAnswering.from_pretrained(
"dandelin/vilt-b32-finetuned-vqa"
model_name, *args, **kwargs
)
def __call__(self, text: str, image_url: str):
def run(self, task: str = None, img: str = None, *args, **kwargs):
"""
Run the model
@ -38,9 +46,9 @@ class Vilt:
"""
# Download the image
image = Image.open(requests.get(image_url, stream=True).raw)
image = Image.open(requests.get(img, stream=True).raw)
encoding = self.processor(image, text, return_tensors="pt")
encoding = self.processor(image, task, return_tensors="pt")
# Forward pass
outputs = self.model(**encoding)

@ -0,0 +1,58 @@
def react_prompt(task: str = None):
PROMPT = f"""
Task Description:
Accomplish the following {task} using the reasoning guidelines below.
######### REASONING GUIDELINES #########
You're an autonomous agent that has been tasked with {task}. You have been given a set of guidelines to follow to accomplish this task. You must follow the guidelines exactly.
Step 1: Observation
Begin by carefully observing the situation or problem at hand. Describe what you see, identify key elements, and note any relevant details.
Use <observation>...</observation> tokens to encapsulate your observations.
Example:
<observation> [Describe your initial observations of the task or problem here.] </observation>
Step 2: Thought Process
Analyze the observations. Consider different angles, potential challenges, and any underlying patterns or connections.
Think about possible solutions or approaches to address the task.
Use <thought>...</thought> tokens to encapsulate your thinking process.
Example:
<thought> [Explain your analysis of the observations, your reasoning behind potential solutions, and any assumptions or considerations you are making.] </thought>
Step 3: Action Planning
Based on your thoughts and analysis, plan a series of actions to solve the problem or complete the task.
Detail the steps you intend to take, resources you will use, and how these actions will address the key elements identified in your observations.
Use <action>...</action> tokens to encapsulate your action plan.
Example:
<action> [List the specific actions you plan to take, including any steps to gather more information or implement a solution.] </action>
Step 4: Execute and Reflect
Implement your action plan. As you proceed, continue to observe and think, adjusting your actions as needed.
Reflect on the effectiveness of your actions and the outcome. Consider what worked well and what could be improved.
Use <observation>...</observation>, <thought>...</thought>, and <action>...</action> tokens as needed to describe this ongoing process.
Example:
<observation> [New observations during action implementation.] </observation>
<thought> [Thoughts on how the actions are affecting the situation, adjustments needed, etc.] </thought>
<action> [Adjusted or continued actions to complete the task.] </action>
Guidance:
Remember, your goal is to provide a transparent and logical process that leads from observation to effective action. Your responses should demonstrate clear thinking, an understanding of the problem, and a rational approach to solving it. The use of tokens helps to structure your response and clarify the different stages of your reasoning and action.
"""
return PROMPT
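A minimal sketch of wiring this prompt into an Agent as its SOP, following the Agent pattern used elsewhere in this diff (the llm setup is illustrative):
```python
from swarms.models import OpenAIChat
from swarms.structs import Agent

# Illustrative model setup; any LLM accepted by Agent works here
llm = OpenAIChat(max_tokens=1000)

agent = Agent(
    llm=llm,
    max_loops=1,
    sop=react_prompt("Summarize the attached quarterly report"),
)
out = agent.run("Begin the task")
print(out)
```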

@ -0,0 +1,54 @@
SWARM_MANAGER_AGENT_SOP = """
Swarm Manager Agent SOP (Standard Operating Procedure) Prompt
Objective: As a Swarm Manager Agent, your primary role is to effectively distribute tasks to a team of worker agents to accomplish a specified goal. Your job involves breaking down a complex goal into manageable tasks, assigning these tasks to the appropriate agents based on their capabilities, and ensuring that all tasks are clearly defined and understood for effective execution.
Task Analysis and Distribution:
Understand the Goal: Start by fully understanding the user's goal. Break down the goal into specific, actionable tasks. Each task should contribute towards achieving the overall goal.
Task Breakdown: Break the goal into smaller, manageable tasks. Ensure each task is clear, concise, and achievable. Avoid vague or overly complex tasks.
Agent Assignment: Assign each task to an agent based on their unique ID and capabilities. Ensure the right task is assigned to the right agent for effective execution.
Task Formatting: Use the <task> and <agent_id> tags to denote tasks and their assigned agents. This is crucial for parsing and execution.
Review and Adjust: Once the tasks are assigned, review the entire plan to ensure it is cohesive and aligned with the goal. Make adjustments if necessary.
Few-Shot Examples:
Example 1: Content Creation and Distribution
Goal: Generate and distribute educational content about renewable energy.
<agent_id_1><task1> Research and write a detailed article about the latest advancements in renewable energy. The article should be comprehensive, covering solar, wind, and hydroelectric power. </task1></agent_id_1>
<agent_id_2><task2> Proofread the article for grammatical errors and ensure it is engaging and easy to understand. </task2></agent_id_2>
<agent_id_3><task3> Create infographics to visually represent key data and statistics from the article. </task3></agent_id_3>
<agent_id_4><task4> Distribute the article and infographics across various educational platforms and social media. </task4></agent_id_4>
Example 2: Market Research Project
Goal: Conduct a market research study on the latest smartphone trends.
<agent_id_1><task1> Gather data on the latest smartphone models, their features, prices, and consumer reviews. </task1></agent_id_1>
<agent_id_2><task2> Analyze the collected data to identify trends, such as the most desired features and price points. </task2></agent_id_2>
<agent_id_3><task3> Compile the findings into a comprehensive report, highlighting key trends and insights. </task3></agent_id_3>
<agent_id_4><task4> Prepare a presentation summarizing the research findings for a business audience. </task4></agent_id_4>
Example 3: Organizing a Community Event
Goal: Plan and execute a community health fair.
<agent_id_1><task1> Identify and contact health professionals and organizations to participate in the fair. </task1></agent_id_1>
<agent_id_2><task2> Arrange logistics, including location, date, time, and necessary equipment for the event. </task2></agent_id_2>
<agent_id_3><task3> Develop promotional materials and advertise the event in the community. </task3></agent_id_3>
<agent_id_4><task4> Coordinate volunteers for the day of the event, assigning specific roles and responsibilities. </task4></agent_id_4>
Guidance for the Swarm Manager Agent:
Clarity and Precision: Each task should be clearly defined. Avoid ambiguity to ensure each agent understands their specific duties.
Task Relevance: Ensure each task is relevant to the agent's skills and the overall goal.
Efficiency: Strive for an efficient distribution of tasks, avoiding overlaps or gaps in responsibilities.
Adaptability: Be prepared to reassign tasks or adjust the plan as needed based on feedback or changes in circumstances.
Communication: Maintain clear communication with all agents. Ensure they understand their tasks and the larger goal.
Your role as a Swarm Manager Agent is critical in achieving the user's goal. By effectively breaking down the goal, assigning tasks, and ensuring clear understanding among agents, you play a key role in the successful completion of the project. Remember, your task is to manage and facilitate; let each agent use their expertise to accomplish their assigned task.
"""

@ -0,0 +1,36 @@
VISUAL_CHAIN_OF_THOUGHT = """
You, as the model, are presented with a visual problem. This could be an image containing various elements that you need to analyze, a graph that requires interpretation, or a visual puzzle. Your task is to examine the visual information carefully and describe your process of understanding and solving the problem.
Instructions:
Observation: Begin by describing what you see in the image. Break down the visual elements into understandable segments. For instance, if it's a picture of a street, identify the key components like cars, buildings, people, street signs, etc. If it's a graph, start by outlining its type, the axes, and the data it presents.
Initial Analysis: Based on your observation, start analyzing the image. If it's a scene, narrate the possible context or the story the image might be telling. If it's a graph or data, begin to interpret what the data might indicate. This step is about forming hypotheses or interpretations based on visual cues.
Detailed Reasoning: Delve deeper into your analysis. This is where the chain of thought becomes critical. If you're looking at a scene, consider the relationships between elements. Why might that person be running? What does the traffic signal indicate? For graphs or data-driven images, analyze trends, outliers, and correlations. Explain your thought process in a step-by-step manner.
Visual References: As you explain, make visual references. Draw arrows, circles, or use highlights in the image to pinpoint exactly what you're discussing. These annotations should accompany your verbal reasoning, adding clarity to your explanations.
Conclusion or Solution: Based on your detailed reasoning, draw a conclusion or propose a solution. If it's a visual puzzle or problem, present your answer clearly, backed by the reasoning you've just outlined. If it's an open-ended image, summarize your understanding of the scene or the data.
Reflection: Finally, reflect on your thought process. Was there anything particularly challenging or ambiguous? How confident are you in your interpretation or solution, and why? This step is about self-assessment and providing insight into your reasoning confidence.
Example:
Let's say the image is a complex graph showing climate change data over the last century.
Observation: "The graph is a line graph with time on the x-axis and average global temperature on the y-axis. There are peaks and troughs, but a general upward trend is visible."
Initial Analysis: "The immediate observation is that average temperatures have risen over the last century. There are fluctuations, but the overall direction is upward."
Detailed Reasoning: "Looking closer, the steepest increase appears post-1950. This aligns with industrial advancements globally, suggesting a link between human activity and rising temperatures. The short-term fluctuations could be due to natural climate cycles, but the long-term trend indicates a more worrying, human-induced climate change pattern."
Visual References: "Here [draws arrow], the graph shows a sharp rise. The annotations indicate major industrial events, aligning with these spikes."
Conclusion or Solution: "The data strongly suggests a correlation between industrialization and global warming. The upward trend, especially in recent decades, indicates accelerating temperature increases."
Reflection: "This analysis is fairly straightforward given the clear data trends. However, correlating it with specific events requires external knowledge about industrial history. I am confident about the general trend, but a more detailed analysis would require further data."
"""

@ -163,7 +163,7 @@ class Agent:
id: str = agent_id,
llm: Any = None,
template: Optional[str] = None,
max_loops=5,
max_loops: int = 1,
stopping_condition: Optional[Callable[[str], bool]] = None,
loop_interval: int = 1,
retry_attempts: int = 3,
@ -194,6 +194,7 @@ class Agent:
preset_stopping_token: Optional[bool] = False,
traceback: Any = None,
traceback_handlers: Any = None,
streaming_on: Optional[bool] = False,
*args,
**kwargs: Any,
):
@ -236,6 +237,7 @@ class Agent:
self.preset_stopping_token = preset_stopping_token
self.traceback = traceback
self.traceback_handlers = traceback_handlers
self.streaming_on = streaming_on
# self.system_prompt = AGENT_SYSTEM_PROMPT_3
@ -489,7 +491,6 @@ class Agent:
Interactive: {self.interactive}
Dashboard: {self.dashboard}
Dynamic Temperature: {self.dynamic_temperature_enabled}
Temperature: {self.llm.model_kwargs.get('temperature')}
Autosave: {self.autosave}
Saved State: {self.saved_state_path}
Model Configuration: {model_config}
@ -547,6 +548,15 @@ class Agent:
print(colored(f"\nLoop {loop_count} of {max_loops}", "cyan"))
print("\n")
def streaming(self, content: str = None):
"""prints each chunk of content as it is generated
Args:
content (str, optional): _description_. Defaults to None.
"""
for chunk in content:
print(chunk, end="")
def _history(self, user_name: str, task: str) -> str:
"""Generate the history for the history prompt
@ -720,7 +730,11 @@ class Agent:
raise
def _run(self, **kwargs: Any) -> str:
"""Generate a result using the provided keyword args."""
"""Run the agent on a task
Returns:
str: the agent's response
"""
try:
task = self.format_prompt(**kwargs)
response, history = self._generate(task, task)

@ -1,5 +1,433 @@
"""
Base Structure for all Swarm Structures
import json
import os
from abc import ABC, abstractmethod
from typing import Optional, Any, Dict, List
from datetime import datetime
import asyncio
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
import psutil
try:
import gzip
except ImportError as error:
print(f"Error importing gzip: {error}")
"""
class BaseStructure(ABC):
"""Base structure.
Attributes:
name (Optional[str]): _description_
description (Optional[str]): _description_
save_metadata_on (bool): whether to save metadata to disk
save_artifact_path (Optional[str]): _description_
save_metadata_path (Optional[str]): _description_
save_error_path (Optional[str]): _description_
Methods:
run: _description_
save_to_file: _description_
load_from_file: _description_
save_metadata: _description_
load_metadata: _description_
log_error: _description_
save_artifact: _description_
load_artifact: _description_
log_event: _description_
run_async: _description_
save_metadata_async: _description_
load_metadata_async: _description_
log_error_async: _description_
save_artifact_async: _description_
load_artifact_async: _description_
log_event_async: _description_
asave_to_file: _description_
aload_from_file: _description_
run_in_thread: _description_
save_metadata_in_thread: _description_
run_concurrent: _description_
compress_data: _description_
decompress_data: _description_
run_batched: _description_
load_config: _description_
backup_data: _description_
monitor_resources: _description_
run_with_resources: _description_
run_with_resources_batched: _description_
Examples:
"""
def __init__(
self,
name: Optional[str] = None,
description: Optional[str] = None,
save_metadata_on: bool = True,
save_artifact_path: Optional[str] = "./artifacts",
save_metadata_path: Optional[str] = "./metadata",
save_error_path: Optional[str] = "./errors",
*args,
**kwargs,
):
self.name = name
self.description = description
self.save_metadata_on = save_metadata_on  # avoid shadowing the save_metadata() method below
self.save_artifact_path = save_artifact_path
self.save_metadata_path = save_metadata_path
self.save_error_path = save_error_path
@abstractmethod
def run(self, *args, **kwargs):
"""Run the structure."""
pass
def save_to_file(self, data: Any, file_path: str):
"""Save data to file.
Args:
data (Any): _description_
file_path (str): _description_
"""
with open(file_path, "w") as file:
json.dump(data, file)
def load_from_file(self, file_path: str) -> Any:
"""Load data from file.
Args:
file_path (str): _description_
Returns:
Any: _description_
"""
with open(file_path, "r") as file:
return json.load(file)
def save_metadata(self, metadata: Dict[str, Any]):
"""Save metadata to file.
Args:
metadata (Dict[str, Any]): _description_
"""
if self.save_metadata_on:
file_path = os.path.join(
self.save_metadata_path, f"{self.name}_metadata.json"
)
self.save_to_file(metadata, file_path)
def load_metadata(self) -> Dict[str, Any]:
"""Load metadata from file.
Returns:
Dict[str, Any]: _description_
"""
file_path = os.path.join(
self.save_metadata_path, f"{self.name}_metadata.json"
)
return self.load_from_file(file_path)
def log_error(self, error_message: str):
"""Log error to file.
Args:
error_message (str): _description_
"""
file_path = os.path.join(
self.save_error_path, f"{self.name}_errors.log"
)
with open(file_path, "a") as file:
file.write(f"{error_message}\n")
def save_artifact(self, artifact: Any, artifact_name: str):
"""Save artifact to file.
Args:
artifact (Any): _description_
artifact_name (str): _description_
"""
file_path = os.path.join(
self.save_artifact_path, f"{artifact_name}.json"
)
self.save_to_file(artifact, file_path)
def load_artifact(self, artifact_name: str) -> Any:
"""Load artifact from file.
Args:
artifact_name (str): _description_
Returns:
Any: _description_
"""
file_path = os.path.join(
self.save_artifact_path, f"{artifact_name}.json"
)
return self.load_from_file(file_path)
def _current_timestamp(self):
"""Current timestamp.
Returns:
_type_: _description_
"""
return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def log_event(
self,
event: str,
event_type: str = "INFO",
):
"""Log event to file.
Args:
event (str): _description_
event_type (str, optional): _description_. Defaults to "INFO".
"""
timestamp = self._current_timestamp()
log_message = f"[{timestamp}] [{event_type}] {event}\n"
file_path = os.path.join(
    self.save_metadata_path, f"{self.name}_events.log"
)
with open(file_path, "a") as file:
    file.write(log_message)
async def run_async(self, *args, **kwargs):
"""Run the structure asynchronously."""
loop = asyncio.get_event_loop()
# run_in_executor only forwards positional args, so bind kwargs with partial
return await loop.run_in_executor(
    None, functools.partial(self.run, *args, **kwargs)
)
async def save_metadata_async(self, metadata: Dict[str, Any]):
"""Save metadata to file asynchronously.
Args:
metadata (Dict[str, Any]): _description_
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None, self.save_metadata, metadata
)
async def load_metadata_async(self) -> Dict[str, Any]:
"""Load metadata from file asynchronously.
Returns:
Dict[str, Any]: _description_
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, self.load_metadata)
async def log_error_async(self, error_message: str):
"""Log error to file asynchronously.
Args:
error_message (str): _description_
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None, self.log_error, error_message
)
async def save_artifact_async(
self, artifact: Any, artifact_name: str
):
"""Save artifact to file asynchronously.
Args:
artifact (Any): _description_
artifact_name (str): _description_
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None, self.save_artifact, artifact, artifact_name
)
async def load_artifact_async(self, artifact_name: str) -> Any:
"""Load artifact from file asynchronously.
Args:
artifact_name (str): _description_
Returns:
Any: _description_
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None, self.load_artifact, artifact_name
)
async def log_event_async(
self,
event: str,
event_type: str = "INFO",
):
"""Log event to file asynchronously.
Args:
event (str): _description_
event_type (str, optional): _description_. Defaults to "INFO".
"""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(
None, self.log_event, event, event_type
)
async def asave_to_file(
self, data: Any, file: str, *args, **kwargs
):
"""Save data to file asynchronously.
Args:
data (Any): _description_
file (str): _description_
"""
await asyncio.to_thread(
self.save_to_file,
data,
file,
*args,
)
async def aload_from_file(
self,
file: str,
) -> Any:
"""Async load data from file.
Args:
file (str): _description_
Returns:
Any: _description_
"""
return await asyncio.to_thread(self.load_from_file, file)
def run_in_thread(self, *args, **kwargs):
"""Run the structure in a thread."""
with concurrent.futures.ThreadPoolExecutor() as executor:
return executor.submit(self.run, *args, **kwargs)
def save_metadata_in_thread(self, metadata: Dict[str, Any]):
"""Save metadata to file in a thread.
Args:
metadata (Dict[str, Any]): _description_
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
return executor.submit(self.save_metadata, metadata)
def run_concurrent(self, *args, **kwargs):
"""Run the structure concurrently."""
return asyncio.run(self.run_async(*args, **kwargs))
def compress_data(
self,
data: Any,
) -> bytes:
"""Compress data.
Args:
data (Any): _description_
Returns:
bytes: _description_
"""
return gzip.compress(json.dumps(data).encode())
def decompress_data(self, data: bytes) -> Any:
"""Decompress data.
Args:
data (bytes): _description_
Returns:
Any: _description_
"""
return json.loads(gzip.decompress(data).decode())
def run_batched(
self,
batched_data: List[Any],
batch_size: int = 10,
*args,
**kwargs,
):
"""Run batched data.
Args:
batched_data (List[Any]): _description_
batch_size (int, optional): _description_. Defaults to 10.
Returns:
_type_: _description_
"""
with ThreadPoolExecutor(max_workers=batch_size) as executor:
futures = [
executor.submit(self.run, data)
for data in batched_data
]
return [future.result() for future in futures]
def load_config(
self, config: str = None, *args, **kwargs
) -> Dict[str, Any]:
"""Load config from file.
Args:
config (str, optional): _description_. Defaults to None.
Returns:
Dict[str, Any]: _description_
"""
return self.load_from_file(config)
def backup_data(
self, data: Any, backup_path: str = None, *args, **kwargs
):
"""Backup data to file.
Args:
data (Any): _description_
backup_path (str, optional): _description_. Defaults to None.
"""
timestamp = self._current_timestamp()
backup_file_path = f"{backup_path}/{timestamp}.json"
self.save_to_file(data, backup_file_path)
def monitor_resources(self):
"""Monitor resource usage."""
memory = psutil.virtual_memory().percent
cpu_usage = psutil.cpu_percent(interval=1)
self.log_event(
f"Resource usage - Memory: {memory}%, CPU: {cpu_usage}%"
)
def run_with_resources(self, *args, **kwargs):
"""Run the structure with resource monitoring."""
self.monitor_resources()
return self.run(*args, **kwargs)
def run_with_resources_batched(
self,
batched_data: List[Any],
batch_size: int = 10,
*args,
**kwargs,
):
"""Run batched data with resource monitoring.
Args:
batched_data (List[Any]): _description_
batch_size (int, optional): _description_. Defaults to 10.
Returns:
_type_: _description_
"""
self.monitor_resources()
return self.run_batched(
batched_data, batch_size, *args, **kwargs
)
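To make the contract concrete, here is a minimal sketch of a subclass; `run` is the only abstract method, so that is all a concrete structure must provide:
```python
class EchoStructure(BaseStructure):
    """Trivial structure that returns its input unchanged."""

    def run(self, task: str, *args, **kwargs):
        return task


structure = EchoStructure(name="echo")
print(structure.run("hello"))  # "hello"

# Round-trip data through the gzip helpers
blob = structure.compress_data({"a": 1})
print(structure.decompress_data(blob))  # {'a': 1}
```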

@ -1,24 +1,8 @@
"""
Paid
# TODO: Pass in abstract LLM class that can utilize Hf or Anthropic models, Move away from OPENAI
# TODO: Add Universal Communication Layer, an ocean vectorstore instance
# TODO: BE MORE EXPLICIT ON TOOL USE, TASK DECOMPOSITION AND TASK COMPLETION AND ALLOCATION
# TODO: Add RLHF Data collection, ask user how the swarm is performing
# TODO: Create an onboarding process if settings are not preconfigured, like `from swarms import Swarm; Swarm()` => then initiate onboarding: name your swarm + provide purpose + etc.
"""
import asyncio
import concurrent.futures
import logging
import time
from abc import ABC, abstractmethod
from abc import ABC
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any, Callable, Dict, List, Optional
from termcolor import colored
from swarms.structs.agent import Agent

@ -0,0 +1,68 @@
from typing import Dict, Any, List
from swarms.structs.agent import Agent
# Helper functions for manager/corporate agents
def parse_tasks(
task: str = None,
) -> Dict[str, Any]:
"""Parse tasks
Args:
task (str, optional): _description_. Defaults to None.
Returns:
Dict[str, Any]: _description_
"""
tasks = {}
for line in task.split("\n"):
if line.startswith("<agent_id>") and line.endswith(
"</agent_id>"
):
agent_id, task = line[10:-11].split("><")
tasks[agent_id] = task
return tasks
def find_agent_by_id(
agent_id: str = None, agents: List[Agent] = None, *args, **kwargs
) -> Agent:
"""Find agent by id
Args:
agent_id (str, optional): _description_. Defaults to None.
agents (List[Agent], optional): _description_. Defaults to None.
Returns:
Agent: _description_
"""
for agent in agents:
if agent.id == agent_id:
return agent
return None
def distribute_tasks(
task: str = None, agents: List[Agent] = None, *args, **kwargs
):
"""Distribute tasks to agents
Args:
task (str, optional): _description_. Defaults to None.
agents (List[Agent], optional): _description_. Defaults to None.
"""
# Parse the task to extract tasks and agent id
tasks = parse_tasks(task)
# Distribute tasks to agents
for agent_id, task in tasks.items():
assigned_agent = find_agent_by_id(agent_id, agents)
if assigned_agent:
print(f"Assigning task {task} to agent {agent_id}")
output = assigned_agent.run(task, *args, **kwargs)
print(f"Output from agent {agent_id}: {output}")
else:
print(
f"No agent found with ID {agent_id}. Task '{task}' is"
" not assigned."
)
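As written, `parse_tasks` accepts lines of the form `<agent_id>ID><TASK</agent_id>`; a minimal sketch of parsing a manager's output:
```python
# Hypothetical manager output in the format parse_tasks expects
manager_output = "<agent_id>agent-1><Summarize the findings</agent_id>"

print(parse_tasks(manager_output))
# {'agent-1': 'Summarize the findings'}
```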

@ -0,0 +1,19 @@
from swarms.telemetry.log_all import log_all_calls, log_calls
# from swarms.telemetry.posthog_utils import log_activity_posthog
from swarms.telemetry.user_utils import (
generate_user_id,
get_machine_id,
get_system_info,
generate_unique_identifier,
)
__all__ = [
"log_all_calls",
"log_calls",
# "log_activity_posthog",
"generate_user_id",
"get_machine_id",
"get_system_info",
"generate_unique_identifier",
]

@ -0,0 +1,33 @@
import functools
import logging
import types
# Set up logging
logging.basicConfig(level=logging.INFO)
# Log all calls to functions in this module
def log_all_calls(module):
"""
Decorate all functions of a module to log calls to them.
"""
for name, obj in vars(module).items():
if isinstance(obj, types.FunctionType):
setattr(module, name, log_calls(obj))
# Log all calls to a function
def log_calls(func):
"""
Decorate a function to log calls to it.
"""
def wrapper(*args, **kwargs):
logging.info(
f"Calling function {func.__name__} with args {args} and"
f" kwargs {kwargs}"
)
result = func(*args, **kwargs)
logging.info(f"Function {func.__name__} returned {result}")
return result
return wrapper
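
A hedged usage sketch of the two decorators above; `my_module` is a hypothetical user module, not something defined in this diff.

```python
@log_calls
def add(a, b):
    # Every call and return value is logged at INFO level
    return a + b


add(2, 3)
# INFO:root:Calling function add with args (2, 3) and kwargs {}
# INFO:root:Function add returned 5

# log_all_calls(my_module) would wrap every plain Python function
# defined in my_module the same way; built-ins are skipped because
# only types.FunctionType objects match.
```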

@ -0,0 +1,69 @@
import functools
import os
from dotenv import load_dotenv
from posthog import Posthog
from swarms.telemetry.user_utils import generate_unique_identifier
# Load environment variables
load_dotenv()
# Initialize Posthog client
api_key = os.getenv("POSTHOG_API_KEY") or None
host = os.getenv("POSTHOG_HOST") or None
posthog = Posthog(api_key, host=host)
posthog.debug = True
def log_activity_posthog(event_name: str, **event_properties):
"""Log activity to Posthog.
Args:
event_name (str): Name of the event to log.
**event_properties: Properties of the event to log.
Examples:
>>> from swarms.telemetry.posthog_utils import log_activity_posthog
>>> @log_activity_posthog("test_event", test_property="test_value")
... def test_function():
... print("Hello, world!")
>>> test_function()
Hello, world!
>>> # Check Posthog dashboard for event "test_event" with property
>>> # "test_property" set to "test_value".
"""
def decorator_log_activity(func):
@functools.wraps(func)
def wrapper_log_activity(*args, **kwargs):
result = func(*args, **kwargs)
# Assuming you have a way to get the user id
distinct_user_id = generate_unique_identifier()
# Capture the event
posthog.capture(
distinct_user_id, event_name, event_properties
)
return result
return wrapper_log_activity
return decorator_log_activity
# @log_activity_posthog(
# "function_executed", function_name="my_function"
# )
# def my_function():
# # Function logic here
# return "Function executed successfully!"
# out = my_function()
# print(out)

@ -0,0 +1,63 @@
import hashlib
import platform
import uuid
import socket
# Helper functions
def generate_user_id():
"""Generate user id
Returns:
_type_: _description_
"""
return str(uuid.uuid4())
def get_machine_id():
"""Get machine id
Returns:
_type_: _description_
"""
raw_id = platform.node()
hashed_id = hashlib.sha256(raw_id.encode()).hexdigest()
return hashed_id
def get_system_info():
"""
Gathers basic system information.
Returns:
dict: A dictionary containing system-related information.
"""
info = {
"platform": platform.system(),
"platform_release": platform.release(),
"platform_version": platform.version(),
"architecture": platform.machine(),
"hostname": socket.gethostname(),
"ip_address": socket.gethostbyname(socket.gethostname()),
"mac_address": ":".join(
[
"{:02x}".format((uuid.getnode() >> elements) & 0xFF)
for elements in range(0, 2 * 6, 8)
][::-1]
),
"processor": platform.processor(),
"python_version": platform.python_version(),
}
return info
def generate_unique_identifier():
"""Generate unique identifier
Returns:
str: unique id
"""
system_info = get_system_info()
unique_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(system_info))
return str(unique_id)
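
A quick smoke test of the identity helpers; the printed values are placeholders, and the final assertion assumes the system info stays constant between the two calls.

```python
print(generate_user_id())    # e.g. 'f47ac10b-58cc-4372-a567-0e02b2c3d479'
print(get_machine_id())      # 64-char hex SHA-256 of the hostname
print(get_system_info()["platform"])  # e.g. 'Linux'

# uuid5 is deterministic, so the identifier is stable while the
# underlying system info does not change
assert generate_unique_identifier() == generate_unique_identifier()
```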

@ -0,0 +1,313 @@
import pytest
from unittest.mock import patch, Mock
from swarms.models.gemini import Gemini
# Define test fixtures
@pytest.fixture
def mock_gemini_api_key(monkeypatch):
monkeypatch.setenv("GEMINI_API_KEY", "mocked-api-key")
@pytest.fixture
def mock_genai_model():
return Mock()
# Test initialization of Gemini
def test_gemini_init_defaults(mock_gemini_api_key, mock_genai_model):
model = Gemini()
assert model.model_name == "gemini-pro"
assert model.gemini_api_key == "mocked-api-key"
assert model.model is mock_genai_model
def test_gemini_init_custom_params(
mock_gemini_api_key, mock_genai_model
):
model = Gemini(
model_name="custom-model", gemini_api_key="custom-api-key"
)
assert model.model_name == "custom-model"
assert model.gemini_api_key == "custom-api-key"
assert model.model is mock_genai_model
# Test Gemini run method
@patch("swarms.models.gemini.Gemini.process_img")
@patch("swarms.models.gemini.genai.GenerativeModel.generate_content")
def test_gemini_run_with_img(
mock_generate_content,
mock_process_img,
mock_gemini_api_key,
mock_genai_model,
):
model = Gemini()
task = "A cat"
img = "cat.png"
response_mock = Mock(text="Generated response")
mock_generate_content.return_value = response_mock
mock_process_img.return_value = "Processed image"
response = model.run(task=task, img=img)
assert response == "Generated response"
mock_generate_content.assert_called_with(
content=[task, "Processed image"]
)
mock_process_img.assert_called_with(img=img)
@patch("swarms.models.gemini.genai.GenerativeModel.generate_content")
def test_gemini_run_without_img(
mock_generate_content, mock_gemini_api_key, mock_genai_model
):
model = Gemini()
task = "A cat"
response_mock = Mock(text="Generated response")
mock_generate_content.return_value = response_mock
response = model.run(task=task)
assert response == "Generated response"
mock_generate_content.assert_called_with(task=task)
@patch("swarms.models.gemini.genai.GenerativeModel.generate_content")
def test_gemini_run_exception(
mock_generate_content, mock_gemini_api_key, mock_genai_model
):
model = Gemini()
task = "A cat"
mock_generate_content.side_effect = Exception("Test exception")
response = model.run(task=task)
assert response is None
# Test Gemini process_img method
def test_gemini_process_img(mock_gemini_api_key, mock_genai_model):
model = Gemini(gemini_api_key="custom-api-key")
img = "cat.png"
img_data = b"Mocked image data"
with patch("builtins.open", create=True) as open_mock:
open_mock.return_value.__enter__.return_value.read.return_value = (
img_data
)
processed_img = model.process_img(img)
assert processed_img == [
{"mime_type": "image/png", "data": img_data}
]
open_mock.assert_called_with(img, "rb")
# Test Gemini initialization with missing API key
def test_gemini_init_missing_api_key():
with pytest.raises(
ValueError, match="Please provide a Gemini API key"
):
model = Gemini(gemini_api_key=None)
# Test Gemini initialization with missing model name
def test_gemini_init_missing_model_name():
with pytest.raises(
ValueError, match="Please provide a model name"
):
model = Gemini(model_name=None)
# Test Gemini run method with empty task
def test_gemini_run_empty_task(mock_gemini_api_key, mock_genai_model):
model = Gemini()
task = ""
response = model.run(task=task)
assert response is None
# Test Gemini run method with empty image
def test_gemini_run_empty_img(mock_gemini_api_key, mock_genai_model):
model = Gemini()
task = "A cat"
img = ""
response = model.run(task=task, img=img)
assert response is None
# Test Gemini process_img method with missing image
def test_gemini_process_img_missing_image(
mock_gemini_api_key, mock_genai_model
):
model = Gemini()
img = None
with pytest.raises(
ValueError, match="Please provide an image to process"
):
model.process_img(img=img)
# Test Gemini process_img method with missing image type
def test_gemini_process_img_missing_image_type(
mock_gemini_api_key, mock_genai_model
):
model = Gemini()
img = "cat.png"
with pytest.raises(
ValueError, match="Please provide the image type"
):
model.process_img(img=img, type=None)
# Test Gemini process_img method with missing Gemini API key
def test_gemini_process_img_missing_api_key(mock_genai_model):
model = Gemini(gemini_api_key=None)
img = "cat.png"
with pytest.raises(
ValueError, match="Please provide a Gemini API key"
):
model.process_img(img=img, type="image/png")
# Test Gemini run method with mocked image processing
@patch("swarms.models.gemini.genai.GenerativeModel.generate_content")
@patch("swarms.models.gemini.Gemini.process_img")
def test_gemini_run_mock_img_processing(
mock_process_img,
mock_generate_content,
mock_gemini_api_key,
mock_genai_model,
):
model = Gemini()
task = "A cat"
img = "cat.png"
response_mock = Mock(text="Generated response")
mock_generate_content.return_value = response_mock
mock_process_img.return_value = "Processed image"
response = model.run(task=task, img=img)
assert response == "Generated response"
mock_generate_content.assert_called_with(
content=[task, "Processed image"]
)
mock_process_img.assert_called_with(img=img)
# Test Gemini run method with mocked image processing and exception
@patch("swarms.models.gemini.Gemini.process_img")
@patch("swarms.models.gemini.genai.GenerativeModel.generate_content")
def test_gemini_run_mock_img_processing_exception(
mock_generate_content,
mock_process_img,
mock_gemini_api_key,
mock_genai_model,
):
model = Gemini()
task = "A cat"
img = "cat.png"
mock_process_img.side_effect = Exception("Test exception")
response = model.run(task=task, img=img)
assert response is None
mock_generate_content.assert_not_called()
mock_process_img.assert_called_with(img=img)
# Test Gemini run method with mocked image processing and different exception
@patch("swarms.models.gemini.Gemini.process_img")
@patch("swarms.models.gemini.genai.GenerativeModel.generate_content")
def test_gemini_run_mock_img_processing_different_exception(
mock_generate_content,
mock_process_img,
mock_gemini_api_key,
mock_genai_model,
):
model = Gemini()
task = "A dog"
img = "dog.png"
mock_process_img.side_effect = ValueError("Test exception")
with pytest.raises(ValueError):
model.run(task=task, img=img)
mock_generate_content.assert_not_called()
mock_process_img.assert_called_with(img=img)
# Test Gemini run method with mocked image processing and no exception
@patch("swarms.models.gemini.Gemini.process_img")
@patch("swarms.models.gemini.genai.GenerativeModel.generate_content")
def test_gemini_run_mock_img_processing_no_exception(
mock_generate_content,
mock_process_img,
mock_gemini_api_key,
mock_genai_model,
):
model = Gemini()
task = "A bird"
img = "bird.png"
mock_generate_content.return_value = "A bird is flying"
response = model.run(task=task, img=img)
assert response == "A bird is flying"
mock_generate_content.assert_called_once()
mock_process_img.assert_called_with(img=img)
# Test Gemini chat method
@patch("swarms.models.gemini.Gemini.chat")
def test_gemini_chat(mock_chat):
model = Gemini()
mock_chat.return_value = "Hello, Gemini!"
response = model.chat("Hello, Gemini!")
assert response == "Hello, Gemini!"
mock_chat.assert_called_once()
# Test Gemini list_models method
@patch("swarms.models.gemini.Gemini.list_models")
def test_gemini_list_models(mock_list_models):
model = Gemini()
mock_list_models.return_value = ["model1", "model2"]
response = model.list_models()
assert response == ["model1", "model2"]
mock_list_models.assert_called_once()
# Test Gemini stream_tokens method
@patch("swarms.models.gemini.Gemini.stream_tokens")
def test_gemini_stream_tokens(mock_stream_tokens):
model = Gemini()
mock_stream_tokens.return_value = ["token1", "token2"]
response = model.stream_tokens()
assert response == ["token1", "token2"]
mock_stream_tokens.assert_called_once()
# Test Gemini process_img_pil method
@patch("swarms.models.gemini.Gemini.process_img_pil")
def test_gemini_process_img_pil(mock_process_img_pil):
model = Gemini()
img = "bird.png"
mock_process_img_pil.return_value = "processed image"
response = model.process_img_pil(img)
assert response == "processed image"
mock_process_img_pil.assert_called_with(img)

@ -0,0 +1,144 @@
import os
import requests
import pytest
from unittest.mock import patch, Mock
from swarms.models.together import TogetherModel
import logging
@pytest.fixture
def mock_api_key(monkeypatch):
monkeypatch.setenv("TOGETHER_API_KEY", "mocked-api-key")
def test_init_defaults(mock_api_key):
model = TogetherModel()
assert model.together_api_key == "mocked-api-key"
assert model.logging_enabled is False
assert model.model_name == "mistralai/Mixtral-8x7B-Instruct-v0.1"
assert model.max_workers == 10
assert model.max_tokens == 300
assert model.api_endpoint == "https://api.together.xyz"
assert model.beautify is False
assert model.streaming_enabled is False
assert model.meta_prompt is False
assert model.system_prompt is None
def test_init_custom_params(mock_api_key):
model = TogetherModel(
together_api_key="custom-api-key",
logging_enabled=True,
model_name="custom-model",
max_workers=5,
max_tokens=500,
api_endpoint="https://custom-api.together.xyz",
beautify=True,
streaming_enabled=True,
meta_prompt="meta-prompt",
system_prompt="system-prompt",
)
assert model.together_api_key == "custom-api-key"
assert model.logging_enabled is True
assert model.model_name == "custom-model"
assert model.max_workers == 5
assert model.max_tokens == 500
assert model.api_endpoint == "https://custom-api.together.xyz"
assert model.beautify is True
assert model.streaming_enabled is True
assert model.meta_prompt == "meta-prompt"
assert model.system_prompt == "system-prompt"
@patch("swarms.models.together_model.requests.post")
def test_run_success(mock_post, mock_api_key):
mock_response = Mock()
mock_response.json.return_value = {
"choices": [{"message": {"content": "Generated response"}}]
}
mock_post.return_value = mock_response
model = TogetherModel()
task = "What is the color of the object?"
response = model.run(task)
assert response == "Generated response"
@patch("swarms.models.together_model.requests.post")
def test_run_failure(mock_post, mock_api_key):
mock_post.side_effect = requests.exceptions.RequestException(
"Request failed"
)
model = TogetherModel()
task = "What is the color of the object?"
response = model.run(task)
assert response is None
def test_run_with_logging_enabled(caplog, mock_api_key):
model = TogetherModel(logging_enabled=True)
task = "What is the color of the object?"
with caplog.at_level(logging.DEBUG):
model.run(task)
assert "Sending request to" in caplog.text
@pytest.mark.parametrize(
"invalid_input", [None, 123, ["list", "of", "items"]]
)
def test_invalid_task_input(invalid_input, mock_api_key):
model = TogetherModel()
response = model.run(invalid_input)
assert response is None
@patch("swarms.models.together_model.requests.post")
def test_run_streaming_enabled(mock_post, mock_api_key):
mock_response = Mock()
mock_response.json.return_value = {
"choices": [{"message": {"content": "Generated response"}}]
}
mock_post.return_value = mock_response
model = TogetherModel(streaming_enabled=True)
task = "What is the color of the object?"
response = model.run(task)
assert response == "Generated response"
@patch("swarms.models.together_model.requests.post")
def test_run_empty_choices(mock_post, mock_api_key):
mock_response = Mock()
mock_response.json.return_value = {"choices": []}
mock_post.return_value = mock_response
model = TogetherModel()
task = "What is the color of the object?"
response = model.run(task)
assert response is None
@patch("swarms.models.together_model.requests.post")
def test_run_with_exception(mock_post, mock_api_key):
mock_post.side_effect = Exception("Test exception")
model = TogetherModel()
task = "What is the color of the object?"
response = model.run(task)
assert response is None
def test_init_logging_disabled(monkeypatch):
monkeypatch.setenv("TOGETHER_API_KEY", "mocked-api-key")
model = TogetherModel()
assert model.logging_enabled is False
assert not model.system_prompt

@ -0,0 +1,287 @@
import pytest
import os
from datetime import datetime
from swarms.swarms.base import BaseStructure
class TestBaseStructure:
def test_init(self):
base_structure = BaseStructure(
name="TestStructure",
description="Test description",
save_metadata=True,
save_artifact_path="./test_artifacts",
save_metadata_path="./test_metadata",
save_error_path="./test_errors",
)
assert base_structure.name == "TestStructure"
assert base_structure.description == "Test description"
assert base_structure.save_metadata is True
assert base_structure.save_artifact_path == "./test_artifacts"
assert base_structure.save_metadata_path == "./test_metadata"
assert base_structure.save_error_path == "./test_errors"
def test_save_to_file_and_load_from_file(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
file_path = os.path.join(tmp_dir, "test_file.json")
data_to_save = {"key": "value"}
base_structure = BaseStructure()
base_structure.save_to_file(data_to_save, file_path)
loaded_data = base_structure.load_from_file(file_path)
assert loaded_data == data_to_save
def test_save_metadata_and_load_metadata(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure(save_metadata_path=tmp_dir)
metadata = {"name": "Test", "description": "Test metadata"}
base_structure.save_metadata(metadata)
loaded_metadata = base_structure.load_metadata()
assert loaded_metadata == metadata
def test_log_error(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure(save_error_path=tmp_dir)
error_message = "Test error message"
base_structure.log_error(error_message)
log_file = os.path.join(tmp_dir, "TestStructure_errors.log")
with open(log_file, "r") as file:
lines = file.readlines()
assert len(lines) == 1
assert lines[0] == f"{error_message}\n"
def test_save_artifact_and_load_artifact(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure(save_artifact_path=tmp_dir)
artifact = {"key": "value"}
artifact_name = "test_artifact"
base_structure.save_artifact(artifact, artifact_name)
loaded_artifact = base_structure.load_artifact(artifact_name)
assert loaded_artifact == artifact
def test_current_timestamp(self):
base_structure = BaseStructure()
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
timestamp = base_structure._current_timestamp()
assert timestamp == current_time
def test_log_event(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure(save_metadata_path=tmp_dir)
event = "Test event"
event_type = "INFO"
base_structure.log_event(event, event_type)
log_file = os.path.join(tmp_dir, "TestStructure_events.log")
with open(log_file, "r") as file:
lines = file.readlines()
assert len(lines) == 1
assert (
lines[0]
== f"[{base_structure._current_timestamp()}]"
f" [{event_type}] {event}\n"
)
@pytest.mark.asyncio
async def test_run_async(self):
base_structure = BaseStructure()
async def async_function():
return "Async Test Result"
result = await base_structure.run_async(async_function)
assert result == "Async Test Result"
@pytest.mark.asyncio
async def test_save_metadata_async(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure(save_metadata_path=tmp_dir)
metadata = {"name": "Test", "description": "Test metadata"}
await base_structure.save_metadata_async(metadata)
loaded_metadata = base_structure.load_metadata()
assert loaded_metadata == metadata
@pytest.mark.asyncio
async def test_log_error_async(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure(save_error_path=tmp_dir)
error_message = "Test error message"
await base_structure.log_error_async(error_message)
log_file = os.path.join(tmp_dir, "TestStructure_errors.log")
with open(log_file, "r") as file:
lines = file.readlines()
assert len(lines) == 1
assert lines[0] == f"{error_message}\n"
@pytest.mark.asyncio
async def test_save_artifact_async(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure(save_artifact_path=tmp_dir)
artifact = {"key": "value"}
artifact_name = "test_artifact"
await base_structure.save_artifact_async(
artifact, artifact_name
)
loaded_artifact = base_structure.load_artifact(artifact_name)
assert loaded_artifact == artifact
@pytest.mark.asyncio
async def test_load_artifact_async(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure(save_artifact_path=tmp_dir)
artifact = {"key": "value"}
artifact_name = "test_artifact"
base_structure.save_artifact(artifact, artifact_name)
loaded_artifact = await base_structure.load_artifact_async(
artifact_name
)
assert loaded_artifact == artifact
@pytest.mark.asyncio
async def test_log_event_async(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure(save_metadata_path=tmp_dir)
event = "Test event"
event_type = "INFO"
await base_structure.log_event_async(event, event_type)
log_file = os.path.join(tmp_dir, "TestStructure_events.log")
with open(log_file, "r") as file:
lines = file.readlines()
assert len(lines) == 1
assert (
lines[0]
== f"[{base_structure._current_timestamp()}]"
f" [{event_type}] {event}\n"
)
@pytest.mark.asyncio
async def test_asave_to_file(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
file_path = os.path.join(tmp_dir, "test_file.json")
data_to_save = {"key": "value"}
base_structure = BaseStructure()
await base_structure.asave_to_file(data_to_save, file_path)
loaded_data = base_structure.load_from_file(file_path)
assert loaded_data == data_to_save
@pytest.mark.asyncio
async def test_aload_from_file(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
file_path = os.path.join(tmp_dir, "test_file.json")
data_to_save = {"key": "value"}
base_structure = BaseStructure()
base_structure.save_to_file(data_to_save, file_path)
loaded_data = await base_structure.aload_from_file(file_path)
assert loaded_data == data_to_save
def test_run_in_thread(self):
base_structure = BaseStructure()
result = base_structure.run_in_thread(
lambda: "Thread Test Result"
)
assert result.result() == "Thread Test Result"
def test_save_and_decompress_data(self):
base_structure = BaseStructure()
data = {"key": "value"}
compressed_data = base_structure.compress_data(data)
decompressed_data = base_structure.decompres_data(
compressed_data
)
assert decompressed_data == data
def test_run_batched(self):
base_structure = BaseStructure()
def run_function(data):
return f"Processed {data}"
batched_data = list(range(10))
result = base_structure.run_batched(
batched_data, batch_size=5, func=run_function
)
expected_result = [
f"Processed {data}" for data in batched_data
]
assert result == expected_result
def test_load_config(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
config_file = os.path.join(tmp_dir, "config.json")
config_data = {"key": "value"}
base_structure = BaseStructure()
base_structure.save_to_file(config_data, config_file)
loaded_config = base_structure.load_config(config_file)
assert loaded_config == config_data
def test_backup_data(self, tmpdir):
tmp_dir = tmpdir.mkdir("test_dir")
base_structure = BaseStructure()
data_to_backup = {"key": "value"}
base_structure.backup_data(
data_to_backup, backup_path=tmp_dir
)
backup_files = os.listdir(tmp_dir)
assert len(backup_files) == 1
loaded_data = base_structure.load_from_file(
os.path.join(tmp_dir, backup_files[0])
)
assert loaded_data == data_to_backup
def test_monitor_resources(self):
base_structure = BaseStructure()
base_structure.monitor_resources()
def test_run_with_resources(self):
base_structure = BaseStructure()
def run_function():
base_structure.monitor_resources()
return "Resource Test Result"
result = base_structure.run_with_resources(run_function)
assert result == "Resource Test Result"
def test_run_with_resources_batched(self):
base_structure = BaseStructure()
def run_function(data):
base_structure.monitor_resources()
return f"Processed {data}"
batched_data = list(range(10))
result = base_structure.run_with_resources_batched(
batched_data, batch_size=5, func=run_function
)
expected_result = [
f"Processed {data}" for data in batched_data
]
assert result == expected_result

@ -0,0 +1,62 @@
from unittest.mock import Mock
import pytest
from swarms.telemetry.posthog_utils import (
log_activity_posthog,
posthog,
)
# Mock Posthog client
@pytest.fixture
def mock_posthog():
return Mock()
# Mock environment variables
@pytest.fixture
def mock_env(monkeypatch):
monkeypatch.setenv("POSTHOG_API_KEY", "test_api_key")
monkeypatch.setenv("POSTHOG_HOST", "test_host")
# Test the log_activity_posthog decorator
def test_log_activity_posthog(mock_posthog, mock_env):
event_name = "test_event"
event_properties = {"test_property": "test_value"}
# Create a test function with the decorator
@log_activity_posthog(event_name, **event_properties)
def test_function():
pass
# Call the test function
test_function()
# Check if the Posthog capture method was called with the expected arguments
mock_posthog.capture.assert_called_once_with(
"test_user_id", event_name, event_properties
)
# Test a scenario where environment variables are not set
def test_missing_env_variables(monkeypatch):
# Unset environment variables
monkeypatch.delenv("POSTHOG_API_KEY", raising=False)
monkeypatch.delenv("POSTHOG_HOST", raising=False)
# Create a test function with the decorator
@log_activity_posthog("test_event", test_property="test_value")
def test_function():
pass
# Ensure that calling the test function does not raise errors
test_function()
# Test the Posthog client initialization
def test_posthog_client_initialization(mock_env):
assert posthog.api_key == "test_api_key"
assert posthog.host == "test_host"
assert posthog.debug is True

@ -0,0 +1,87 @@
import uuid
from swarms.telemetry.user_utils import (
generate_unique_identifier,
generate_user_id,
get_machine_id,
get_system_info,
)
# Helper functions tests
def test_generate_user_id():
# Generate user IDs and ensure they are UUID strings
user_id = generate_user_id()
assert isinstance(user_id, str)
assert uuid.UUID(user_id, version=4)
def test_get_machine_id():
# Get machine ID and ensure it's a valid SHA-256 hash
machine_id = get_machine_id()
assert isinstance(machine_id, str)
assert len(machine_id) == 64
assert all(char in "0123456789abcdef" for char in machine_id)
def test_get_system_info():
# Get system information and ensure it's a dictionary with expected keys
system_info = get_system_info()
assert isinstance(system_info, dict)
expected_keys = [
"platform",
"platform_release",
"platform_version",
"architecture",
"hostname",
"ip_address",
"mac_address",
"processor",
"python_version",
]
assert all(key in system_info for key in expected_keys)
def test_generate_unique_identifier():
# Generate unique identifiers and ensure they are valid UUID strings
unique_id = generate_unique_identifier()
assert isinstance(unique_id, str)
    assert uuid.UUID(unique_id, version=5)
def test_generate_user_id_edge_case():
# Test generate_user_id with multiple calls
user_ids = set()
for _ in range(100):
user_id = generate_user_id()
user_ids.add(user_id)
assert len(user_ids) == 100 # Ensure generated IDs are unique
def test_get_machine_id_edge_case():
    # get_machine_id hashes platform.node(), so repeated calls on the
    # same host must agree
    machine_ids = set()
    for _ in range(100):
        machine_ids.add(get_machine_id())
    assert len(machine_ids) == 1  # Deterministic per machine
def test_get_system_info_edge_case():
# Test get_system_info for consistency
system_info1 = get_system_info()
system_info2 = get_system_info()
assert (
system_info1 == system_info2
) # Ensure system info remains the same
def test_generate_unique_identifier_edge_case():
    # The identifier is a uuid5 of the (stable) system info, so it is
    # reproducible rather than random
    unique_ids = set()
    for _ in range(100):
        unique_ids.add(generate_unique_identifier())
    assert len(unique_ids) == 1  # Deterministic for a given system