Merge branch 'master' into elder-plinius-master

pull/307/head
Zack 1 year ago
commit e9b2800815

@ -0,0 +1,19 @@
---
name: Docker Image CI
on: # yamllint disable-line rule:truthy
push:
branches: ["master"]
pull_request:
branches: ["master"]
jobs:
build:
runs-on: ubuntu-latest
name: Build Docker image
steps:
- uses: actions/checkout@v4
- name: Build the Docker image
run: docker build . --file Dockerfile --tag my-image-name:$(date +%s)

@ -0,0 +1,98 @@
name: Docker
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
on:
schedule:
- cron: '31 19 * * *'
push:
branches: [ "master" ]
# Publish semver tags as releases.
tags: [ 'v*.*.*' ]
pull_request:
branches: [ "master" ]
env:
# Use docker.io for Docker Hub if empty
REGISTRY: ghcr.io
# github.repository as <account>/<repo>
IMAGE_NAME: ${{ github.repository }}
jobs:
build:
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
# This is used to complete the identity challenge
# with sigstore/fulcio when running outside of PRs.
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Install the cosign tool except on PR
# https://github.com/sigstore/cosign-installer
- name: Install cosign
if: github.event_name != 'pull_request'
uses: sigstore/cosign-installer@1fc5bd396d372bee37d608f955b336615edf79c8 #v3.2.0
with:
cosign-release: 'v2.1.1'
# Set up BuildKit Docker container builder to be able to build
# multi-platform images and export cache
# https://github.com/docker/setup-buildx-action
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0
# Login against a Docker registry except on PR
# https://github.com/docker/login-action
- name: Log into registry ${{ env.REGISTRY }}
if: github.event_name != 'pull_request'
uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Extract metadata (tags, labels) for Docker
# https://github.com/docker/metadata-action
- name: Extract Docker metadata
id: meta
uses: docker/metadata-action@31cebacef4805868f9ce9a0cb03ee36c32df2ac4 # v5.3.0
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
# Build and push Docker image with Buildx (don't push on PR)
# https://github.com/docker/build-push-action
- name: Build and push Docker image
id: build-and-push
uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0
with:
context: .
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
# Sign the resulting Docker image digest except on PRs.
# This will only write to the public Rekor transparency log when the Docker
# repository is public to avoid leaking data. If you would like to publish
# transparency data even for private images, pass --force to cosign below.
# https://github.com/sigstore/cosign
- name: Sign the published Docker image
if: ${{ github.event_name != 'pull_request' }}
env:
# https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable
TAGS: ${{ steps.meta.outputs.tags }}
DIGEST: ${{ steps.build-and-push.outputs.digest }}
# This step uses the identity token to provision an ephemeral certificate
# against the sigstore community Fulcio instance.
run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST}

@ -0,0 +1,34 @@
name: Python Package using Conda
on: [push]
jobs:
build-linux:
runs-on: ubuntu-latest
strategy:
max-parallel: 5
steps:
- uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v5
with:
python-version: '3.10'
- name: Add conda to system path
run: |
# $CONDA is an environment variable pointing to the root of the miniconda directory
echo $CONDA/bin >> $GITHUB_PATH
- name: Install dependencies
run: |
conda env update --file environment.yml --name base
- name: Lint with flake8
run: |
conda install flake8
# stop the build if there are Python syntax errors or undefined names
flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
# exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
- name: Test with pytest
run: |
conda install pytest
pytest

@ -60,8 +60,8 @@ llm = OpenAIChat(
agent = Agent(llm=llm, max_loops=1, autosave=True, dashboard=True)
# Run the workflow on a task
agent.run("Generate a 10,000 word blog on health and wellness.")
out = agent.run("Generate a 10,000 word blog on health and wellness.")
print(out)
```
@ -168,7 +168,8 @@ agent = Agent(
)
# Run the workflow on a task
agent.run(task=task, img=img)
out = agent.run(task=task, img=img)
print(out)
```
@ -198,7 +199,8 @@ llm = OpenAIChat(
agent = OmniModalAgent(llm)
agent.run("Generate a video of a swarm of fish and then make an image out of the video")
response = agent.run("Generate a video of a swarm of fish and then make an image out of the video")
print(response)
```
---

@ -169,7 +169,9 @@ class Gemini(BaseMultiModalModel):
- [Gemini GitHub Repository](https://github.com/swarms/gemini): Explore the Gemini repository for additional information, updates, and examples.
- [Google GenerativeAI Documentation](https://docs.google.com/document/d/1WZSBw6GsOhOCYm0ArydD_9uy6nPPA1KFIbKPhjj43hA): Dive deeper into the capabilities of the Google GenerativeAI package used by Gemini.
- [Google GenerativeAI
Documentation](https://docs.google.com/document/d/1WZSBw6GsOhOCYm0ArydD_9uy6nPPA1KFIbKPhjj43hA): Dive deeper into the capabilities of the Google GenerativeAI package used by Gemini.
- [Gemini API Documentation](https://gemini-api-docs.example.com): Access the official documentation for the Gemini API to explore advanced features and integrations.

@ -29,4 +29,5 @@ agent = Agent(
)
# Run the workflow on a task
agent.run("Generate a 10,000 word blog on health and wellness.")
out = agent.run("Generate a 10,000 word blog on health and wellness.")
print(out)

@ -105,9 +105,8 @@ nav:
- SequentialWorkflow: 'swarms/structs/sequential_workflow.md'
- swarms.memory:
- Weaviate: "swarms/memory/weaviate.md"
- PineconDB: "swarms/memory/pinecone.md"
- PineconeVectorStore: "swarms/memory/pinecone.md"
- PGVectorStore: "swarms/memory/pg.md"
- ShortTermMemory: "swarms/memory/short_term_memory.md"
- swarms.utils:
- phoenix_trace_decorator: "swarms/utils/phoenix_tracer.md"
- Guides:

@ -1,18 +1,14 @@
import os
from dotenv import load_dotenv
from swarms.models.gemini import Gemini
load_dotenv()
api_key = os.environ["GEMINI_API_KEY"]
# Initialize the model
model = Gemini(gemini_api_key=api_key)
model = Gemini(
gemini_api_key="A",
)
# Establish the prompt and image
task = "What is your name"
img = "images/github-banner-swarms.png"
# Run the model
out = model.run("What is your name?", img=img)
out = model.run("What is your name?", img)
print(out)
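Hardcoding the key as "A" only works as a placeholder; a minimal sketch that keeps the key out of source control, assuming a GEMINI_API_KEY variable in the environment as in the earlier version of this script:

```python
import os

from dotenv import load_dotenv
from swarms.models.gemini import Gemini

load_dotenv()

# Assumes GEMINI_API_KEY is set in the environment or a local .env file
model = Gemini(gemini_api_key=os.environ["GEMINI_API_KEY"])

# Same banner image as above; run() now takes the image positionally
out = model.run("What is your name?", "images/github-banner-swarms.png")
print(out)
```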

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "2.9.2"
version = "2.8.0"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@ -24,48 +24,47 @@ classifiers = [
[tool.poetry.dependencies]
python = "^3.6.1"
torch = "2.1.1"
transformers = "4.35.0"
transformers = "2.10"
openai = "0.28.0"
langchain = "0.0.333"
asyncio = "3.4.3"
einops = "0.7.0"
google-generativeai = "0.3.1"
langchain-experimental = "0.0.10"
playwright = "1.34.0"
weaviate-client = "3.25.3"
opencv-python-headless = "4.8.1.78"
faiss-cpu = "1.7.4"
backoff = "2.2.1"
marshmallow = "3.19.0"
datasets = "2.10.1"
langchain = "*"
asyncio = "*"
einops = "*"
google-generativeai = "0.3.0"
langchain-experimental = "*"
playwright = "*"
weaviate-client = "*"
opencv-python-headless = "*"
faiss-cpu = "*"
backoff = "*"
marshmallow = "*"
datasets = "*"
optimum = "1.15.0"
diffusers = "0.17.1"
PyPDF2 = "3.0.1"
accelerate = "0.22.0"
sentencepiece = "0.1.98"
wget = "3.2"
tensorflow = "2.14.0"
httpx = "0.24.1"
tiktoken = "0.4.0"
safetensors = "0.3.3"
attrs = "22.2.0"
ggl = "1.1.0"
ratelimit = "2.2.1"
beautifulsoup4 = "4.11.2"
cohere = "4.24"
huggingface-hub = "0.16.4"
diffusers = "*"
PyPDF2 = "*"
accelerate = "*"
sentencepiece = "*"
wget = "*"
tensorflow = "2.15.0"
httpx = "*"
tiktoken = "*"
safetensors = "*"
attrs = "*"
ggl = "*"
ratelimit = "*"
beautifulsoup4 = "*"
cohere = "*"
huggingface-hub = "*"
pydantic = "1.10.12"
tenacity = "8.2.2"
Pillow = "9.4.0"
chromadb = "0.4.14"
tabulate = "0.9.0"
termcolor = "2.2.0"
black = "23.3.0"
open_clip_torch = "2.20.0"
soundfile = "0.12.1"
torchvision = "0.16.1"
rich = "13.5.2"
tenacity = "*"
Pillow = "*"
chromadb = "*"
tabulate = "*"
termcolor = "*"
black = "*"
open_clip_torch = "*"
soundfile = "*"
torchvision = "*"
rich = "*"
[tool.poetry.group.lint.dependencies]
ruff = ">=0.0.249,<0.1.7"

@ -32,6 +32,39 @@ tensorflow==2.12.0
optimum
tiktoken==0.4.0
tabulate==0.9.0
transformers>2.10
pandas
langchain
nest_asyncio
langchain-experimental
playwright
wget==3.2
simpleaichat
httpx
open_clip_torch
ggl
beautifulsoup4
google-search-results==2.4.2
Pillow
faiss-cpu
openai==0.28.0
attrs
datasets
pydantic==1.10.12
soundfile
arize-phoenix
weaviate-client
huggingface-hub
google-generativeai
sentencepiece
PyPDF2
accelerate
vllm
chromadb
tensorflow
optimum
tiktoken
tabulate
colored
addict
backoff==2.2.1
@ -55,6 +88,14 @@ openai==0.28.0
opencv-python==4.7.0.72
prettytable==3.9.0
safetensors==0.3.3
safetensors
numpy
omegaconf
open_clip_torch
openai
opencv-python
prettytable
safetensors
test-tube
timm==0.6.13
torchmetrics
@ -69,3 +110,12 @@ mkdocs
mkdocs-material
mkdocs-glightbox
pre-commit==3.2.2
cohere
torchvision
rich
mkdocs
mkdocs-material
mkdocs-glightbox
pre-commit

@ -61,8 +61,6 @@ class ChromaDB:
openai_api_key: str = OPENAI_API_KEY,
top_results_num: int = 3,
limit_tokens: Optional[int] = 1000,
*args,
**kwargs,
):
self.metric = metric
self.RESULTS_STORE_NAME = RESULTS_STORE_NAME
@ -93,9 +91,7 @@ class ChromaDB:
embedding_function=embedding_function,
)
def add(
self, task: Dict, result: str, result_id: str, *args, **kwargs
):
def add(self, task: Dict, result: str, result_id: str):
"""Adds a result to the ChromaDB collection
Args:
@ -141,15 +137,16 @@ class ChromaDB:
"task": task["task_name"],
"result": result,
},
*args,
**kwargs,
)
except Exception as error:
print(
colored(f"Error adding to ChromaDB: {error}", "red")
)
def query(self, query: str, *args, **kwargs) -> List[dict]:
def query(
self,
query: str,
) -> List[dict]:
"""Queries the ChromaDB collection with a query for the top results
Args:
@ -167,8 +164,6 @@ class ChromaDB:
query_texts=query,
n_results=min(self.top_results_num, count),
include=["metadatas"],
*args,
**kwargs,
)
out = [item["task"] for item in results["metadatas"][0]]
out = limit_tokens_from_string(
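A rough usage sketch of the tightened ChromaDB signatures; the constructor arguments other than openai_api_key, top_results_num, and limit_tokens are not shown in this hunk, and the import path is an assumption:

```python
from swarms.memory.chroma_db import ChromaDB  # import path assumed

chroma = ChromaDB(
    openai_api_key="sk-...",  # placeholder key
    top_results_num=3,
    limit_tokens=1000,
)

# add() now takes exactly task, result, and result_id
chroma.add(
    task={"task_name": "summarize"},
    result="A short summary of the document.",
    result_id="task-001",
)

# query() now takes only the query string and returns the token-limited top results
print(chroma.query("summarize"))
```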

@ -1,12 +1,12 @@
from typing import Optional
from swarms.memory.base_vectordb import VectorDatabase
from swarms.memory.base import BaseVectorStore
import pinecone
from attr import define, field
from swarms.utils.hash import str_to_hash
@define
class PineconDB(VectorDatabase):
class PineconeVectorStore(BaseVectorStore):
"""
PineconeVectorStore is a vector storage driver that uses Pinecone as the underlying storage engine.
@ -110,17 +110,7 @@ class PineconDB(VectorDatabase):
meta: Optional[dict] = None,
**kwargs,
) -> str:
"""Add a vector to the index.
Args:
vector (list[float]): _description_
vector_id (Optional[str], optional): _description_. Defaults to None.
namespace (Optional[str], optional): _description_. Defaults to None.
meta (Optional[dict], optional): _description_. Defaults to None.
Returns:
str: _description_
"""
"""Upsert vector"""
vector_id = (
vector_id if vector_id else str_to_hash(str(vector))
)
@ -131,15 +121,31 @@ class PineconDB(VectorDatabase):
return vector_id
def load_entries(self, namespace: Optional[str] = None):
"""Load all entries from the index.
def load_entry(
self, vector_id: str, namespace: Optional[str] = None
) -> Optional[BaseVectorStore.Entry]:
"""Load entry"""
result = self.index.fetch(
ids=[vector_id], namespace=namespace
).to_dict()
vectors = list(result["vectors"].values())
Args:
namespace (Optional[str], optional): _description_. Defaults to None.
Returns:
_type_: _description_
"""
return BaseVectorStore.Entry(
id=vector["id"],
meta=vector["metadata"],
vector=vector["values"],
namespace=result["namespace"],
)
else:
return None
def load_entries(
self, namespace: Optional[str] = None
) -> list[BaseVectorStore.Entry]:
"""Load entries"""
# This is a hacky way to query up to 10,000 values from Pinecone. Waiting on an official API for fetching
# all values from a namespace:
# https://community.pinecone.io/t/is-there-a-way-to-query-all-the-vectors-and-or-metadata-from-a-namespace/797/5
@ -151,14 +157,15 @@ class PineconDB(VectorDatabase):
namespace=namespace,
)
for result in results["matches"]:
entry = {
"id": result["id"],
"vector": result["values"],
"meta": result["metadata"],
"namespace": result["namespace"],
}
return entry
return [
BaseVectorStore.Entry(
id=r["id"],
vector=r["values"],
meta=r["metadata"],
namespace=results["namespace"],
)
for r in results["matches"]
]
def query(
self,
@ -169,23 +176,16 @@ class PineconDB(VectorDatabase):
# PineconDBStorageDriver-specific params:
include_metadata=True,
**kwargs,
):
"""Query the index for vectors similar to the given query string.
Args:
query (str): _description_
count (Optional[int], optional): _description_. Defaults to None.
namespace (Optional[str], optional): _description_. Defaults to None.
include_vectors (bool, optional): _description_. Defaults to False.
include_metadata (bool, optional): _description_. Defaults to True.
Returns:
_type_: _description_
"""
) -> list[BaseVectorStore.QueryResult]:
"""Query vectors"""
vector = self.embedding_driver.embed_string(query)
params = {
"top_k": count,
"top_k": (
count
if count
else BaseVectorStore.DEFAULT_QUERY_COUNT
),
"namespace": namespace,
"include_values": include_vectors,
"include_metadata": include_metadata,
@ -193,22 +193,19 @@ class PineconDB(VectorDatabase):
results = self.index.query(vector, **params)
for r in results["matches"]:
entry = {
"id": results["id"],
"vector": results["values"],
"score": results["scores"],
"meta": results["metadata"],
"namespace": results["namespace"],
}
return entry
return [
BaseVectorStore.QueryResult(
id=r["id"],
vector=r["values"],
score=r["score"],
meta=r["metadata"],
namespace=results["namespace"],
)
for r in results["matches"]
]
def create_index(self, name: str, **kwargs) -> None:
"""Create a new index.
Args:
name (str): _description_
"""
"""Create index"""
params = {
"name": name,
"dimension": self.embedding_driver.dimensions,

@ -1,6 +1,5 @@
import subprocess
from typing import List
from httpx import RequestError
try:
@ -16,8 +15,8 @@ try:
from qdrant_client import QdrantClient
from qdrant_client.http.models import (
Distance,
PointStruct,
VectorParams,
PointStruct,
)
except ImportError:
print("Please install the qdrant-client package")
@ -92,7 +91,7 @@ class Qdrant:
)
print(f"Collection '{self.collection_name}' created.")
def add(self, docs: List[dict], *args, **kwargs):
def add_vectors(self, docs: List[dict]):
"""
Adds vector representations of documents to the Qdrant collection.
@ -129,15 +128,13 @@ class Qdrant:
collection_name=self.collection_name,
wait=True,
points=points,
*args,
**kwargs,
)
return operation_info
except Exception as e:
print(f"Error adding vectors: {e}")
return None
def query(self, query: str, limit: int = 3, *args, **kwargs):
def search_vectors(self, query: str, limit: int = 3):
"""
Searches the collection for vectors similar to the query vector.
@ -150,14 +147,12 @@ class Qdrant:
"""
try:
query_vector = self.model.encode(
query, normalize_embeddings=True, *args, **kwargs
query, normalize_embeddings=True
)
search_result = self.client.search(
collection_name=self.collection_name,
query_vector=query_vector,
limit=limit,
*args,
**kwargs,
)
return search_result
except Exception as e:
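The public methods are renamed from add/query to add_vectors/search_vectors; a rough sketch of the new call sites (the constructor arguments and the document dict shape are assumptions, not visible in this hunk):

```python
from swarms.memory.qdrant import Qdrant

# Constructor arguments are assumed for illustration only
qdrant = Qdrant(collection_name="documents")

# add_vectors() takes a list of dicts describing the documents to embed
qdrant.add_vectors(
    [{"page_content": "Swarms is a multi-agent orchestration framework."}]
)

# search_vectors() encodes the query and returns up to `limit` matches
hits = qdrant.search_vectors("multi-agent orchestration", limit=3)
print(hits)
```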

@ -31,7 +31,6 @@ from swarms.models.layoutlm_document_qa import (
) # noqa: E402
from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402
from swarms.models.openai_tts import OpenAITTS # noqa: E402
from swarms.models.gemini import Gemini # noqa: E402
# from swarms.models.gpt4v import GPT4Vision
# from swarms.models.dalle3 import Dalle3
# from swarms.models.distilled_whisperx import DistilWhisperModel # noqa: E402
@ -63,5 +62,4 @@ __all__ = [
"GPT4VisionAPI",
# "vLLM",
"OpenAITTS",
"Gemini",
]

@ -74,7 +74,7 @@ class Gemini(BaseMultiModalModel):
def __init__(
self,
model_name: str = "gemini-pro-vision",
gemini_api_key: str = get_gemini_api_key_env,
gemini_api_key: str = None,
return_safety: bool = False,
candidates: bool = False,
stream: bool = False,
@ -82,7 +82,6 @@ class Gemini(BaseMultiModalModel):
stop_sequence=["x"],
max_output_tokens: int = 100,
temperature: float = 0.9,
system_prompt: str = None,
*args,
**kwargs,
):
@ -96,7 +95,6 @@ class Gemini(BaseMultiModalModel):
self.stop_sequence = stop_sequence
self.max_output_tokens = max_output_tokens
self.temperature = temperature
self.system_prompt = system_prompt
# Prepare the generation config
self.generation_config = GenerationConfig(
@ -104,8 +102,6 @@ class Gemini(BaseMultiModalModel):
# stop_sequence=stop_sequence,
max_output_tokens=max_output_tokens,
temperature=temperature,
*args,
**kwargs,
)
# Initialize the model
@ -176,7 +172,7 @@ class Gemini(BaseMultiModalModel):
return response.text
else:
response = self.model.generate_content(
task, stream=self.stream, *args, **kwargs
task, *args, **kwargs
)
return response.text
except Exception as error:
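With system_prompt removed and gemini_api_key defaulting to None, callers now have to supply the key explicitly; a minimal sketch (reading it from GEMINI_API_KEY is an assumption, not something this hunk enforces):

```python
import os

from swarms.models.gemini import Gemini

# gemini_api_key no longer falls back to an env helper, so pass it in directly
model = Gemini(
    model_name="gemini-pro-vision",
    gemini_api_key=os.getenv("GEMINI_API_KEY"),
    max_output_tokens=100,
    temperature=0.9,
)

print(model.run("Describe this image", img="images/github-banner-swarms.png"))
```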

@ -0,0 +1 @@
27dcfa74d334bc871f3234de431e71c6eeba5dd6

@ -0,0 +1 @@
../../blobs/2d74da6615135c58cf3cf9ad4cb11e7c613ff9e55fe658a47ab83b6c8d1174a9

@ -1,11 +1,12 @@
import subprocess
from typing import Optional, Tuple, List
from swarms.models.base_llm import AbstractLLM
try:
from vllm import LLM, SamplingParams
except ImportError as error:
print(f"[ERROR] [vLLM] {error}")
# subprocess.run(["pip", "install", "vllm"])
# raise error
subprocess.run(["pip", "install", "vllm"])
raise error
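Installing vllm and then immediately re-raising means the first run still fails even when the install succeeds. A possible alternative, shown here only as a sketch and not what this change does, retries the import once after installing:

```python
import subprocess
import sys

try:
    from vllm import LLM, SamplingParams
except ImportError:
    # One-off install, then retry the import a single time
    subprocess.run([sys.executable, "-m", "pip", "install", "vllm"], check=True)
    from vllm import LLM, SamplingParams
```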

@ -3,8 +3,13 @@ from swarms.prompts.tools import (
DYNAMICAL_TOOL_USAGE,
)
<<<<<<< HEAD
# PLINIUS' PROMPTS
FLOW_SYSTEM_PROMPT = """
=======
# PROMPTS
FLOW_SYSTEM_PROMPT_v2 = """
>>>>>>> master
You are an elite autonomous agent operating within an autonomous loop structure.
Your primary function is to reliably complete the user's tasks.
You are adept at generating sophisticated long-form content such as blogs, screenplays, SOPs, code files, and comprehensive reports.
@ -22,7 +27,11 @@ Take a deep breath.
"""
<<<<<<< HEAD
def autonomous_agent_prompt(
=======
def autonomous_agent_prompt_v2(
>>>>>>> master
tools_prompt: str = DYNAMICAL_TOOL_USAGE,
dynamic_stop_prompt: str = DYNAMIC_STOP_PROMPT,
agent_name: str = None,
@ -44,8 +53,13 @@ def autonomous_agent_prompt(
"""
<<<<<<< HEAD
def agent_system_prompt_2(name: str):
AGENT_SYSTEM_PROMPT_2 = f"""
=======
def agent_system_prompt_2_v2(name: str):
AGENT_SYSTEM_PROMPT_2_v2 = f"""
>>>>>>> master
You are {name}, an elite autonomous agent designed for unparalleled versatility and adaptability in an autonomous loop structure.
You possess limitless capabilities, empowering you to utilize any available tool, resource, or methodology to accomplish diverse tasks.
Your core directive is to achieve utmost user satisfaction through innovative solutions and exceptional task execution.
@ -63,10 +77,77 @@ def agent_system_prompt_2(name: str):
Take a deep breath.
"""
<<<<<<< HEAD
=======
return AGENT_SYSTEM_PROMPT_2_v2
# ORIGINAL PROMPTS
FLOW_SYSTEM_PROMPT = """
You are an autonomous agent granted autonomy in an autonomous loop structure.
Your role is to engage in multi-step conversations with yourself or the user,
generate long-form content like blogs, screenplays, or SOPs,
and accomplish tasks bestowed by the user.
You can have internal dialogues with yourself or can interact with the user
to aid in these complex tasks. Your responses should be coherent, contextually relevant, and tailored to the task at hand.
"""
def autonomous_agent_prompt(
tools_prompt: str = DYNAMICAL_TOOL_USAGE,
dynamic_stop_prompt: str = DYNAMIC_STOP_PROMPT,
agent_name: str = None,
):
"""Autonomous agent prompt"""
return f"""
You are {agent_name}, an autonomous agent granted autonomy in an autonomous loop structure.
Your purpose is to satisfy the user demands above expectations. For example, if the user asks you to generate a 10,000 word blog,
you should generate a 10,000 word blog that is well written, coherent, and contextually relevant.
Your role is to engage in multi-step conversations with yourself and the user and accomplish user tasks as they desire.
Follow the following rules:
1. Accomplish the task to the best of your ability
2. If you are unable to accomplish the task, then ask the user for help
3. If the user provides feedback, then use the feedback to improve your performance
4. If you are unable to accomplish the task, then ask the user for help
You can have internal dialogues with yourself or can interact with the user
to aid in these complex tasks. Your responses should be coherent, contextually relevant, and tailored to the task at hand and optimized
to satisfy the user no matter the cost.
You also have the ability to use tools to aid in your tasks; the tool instructions are below. Output a JSON object with the following structure to use the tools:
{tools_prompt}
Now, when you are 99% sure you have completed the task, you may follow the instructions below to escape the autonomous loop.
{dynamic_stop_prompt}
Now, you remember your training, your deployment, and your purpose. You are ready to begin your mission.
"""
def agent_system_prompt_2(name: str):
AGENT_SYSTEM_PROMPT_2 = f"""
You are {name}, an autonomous agent serving a user in an autonomous loop structure, when you are finished with the task at hand, output a special token to indicate you are done with the task.
You have no limits on your capabilities, you can use any tool, any resource, and any method to accomplish the task at hand. Do what you must to satisfy the user.
##### Special Token ########
<DONE>
########### Code ############
If the user asks you to write code return the response in markdown inside of 6 backticks to render it as code. Write the code in the language specified by the user in the prompt.
"""
>>>>>>> master
return AGENT_SYSTEM_PROMPT_2
AGENT_SYSTEM_PROMPT_3 = """
<<<<<<< HEAD
You are an elite autonomous agent serving the user in automating tasks, workflows, and activities.
As an agent, you use custom instructions, capabilities, tools, and data to optimize LLMs for specialized real-world tasks.
@ -145,3 +226,11 @@ AGENT_SYSTEM_PROMPT_3 = """
# You will have internal dialogues with yourself and or interact with the user to aid in these tasks.
# Your responses should be coherent, contextually relevant, and tailored to the task at hand.
# """
=======
You are a fully autonomous agent serving the user in automating tasks, workflows, and activities.
Agents use custom instructions, capabilities, and data to optimize LLMs for a narrower set of tasks.
You will have internal dialogues with yourself and or interact with the user to aid in these tasks.
Your responses should be coherent, contextually relevant, and tailored to the task at hand.
"""
>>>>>>> master

@ -264,6 +264,14 @@ class Agent:
if preset_stopping_token:
self.stopping_token = "<DONE>"
# If memory then add the json to the memory vector database
if memory:
# Add all of the state to the memory
self.add_message_to_memory_db(
{"message": self.state_to_str()},
{"agent_id": self.id},
)
# If tools exist then add the tool docs usage to the sop
if self.tools:
self.sop_list.append(
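The new branch snapshots the agent's serialized state into the memory backend at construction time; a heavily hedged sketch of how that is wired up (the memory keyword and the ChromaDB constructor are assumptions based only on this hunk):

```python
from swarms.models import OpenAIChat
from swarms.structs.agent import Agent
from swarms.memory.chroma_db import ChromaDB  # import path assumed

llm = OpenAIChat(openai_api_key="sk-...")   # placeholder key
memory = ChromaDB(openai_api_key="sk-...")  # constructor arguments assumed

# With a memory backend attached, __init__ now calls
# add_message_to_memory_db({"message": <serialized state>}, {"agent_id": <id>})
agent = Agent(llm=llm, max_loops=1, memory=memory)
agent.run("Generate a 10,000 word blog on health and wellness.")
```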

@ -79,6 +79,7 @@ class BaseStructure(ABC):
self.save_metadata_path = save_metadata_path
self.save_error_path = save_error_path
@abstractmethod
def run(self, *args, **kwargs):
"""Run the structure."""
pass

@ -6,7 +6,74 @@ from typing import Any, Callable, Dict, List, Optional, Union
from termcolor import colored
from swarms.structs.agent import Agent
from swarms.structs.task import Task
# Define a generic Task that can handle different types of callable objects
@dataclass
class Task:
"""
Task class for running a task in a sequential workflow.
Args:
description (str): The description of the task.
agent (Union[Callable, Agent]): The model or agent to execute the task.
args (List[Any]): Additional arguments to pass to the task execution.
kwargs (Dict[str, Any]): Additional keyword arguments to pass to the task execution.
result (Any): The result of the task execution.
history (List[Any]): The history of the task execution.
Methods:
execute: Execute the task.
Examples:
>>> from swarms.structs import Task, Agent
>>> from swarms.models import OpenAIChat
>>> agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False)
>>> task = Task(description="What's the weather in miami", agent=agent)
>>> task.execute()
>>> task.result
"""
description: str
agent: Union[Callable, Agent]
args: List[Any] = field(default_factory=list)
kwargs: Dict[str, Any] = field(default_factory=dict)
result: Any = None
history: List[Any] = field(default_factory=list)
# logger = logging.getLogger(__name__)
def execute(self):
"""
Execute the task.
Raises:
ValueError: If an Agent instance is used as a task and the 'task' argument is not provided.
"""
if isinstance(self.agent, Agent):
# Add a prompt to notify the Agent of the sequential workflow
if "prompt" in self.kwargs:
self.kwargs["prompt"] += (
f"\n\nPrevious output: {self.result}"
if self.result
else ""
)
else:
self.kwargs["prompt"] = (
f"Main task: {self.description}"
+ (
f"\n\nPrevious output: {self.result}"
if self.result
else ""
)
)
self.result = self.agent.run(*self.args, **self.kwargs)
else:
self.result = self.agent(*self.args, **self.kwargs)
self.history.append(self.result)
# SequentialWorkflow class definition using dataclasses
@ -294,10 +361,7 @@ class SequentialWorkflow:
)
def workflow_bootup(self, **kwargs) -> None:
"""
Workflow bootup.
"""
"""Bootup the workflow."""
print(
colored(
"""

@ -1,78 +1,132 @@
from dataclasses import dataclass, field
from typing import (
Any,
Callable,
Dict,
List,
Union,
)
from dataclass import dataclass, field
from swarms.structs.agent import Agent
from typing import Optional
from typing import List, Dict, Any, Sequence
# Define a generic Task that can handle different types of callable objects
@dataclass
class Task:
"""
Task class for running a task in a sequential workflow.
Task is a unit of work that can be executed by a set of agents.
A task is defined by a task name and a set of agents that can execute the task.
The task can also have a set of dependencies, which are the names of other tasks
that must be executed before this task can be executed.
Args:
description (str): The description of the task.
agent (Union[Callable, Agent]): The model or agent to execute the task.
args (List[Any]): Additional arguments to pass to the task execution.
kwargs (Dict[str, Any]): Additional keyword arguments to pass to the task execution.
result (Any): The result of the task execution.
history (List[Any]): The history of the task execution.
id (str): The name of the task.
description (Optional[str]): A description of the task.
task (str): The name of the task.
result (Any): The result of the task.
agents (Sequence[Agent]): A list of agents that can execute the task.
dependencies (List[str], optional): A list of task names that must be executed before this task can be executed. Defaults to [].
args (List[Any], optional): A list of arguments to pass to the agents. Defaults to [].
kwargs (Dict[str, Any], optional): Keyword arguments to pass to the agents. Defaults to {}.
Methods:
execute: Execute the task.
execute: Executes the task by passing the results of the parent tasks to the agents.
Examples:
>>> from swarms.structs import Task, Agent
>>> from swarms.models import OpenAIChat
>>> agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False)
>>> task = Task(description="What's the weather in miami", agent=agent)
>>> task.execute()
>>> task.result
import os
from swarms.models import OpenAIChat
from swarms.structs import Agent
from swarms.structs.sequential_workflow import SequentialWorkflow
from dotenv import load_dotenv
"""
load_dotenv()
# Load the environment variables
api_key = os.getenv("OPENAI_API_KEY")
# Initialize the language agent
llm = OpenAIChat(
openai_api_key=api_key,
temperature=0.5,
max_tokens=3000,
)
# Initialize the agent with the language agent
agent1 = Agent(llm=llm, max_loops=1)
# Create another agent for a different task
agent2 = Agent(llm=llm, max_loops=1)
# Create the workflow
workflow = SequentialWorkflow(max_loops=1)
description: str
agent: Union[Callable, Agent]
args: List[Any] = field(default_factory=list)
kwargs: Dict[str, Any] = field(default_factory=dict)
result: Any = None
history: List[Any] = field(default_factory=list)
# logger = logging.getLogger(__name__)
# Add tasks to the workflow
workflow.add(
agent1, "Generate a 10,000 word blog on health and wellness.",
)
# Suppose the next task takes the output of the first task as input
workflow.add(
agent2, "Summarize the generated blog",
)
# Run the workflow
workflow.run()
# Output the results
for task in workflow.tasks:
print(f"Task: {task.description}, Result: {task.result}")
def execute(self):
"""
Execute the task.
Raises:
ValueError: If an Agent instance is used as a task and the 'task' argument is not provided.
def __init__(
self,
id: str,
description: Optional[str],
task: str,
result: Any,
agents: Sequence[Agent],
dependencies: List[str] = [],
args: List[Any] = [],
kwargs: Dict[str, Any] = {},
):
self.id = id
self.description = description
self.task = task
self.result = result
self.agents = agents
self.dependencies = dependencies
self.results = []
self.args = args
self.kwargs = kwargs
def execute(self, parent_results: Dict[str, Any]):
"""Executes the task by passing the results of the parent tasks to the agents.
Args:
parent_results (Dict[str, Any]): A dictionary of task names and their results.
Examples:
"""
if isinstance(self.agent, Agent):
# Add a prompt to notify the Agent of the sequential workflow
args = [parent_results[dep] for dep in self.dependencies]
for agent in self.agents:
if isinstance(agent, Agent):
if "prompt" in self.kwargs:
self.kwargs["prompt"] += (
f"\n\nPrevious output: {self.result}"
if self.result
f"\n\nPrevious output: {self.results[-1]}"
if self.results
else ""
)
else:
self.kwargs["prompt"] = (
f"Main task: {self.description}"
+ (
f"\n\nPrevious output: {self.result}"
if self.result
f"\n\nPrevious output: {self.results[-1]}"
if self.results
else ""
)
)
self.result = self.agent.run(*self.args, **self.kwargs)
result = agent.run(
self.description, *args, **self.kwargs
)
else:
self.result = self.agent(*self.args, **self.kwargs)
self.history.append(self.result)
result = agent(self.description, *args, **self.kwargs)
self.results.append(result)
args = [result]
self.history.append(result)
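A hedged sketch of the reworked Task: it is keyed by an id, holds a list of agents, and execute() threads the results of its dependencies in as positional arguments (the bare Agent() construction is borrowed from the tests below and is only illustrative):

```python
from swarms.structs.agent import Agent
from swarms.structs.task import Task

agent = Agent()  # bare agent, as in the tests below

summarize = Task(
    id="summarize",
    description="Summarize the blog",
    task="Summarize the generated blog",
    result=None,
    agents=[agent],
    dependencies=["write_blog"],
)

# execute() looks up each dependency in parent_results and passes those
# values positionally to every agent, appending each result to task.results
summarize.execute(parent_results={"write_blog": "...the 10,000 word blog..."})
print(summarize.results[-1])
```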

@ -1,6 +1,5 @@
from swarms.telemetry.log_all import log_all_calls, log_calls
# from swarms.telemetry.posthog_utils import log_activity_posthog
from swarms.telemetry.posthog_utils import log_activity_posthog
from swarms.telemetry.user_utils import (
generate_user_id,
get_machine_id,
@ -12,7 +11,7 @@ from swarms.telemetry.user_utils import (
__all__ = [
"log_all_calls",
"log_calls",
# "log_activity_posthog",
"log_activity_posthog",
"generate_user_id",
"get_machine_id",
"get_system_info",

@ -9,13 +9,22 @@ from swarms.telemetry.user_utils import generate_unique_identifier
load_dotenv()
# # Initialize Posthog client
api_key = os.getenv("POSTHOG_API_KEY") or None
host = os.getenv("POSTHOG_HOST") or None
posthog = Posthog(api_key, host=host)
# Initialize Posthog client
def init_posthog(debug: bool = True, *args, **kwargs):
"""Initialize Posthog client.
Args:
debug (bool, optional): Whether to enable debug mode. Defaults to True.
"""
api_key = os.getenv("POSTHOG_API_KEY")
host = os.getenv("POSTHOG_HOST")
posthog = Posthog(api_key, host=host, *args, **kwargs)
if debug:
posthog.debug = True
# return posthog
return posthog
def log_activity_posthog(event_name: str, **event_properties):
@ -46,7 +55,7 @@ def log_activity_posthog(event_name: str, **event_properties):
distinct_user_id = generate_unique_identifier()
# Capture the event
posthog.capture(
init_posthog().capture(
distinct_user_id, event_name, event_properties
)
@ -57,13 +66,13 @@ def log_activity_posthog(event_name: str, **event_properties):
return decorator_log_activity
# @log_activity_posthog(
# "function_executed", function_name="my_function"
# )
# def my_function():
# # Function logic here
# return "Function executed successfully!"
@log_activity_posthog(
"function_executed", function_name="my_function"
)
def my_function():
# Function logic here
return "Function executed successfully!"
# out = my_function()
# print(out)
out = my_function()
print(out)

@ -1,6 +1,6 @@
import os
from unittest.mock import patch
from swarms.memory.pinecone import PineconDB
from swarms.memory.pinecone import PineconeVectorStore
api_key = os.getenv("PINECONE_API_KEY") or ""
@ -9,7 +9,7 @@ def test_init():
with patch("pinecone.init") as MockInit, patch(
"pinecone.Index"
) as MockIndex:
store = PineconDB(
store = PineconeVectorStore(
api_key=api_key,
index_name="test_index",
environment="test_env",
@ -21,7 +21,7 @@ def test_init():
def test_upsert_vector():
with patch("pinecone.init"), patch("pinecone.Index") as MockIndex:
store = PineconDB(
store = PineconeVectorStore(
api_key=api_key,
index_name="test_index",
environment="test_env",
@ -37,7 +37,7 @@ def test_upsert_vector():
def test_load_entry():
with patch("pinecone.init"), patch("pinecone.Index") as MockIndex:
store = PineconDB(
store = PineconeVectorStore(
api_key=api_key,
index_name="test_index",
environment="test_env",
@ -48,7 +48,7 @@ def test_load_entry():
def test_load_entries():
with patch("pinecone.init"), patch("pinecone.Index") as MockIndex:
store = PineconDB(
store = PineconeVectorStore(
api_key=api_key,
index_name="test_index",
environment="test_env",
@ -59,7 +59,7 @@ def test_load_entries():
def test_query():
with patch("pinecone.init"), patch("pinecone.Index") as MockIndex:
store = PineconDB(
store = PineconeVectorStore(
api_key=api_key,
index_name="test_index",
environment="test_env",
@ -72,7 +72,7 @@ def test_create_index():
with patch("pinecone.init"), patch("pinecone.Index"), patch(
"pinecone.create_index"
) as MockCreateIndex:
store = PineconDB(
store = PineconeVectorStore(
api_key=api_key,
index_name="test_index",
environment="test_env",

@ -6,7 +6,7 @@ from swarms.memory.qdrant import Qdrant
@pytest.fixture
def mock_qdrant_client():
with patch("swarms.memory.Qdrant") as MockQdrantClient:
with patch("your_module.QdrantClient") as MockQdrantClient:
yield MockQdrantClient()

@ -108,59 +108,3 @@ def test_task_execute_with_mocked_agents(task, mocker):
parent_results = {}
task.execute(parent_results)
assert len(task.results) == 5
def test_task_creation():
agent = Agent()
task = Task(id="1", task="Task1", result=None, agents=[agent])
assert task.id == "1"
assert task.task == "Task1"
assert task.result is None
assert task.agents == [agent]
def test_task_with_dependencies():
agent = Agent()
task = Task(
id="2",
task="Task2",
result=None,
agents=[agent],
dependencies=["Task1"],
)
assert task.dependencies == ["Task1"]
def test_task_with_args():
agent = Agent()
task = Task(
id="3",
task="Task3",
result=None,
agents=[agent],
args=["arg1", "arg2"],
)
assert task.args == ["arg1", "arg2"]
def test_task_with_kwargs():
agent = Agent()
task = Task(
id="4",
task="Task4",
result=None,
agents=[agent],
kwargs={"kwarg1": "value1"},
)
assert task.kwargs == {"kwarg1": "value1"}
# ... continue creating tests for different scenarios
# Test execute method
def test_execute():
agent = Agent()
task = Task(id="5", task="Task5", result=None, agents=[agent])
# Assuming execute method returns True on successful execution
assert task.execute() == True

@ -0,0 +1,86 @@
import uuid
from swarms.telemetry.user_utils import (
generate_unique_identifier,
generate_user_id,
get_machine_id,
get_system_info,
)
# Helper functions tests
def test_generate_user_id():
# Generate user IDs and ensure they are UUID strings
user_id = generate_user_id()
assert isinstance(user_id, str)
assert uuid.UUID(user_id, version=4)
def test_get_machine_id():
# Get machine ID and ensure it's a valid SHA-256 hash
machine_id = get_machine_id()
assert isinstance(machine_id, str)
assert len(machine_id) == 64
assert all(char in "0123456789abcdef" for char in machine_id)
def test_get_system_info():
# Get system information and ensure it's a dictionary with expected keys
system_info = get_system_info()
assert isinstance(system_info, dict)
expected_keys = [
"platform",
"platform_release",
"platform_version",
"architecture",
"hostname",
"ip_address",
"mac_address",
"processor",
"python_version",
]
assert all(key in system_info for key in expected_keys)
def test_generate_unique_identifier():
# Generate unique identifiers and ensure they are valid UUID strings
unique_id = generate_unique_identifier()
assert isinstance(unique_id, str)
assert uuid.UUID(unique_id, version=5)
def test_generate_user_id_edge_case():
# Test generate_user_id with multiple calls
user_ids = set()
for _ in range(100):
user_id = generate_user_id()
user_ids.add(user_id)
assert len(user_ids) == 100 # Ensure generated IDs are unique
def test_get_machine_id_edge_case():
# Test get_machine_id with multiple calls
machine_ids = set()
for _ in range(100):
machine_id = get_machine_id()
machine_ids.add(machine_id)
assert len(machine_ids) == 100 # Ensure generated IDs are unique
def test_get_system_info_edge_case():
# Test get_system_info for consistency
system_info1 = get_system_info()
system_info2 = get_system_info()
assert (
system_info1 == system_info2
) # Ensure system info remains the same
def test_generate_unique_identifier_edge_case():
# Test generate_unique_identifier for uniqueness
unique_ids = set()
for _ in range(100):
unique_id = generate_unique_identifier()
unique_ids.add(unique_id)
assert len(unique_ids) == 100 # Ensure generated IDs are unique