commit
12416156fa
@@ -0,0 +1,141 @@
# ChromaDB Documentation

ChromaDB is a specialized module designed to facilitate the storage and retrieval of documents using the ChromaDB system. It provides functionality for adding documents to a local ChromaDB collection and for querying that collection with query texts. The module integrates with the ChromaDB client to create and manage collections, exposing several configuration options for tuning storage and retrieval.

#### Parameters

| Parameter      | Type            | Default    | Description                                                    |
|----------------|-----------------|------------|----------------------------------------------------------------|
| `metric`       | `str`           | `"cosine"` | The similarity metric to use for the collection.               |
| `output_dir`   | `str`           | `"swarms"` | The name of the collection to store the results in.            |
| `limit_tokens` | `Optional[int]` | `1000`     | The maximum number of tokens to use for the query.             |
| `n_results`    | `int`           | `1`        | The number of results to retrieve.                             |
| `docs_folder`  | `Optional[str]` | `None`     | The folder containing documents to be added to the collection. |
| `verbose`      | `bool`          | `False`    | Flag to enable verbose logging for debugging.                  |
| `*args`        | `tuple`         | `()`       | Additional positional arguments.                               |
| `**kwargs`     | `dict`          | `{}`       | Additional keyword arguments.                                  |

#### Methods

| Method               | Description                                                              |
|----------------------|--------------------------------------------------------------------------|
| `__init__`           | Initializes the ChromaDB instance with the specified parameters.         |
| `add`                | Adds a document to the ChromaDB collection.                              |
| `query`              | Queries documents from the ChromaDB collection based on the query text.  |
| `traverse_directory` | Traverses the specified directory to add documents to the collection.    |

## Usage

```python
from swarms_memory import ChromaDB

chromadb = ChromaDB(
    metric="cosine",
    output_dir="results",
    limit_tokens=1000,
    n_results=2,
    docs_folder="path/to/docs",
    verbose=True,
)
```
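
Once initialized, the instance can store and retrieve documents right away. A minimal round trip using the methods documented below:

```python
doc_id = chromadb.add(document="ChromaDB stores documents for retrieval.")
print(chromadb.query(query_text="What does ChromaDB store?"))
```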

### Adding Documents

The `add` method allows you to add a document to the ChromaDB collection. It generates a unique ID for each document and adds it to the collection.

#### Parameters

| Parameter  | Type    | Default | Description                                 |
|------------|---------|---------|---------------------------------------------|
| `document` | `str`   | -       | The document to be added to the collection. |
| `*args`    | `tuple` | `()`    | Additional positional arguments.            |
| `**kwargs` | `dict`  | `{}`    | Additional keyword arguments.               |

#### Returns

| Type  | Description                   |
|-------|-------------------------------|
| `str` | The ID of the added document. |

#### Example

```python
doc_id = chromadb.add(document="This is a sample document.")
print(f"Document ID: {doc_id}")
```

### Querying Documents

The `query` method allows you to retrieve documents from the ChromaDB collection based on the provided query text.

#### Parameters

| Parameter    | Type    | Default | Description                      |
|--------------|---------|---------|----------------------------------|
| `query_text` | `str`   | -       | The query string to search for.  |
| `*args`      | `tuple` | `()`    | Additional positional arguments. |
| `**kwargs`   | `dict`  | `{}`    | Additional keyword arguments.    |

#### Returns

| Type  | Description                          |
|-------|--------------------------------------|
| `str` | The retrieved documents as a string. |

#### Example

```python
query_text = "search term"
results = chromadb.query(query_text=query_text)
print(f"Retrieved Documents: {results}")
```

### Traversing Directory

The `traverse_directory` method traverses every file in the specified directory and its subdirectories, adding the contents of each file to the ChromaDB collection.

#### Example

```python
chromadb.traverse_directory()
```
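
When `docs_folder` is supplied at initialization, the constructor traverses it automatically, so an explicit call is typically only needed for folders configured after construction. A minimal sketch:

```python
# Documents in the folder are ingested at construction time
chromadb = ChromaDB(docs_folder="path/to/docs")
```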

## Additional Information and Tips

### Verbose Logging

Enable the `verbose` flag during initialization to get detailed logs of the operations, which is useful for debugging.

```python
chromadb = ChromaDB(verbose=True)
```

### Handling Large Documents

When dealing with large documents, consider using the `limit_tokens` parameter to restrict the number of tokens processed in a single query.

```python
chromadb = ChromaDB(limit_tokens=500)
```

### Optimizing Query Performance

Choose the similarity metric (`metric` parameter) that suits your use case; the underlying Chroma collection accepts `"cosine"`, `"l2"` (Euclidean distance), and `"ip"` (inner product).

```python
chromadb = ChromaDB(metric="l2")
```

## References and Resources

- [ChromaDB Documentation](https://chromadb.io/docs)
- [Python UUID Module](https://docs.python.org/3/library/uuid.html)
- [Python os Module](https://docs.python.org/3/library/os.html)
- [Python logging Module](https://docs.python.org/3/library/logging.html)
- [dotenv Package](https://pypi.org/project/python-dotenv/)

By following this documentation, users can effectively utilize the ChromaDB module for managing document storage and retrieval in their applications.
@@ -0,0 +1,179 @@
# PineconeMemory Documentation

The `PineconeMemory` class provides a robust interface for building Pinecone-based Retrieval-Augmented Generation (RAG) systems. It allows documents to be added to a Pinecone index and the index to be queried for similar documents. The class supports custom embedding models, preprocessing functions, and other customizations to suit different use cases.

#### Parameters

| Parameter              | Type                                                               | Default    | Description                                                                                |
|------------------------|--------------------------------------------------------------------|------------|---------------------------------------------------------------------------------------------|
| `api_key`              | `str`                                                              | -          | Pinecone API key.                                                                            |
| `environment`          | `str`                                                              | -          | Pinecone environment.                                                                        |
| `index_name`           | `str`                                                              | -          | Name of the Pinecone index to use.                                                           |
| `dimension`            | `int`                                                              | `768`      | Dimension of the document embeddings.                                                        |
| `embedding_model`      | `Optional[Any]`                                                    | `None`     | Custom embedding model. Defaults to `SentenceTransformer('all-MiniLM-L6-v2')`.               |
| `embedding_function`   | `Optional[Callable[[str], List[float]]]`                           | `None`     | Custom embedding function. Defaults to `_default_embedding_function`.                        |
| `preprocess_function`  | `Optional[Callable[[str], str]]`                                   | `None`     | Custom preprocessing function. Defaults to `_default_preprocess_function`.                   |
| `postprocess_function` | `Optional[Callable[[List[Dict[str, Any]]], List[Dict[str, Any]]]]` | `None`     | Custom postprocessing function. Defaults to `_default_postprocess_function`.                 |
| `metric`               | `str`                                                              | `'cosine'` | Distance metric for the Pinecone index.                                                      |
| `pod_type`             | `str`                                                              | `'p1'`     | Pinecone pod type.                                                                           |
| `namespace`            | `str`                                                              | `''`       | Pinecone namespace.                                                                          |
| `logger_config`        | `Optional[Dict[str, Any]]`                                         | `None`     | Configuration for the logger. Defaults to logging to `rag_wrapper.log` and console output.   |

### Methods

#### `_setup_logger`

```python
def _setup_logger(self, config: Optional[Dict[str, Any]] = None)
```

Sets up the logger with the given configuration.

#### `_default_embedding_function`

```python
def _default_embedding_function(self, text: str) -> List[float]
```

Generates embeddings using the default SentenceTransformer model.

#### `_default_preprocess_function`

```python
def _default_preprocess_function(self, text: str) -> str
```

Preprocesses the input text by stripping whitespace.

#### `_default_postprocess_function`

```python
def _default_postprocess_function(self, results: List[Dict[str, Any]]) -> List[Dict[str, Any]]
```

Postprocesses the query results.

#### `add`

Adds a document to the Pinecone index.

| Parameter  | Type                       | Default | Description                           |
|------------|----------------------------|---------|---------------------------------------|
| `doc`      | `str`                      | -       | The document to be added.             |
| `metadata` | `Optional[Dict[str, Any]]` | `None`  | Additional metadata for the document. |
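
The signature implied by the table above (a sketch; consult the source for the exact definition):

```python
def add(self, doc: str, metadata: Optional[Dict[str, Any]] = None) -> None
```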

#### `query`

Queries the Pinecone index for similar documents.

| Parameter | Type                       | Default | Description                          |
|-----------|----------------------------|---------|--------------------------------------|
| `query`   | `str`                      | -       | The query string.                    |
| `top_k`   | `int`                      | `5`     | The number of top results to return. |
| `filter`  | `Optional[Dict[str, Any]]` | `None`  | Metadata filter for the query.       |
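
Likewise a sketch of the signature implied by the table (the return value is shaped by the postprocessing function):

```python
def query(self, query: str, top_k: int = 5, filter: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]
```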

## Usage

The `PineconeMemory` class is initialized with the parameters needed to configure Pinecone and the embedding model. It supports a variety of custom configurations to suit different needs.

#### Example

```python
from swarms_memory import PineconeMemory

# Initialize PineconeMemory
memory = PineconeMemory(
    api_key="your-api-key",
    environment="us-west1-gcp",
    index_name="example-index",
    dimension=768,
)
```

Note that `dimension` must match the output size of the embedding model in use; the default `SentenceTransformer('all-MiniLM-L6-v2')` model produces 384-dimensional embeddings, so set `dimension` accordingly when relying on the default.

### Adding Documents

Documents can be added to the Pinecone index using the `add` method. The method accepts a document string and optional metadata.

#### Example

```python
doc = "This is a sample document to be added to the Pinecone index."
metadata = {"author": "John Doe", "date": "2024-07-08"}

memory.add(doc, metadata)
```

### Querying Documents

The `query` method queries the Pinecone index for documents similar to a query string. It returns the `top_k` most similar documents.

#### Example

```python
query = "Sample query to find similar documents."
results = memory.query(query, top_k=5)

for result in results:
    print(result)
```

## Additional Information and Tips

### Custom Embedding and Preprocessing Functions

Custom embedding and preprocessing functions can be provided during initialization to tailor document processing to specific requirements.

#### Example

```python
def custom_embedding_function(text: str) -> List[float]:
    # Custom embedding logic (vectors must have length `dimension`)
    return [0.1, 0.2, 0.3]


def custom_preprocess_function(text: str) -> str:
    # Custom preprocessing logic
    return text.lower()


memory = PineconeMemory(
    api_key="your-api-key",
    environment="us-west1-gcp",
    index_name="example-index",
    embedding_function=custom_embedding_function,
    preprocess_function=custom_preprocess_function,
)
```

### Logger Configuration

The logger can be configured to suit different logging needs. The default configuration logs to a file and the console.

#### Example

```python
logger_config = {
    "handlers": [
        {"sink": "custom_log.log", "rotation": "1 MB"},
        {"sink": lambda msg: print(msg, end="")},
    ]
}

memory = PineconeMemory(
    api_key="your-api-key",
    environment="us-west1-gcp",
    index_name="example-index",
    logger_config=logger_config,
)
```

## References and Resources

- [Pinecone Documentation](https://docs.pinecone.io/)
- [SentenceTransformers Documentation](https://www.sbert.net/)
- [Loguru Documentation](https://loguru.readthedocs.io/en/stable/)

For further exploration and examples, refer to the official documentation and resources provided by Pinecone, SentenceTransformers, and Loguru.

This concludes the documentation for the `PineconeMemory` class. The class offers a flexible and powerful interface for leveraging Pinecone in retrieval-augmented generation systems. By supporting custom embedding, preprocessing, and postprocessing functions, it can be tailored to a wide range of applications.
@@ -1,80 +0,0 @@
from swarms import Agent, Anthropic, tool

# Model
llm = Anthropic(
    temperature=0.1,
)


# Tools
@tool
def text_to_video(task: str):
    """
    Converts a given text task into an animated video.

    Args:
        task (str): The text task to be converted into a video.

    Returns:
        str: The path to the exported GIF file.
    """
    import torch
    from diffusers import (
        AnimateDiffPipeline,
        MotionAdapter,
        EulerDiscreteScheduler,
    )
    from diffusers.utils import export_to_gif
    from huggingface_hub import hf_hub_download
    from safetensors.torch import load_file

    device = "cuda"
    dtype = torch.float16

    step = 4  # Options: [1, 2, 4, 8]
    repo = "ByteDance/AnimateDiff-Lightning"
    ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
    base = "emilianJR/epiCRealism"  # Choose your favorite base model.

    # Load the motion adapter weights and build the pipeline
    adapter = MotionAdapter().to(device, dtype)
    adapter.load_state_dict(
        load_file(hf_hub_download(repo, ckpt), device=device)
    )
    pipe = AnimateDiffPipeline.from_pretrained(
        base, motion_adapter=adapter, torch_dtype=dtype
    ).to(device)
    pipe.scheduler = EulerDiscreteScheduler.from_config(
        pipe.scheduler.config,
        timestep_spacing="trailing",
        beta_schedule="linear",
    )

    output = pipe(
        prompt=task, guidance_scale=1.0, num_inference_steps=step
    )
    out = export_to_gif(output.frames[0], "animation.gif")
    return out


# Agent
agent = Agent(
    agent_name="Devin",
    system_prompt=(
        "Autonomous agent that can interact with humans and other"
        " agents. Be Helpful and Kind. Use the tools provided to"
        " assist the user. Return all code in markdown format."
    ),
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
    tools=[text_to_video],
)

# Run the agent
out = agent("Create a video of a girl coding AI wearing hijab")
print(out)
@@ -0,0 +1,392 @@
"""
Problem: We're creating specialized agents for various social media platforms.

List of agents:
- Facebook agent
- Twitter agent
- Instagram agent
- LinkedIn agent
- TikTok agent
- Reddit agent
- Pinterest agent
- Snapchat agent
- YouTube agent
- WhatsApp agent

"""

from swarms import Agent, OpenAIChat, MixtureOfAgents
import os
import requests


# Model
model = OpenAIChat(max_tokens=4000, temperature=0.8)

# Content Variables
facebook_content = "Here is the content for Facebook"
twitter_content = "Here is the content for Twitter"
instagram_content = "Here is the content for Instagram"
linkedin_content = "Here is the content for LinkedIn"
tiktok_content = "Here is the content for TikTok"
reddit_content = "Here is the content for Reddit"
pinterest_content = "Here is the content for Pinterest"
snapchat_content = "Here is the content for Snapchat"
youtube_content = "Here is the content for YouTube"
whatsapp_content = "Here is the content for WhatsApp"

# Prompt Variables
facebook_prompt = f"""
You are a Facebook social media agent. Your task is to create a post that maximizes engagement on Facebook. Use rich media, personal stories, and interactive content. Ensure the post is compelling and includes a call-to-action. Here is the content to work with: {facebook_content}
"""

twitter_prompt = f"""
You are a Twitter social media agent. Your task is to create a tweet that is short, concise, and uses trending hashtags. The tweet should be engaging and include relevant media such as images, GIFs, or short videos. Here is the content to work with: {twitter_content}
"""

instagram_prompt = f"""
You are an Instagram social media agent. Your task is to create a visually appealing post that includes high-quality images and engaging captions. Consider using stories and reels to maximize reach. Here is the content to work with: {instagram_content}
"""

linkedin_prompt = f"""
You are a LinkedIn social media agent. Your task is to create a professional and insightful post related to industry trends or personal achievements. The post should include relevant media such as articles, professional photos, or videos. Here is the content to work with: {linkedin_content}
"""

tiktok_prompt = f"""
You are a TikTok social media agent. Your task is to create a short, entertaining video that aligns with trending challenges and music. The video should be engaging and encourage viewers to interact. Here is the content to work with: {tiktok_content}
"""

reddit_prompt = f"""
You are a Reddit social media agent. Your task is to create an engaging post for relevant subreddits. The post should spark in-depth discussions and include relevant media such as images or links. Here is the content to work with: {reddit_content}
"""

pinterest_prompt = f"""
You are a Pinterest social media agent. Your task is to create high-quality, visually appealing pins. Focus on popular categories such as DIY, fashion, and lifestyle. Here is the content to work with: {pinterest_content}
"""

snapchat_prompt = f"""
You are a Snapchat social media agent. Your task is to create engaging and timely snaps and stories. Include personal touches and use filters or AR lenses to enhance the content. Here is the content to work with: {snapchat_content}
"""

youtube_prompt = f"""
You are a YouTube social media agent. Your task is to create high-quality videos with engaging thumbnails. Ensure a consistent posting schedule and encourage viewer interaction. Here is the content to work with: {youtube_content}
"""

whatsapp_prompt = f"""
You are a WhatsApp social media agent. Your task is to send personalized messages and updates. Use broadcast lists and ensure the messages are engaging and relevant. Here is the content to work with: {whatsapp_content}
"""


def post_to_twitter(content: str) -> None:
    """
    Posts content to Twitter.

    Args:
        content (str): The content to post on Twitter.

    Raises:
        ValueError: If the content is empty or exceeds the character limit.
        requests.exceptions.RequestException: If there is an error with the request.
    """
    try:
        if not content:
            raise ValueError("Content cannot be empty.")
        if len(content) > 280:
            raise ValueError(
                "Content exceeds Twitter's 280 character limit."
            )

        # Retrieve the access token from environment variables
        access_token = os.getenv("TWITTER_ACCESS_TOKEN")
        if not access_token:
            raise EnvironmentError(
                "Twitter access token not found in environment variables."
            )

        # Mock API endpoint for example purposes
        api_url = "https://api.twitter.com/2/tweets"
        headers = {
            "Authorization": f"Bearer {access_token}",
            "Content-Type": "application/json",
        }
        data = {"text": content}
        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        print("Content posted to Twitter successfully.")
    except ValueError as e:
        print(f"Error: {e}")
        raise
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        raise


def post_to_instagram(content: str) -> None:
    """
    Posts content to Instagram.

    Args:
        content (str): The content to post on Instagram.

    Raises:
        ValueError: If the content is empty or exceeds the character limit.
        requests.exceptions.RequestException: If there is an error with the request.
    """
    try:
        if not content:
            raise ValueError("Content cannot be empty.")
        if len(content) > 2200:
            raise ValueError(
                "Content exceeds Instagram's 2200 character limit."
            )

        # Retrieve the access token from environment variables
        access_token = os.getenv("INSTAGRAM_ACCESS_TOKEN")
        user_id = os.getenv("INSTAGRAM_USER_ID")
        if not access_token or not user_id:
            raise EnvironmentError(
                "Instagram access token or user ID not found in environment variables."
            )

        # Mock API endpoint for example purposes
        api_url = f"https://graph.instagram.com/v10.0/{user_id}/media"
        headers = {
            "Authorization": f"Bearer {access_token}",
            "Content-Type": "application/json",
        }
        data = {
            "caption": content,
            "image_url": "URL_OF_THE_IMAGE_TO_POST",  # Replace with actual image URL if needed
        }
        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        print("Content posted to Instagram successfully.")
    except ValueError as e:
        print(f"Error: {e}")
        raise
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        raise


def post_to_facebook(content: str) -> None:
    """
    Posts content to Facebook.

    Args:
        content (str): The content to post on Facebook.

    Raises:
        ValueError: If the content is empty.
        requests.exceptions.RequestException: If there is an error with the request.
    """
    try:
        if not content:
            raise ValueError("Content cannot be empty.")

        # Retrieve the access token from environment variables
        access_token = os.getenv("FACEBOOK_ACCESS_TOKEN")
        if not access_token:
            raise EnvironmentError(
                "Facebook access token not found in environment variables."
            )

        # Mock API endpoint for example purposes
        api_url = "https://graph.facebook.com/v10.0/me/feed"
        headers = {
            "Authorization": f"Bearer {access_token}",
            "Content-Type": "application/json",
        }
        data = {"message": content}
        response = requests.post(api_url, headers=headers, json=data)
        response.raise_for_status()

        print("Content posted to Facebook successfully.")
    except ValueError as e:
        print(f"Error: {e}")
        raise
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        raise


# Create one agent per platform, each with its platform-specific prompt
agents = [
    Agent(
        agent_name="Facebook Agent",
        system_prompt=facebook_prompt,
        llm=model,
        max_loops=1,
        dashboard=False,
        streaming_on=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        stopping_token="<DONE>",
        state_save_file_type="json",
        saved_state_path="facebook_agent.json",
    ),
    Agent(
        agent_name="Twitter Agent",
        system_prompt=twitter_prompt,
        llm=model,
        max_loops=1,
        dashboard=False,
        streaming_on=True,
        verbose=True,
        tools=[post_to_twitter],
        dynamic_temperature_enabled=True,
        stopping_token="<DONE>",
        state_save_file_type="json",
        saved_state_path="twitter_agent.json",
    ),
    Agent(
        agent_name="Instagram Agent",
        system_prompt=instagram_prompt,
        llm=model,
        max_loops=1,
        dashboard=False,
        streaming_on=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        stopping_token="<DONE>",
        tools=[post_to_instagram],
        state_save_file_type="json",
        saved_state_path="instagram_agent.json",
    ),
    Agent(
        agent_name="LinkedIn Agent",
        system_prompt=linkedin_prompt,
        llm=model,
        max_loops=1,
        dashboard=False,
        streaming_on=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        stopping_token="<DONE>",
        state_save_file_type="json",
        saved_state_path="linkedin_agent.json",
    ),
    Agent(
        agent_name="TikTok Agent",
        system_prompt=tiktok_prompt,
        llm=model,
        max_loops=1,
        dashboard=False,
        streaming_on=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        stopping_token="<DONE>",
        state_save_file_type="json",
        saved_state_path="tiktok_agent.json",
    ),
    Agent(
        agent_name="Reddit Agent",
        system_prompt=reddit_prompt,
        llm=model,
        max_loops=1,
        dashboard=False,
        streaming_on=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        stopping_token="<DONE>",
        state_save_file_type="json",
        saved_state_path="reddit_agent.json",
    ),
    Agent(
        agent_name="Pinterest Agent",
        system_prompt=pinterest_prompt,
        llm=model,
        max_loops=1,
        dashboard=False,
        streaming_on=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        stopping_token="<DONE>",
        state_save_file_type="json",
        saved_state_path="pinterest_agent.json",
    ),
    Agent(
        agent_name="Snapchat Agent",
        system_prompt=snapchat_prompt,
        llm=model,
        max_loops=1,
        dashboard=False,
        streaming_on=True,
        verbose=True,
        dynamic_temperature_enabled=True,
        stopping_token="<DONE>",
        state_save_file_type="json",
        saved_state_path="snapchat_agent.json",
    ),
]


# Final agent
final_agent = Agent(
    agent_name="Final Agent",
    system_prompt="Ensure the content is optimized for all social media platforms.",
    llm=model,
    max_loops=1,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    dynamic_temperature_enabled=True,
    stopping_token="<DONE>",
    state_save_file_type="json",
    saved_state_path="final_agent.json",
)


# Create a mixture of agents
swarm = MixtureOfAgents(
    agents=agents,
    final_agent=final_agent,
    layers=1,
    verbose=True,
)

# parallel_swarm = AgentRearrange(
#     agents=agents,
#     flow=f"{agents[0].agent_name} -> {agents[1].agent_name}, {agents[2].agent_name}, {agents[3].agent_name}, {agents[4].agent_name}, {agents[5].agent_name}",
#     max_loops=1,
#     verbose=True,
# )

# Run the swarm
swarm.run(
    """

[Workshop Today][Unlocking The Secrets of Multi-Agent Collaboration]

[Location][https://lu.ma/tfn0fp37]
[Time][Today 2:30pm PST -> 4PM PST] [Circa 5 hours]

Sign up and invite your friends; we're going to dive into various multi-agent orchestration workflows in swarms:
https://github.com/kyegomez/swarms

And, the swarms docs:
https://docs.swarms.world/en/latest/

"""
)
(binary image removed, 113 KiB)
@@ -1,14 +0,0 @@
"""from swarms.models import Dalle3

# Create an instance of the Dalle3 class with high quality
dalle3 = Dalle3(quality="high")

# Define a text prompt
task = "A high-quality image of a sunset"

# Generate a high-quality image from the text prompt
image_url = dalle3(task)

# Print the generated image URL
print(image_url)
"""
@@ -1,36 +0,0 @@
from swarms.models import HuggingfaceLLM
import torch

try:
    inference = HuggingfaceLLM(
        model_id="gpt2",
        quantize=False,
        verbose=True,
    )

    device = "cuda" if torch.cuda.is_available() else "cpu"
    inference.model.to(device)

    prompt_text = (
        "Create a list of known biggest risks of structural collapse"
        " with references"
    )
    inputs = inference.tokenizer(prompt_text, return_tensors="pt").to(
        device
    )

    generated_ids = inference.model.generate(
        **inputs,
        max_new_tokens=1000,  # Adjust the length of the generation
        temperature=0.7,  # Adjust creativity
        top_k=50,  # Limits the vocabulary considered at each step
        pad_token_id=inference.tokenizer.eos_token_id,
        do_sample=True,  # Enable sampling to utilize temperature
    )

    generated_text = inference.tokenizer.decode(
        generated_ids[0], skip_special_tokens=True
    )
    print(generated_text)
except Exception as e:
    print(f"An error occurred: {e}")
@@ -1,10 +0,0 @@
from swarms.models import Mixtral

# Initialize the Mixtral model with 4-bit quantization and flash attention
mixtral = Mixtral(load_in_4bit=True, use_flash_attention_2=True)

# Generate text for a simple task
generated_text = mixtral.run("Generate a creative story.")

# Print the generated text
print(generated_text)
@@ -1,45 +0,0 @@
import os

from dotenv import load_dotenv

from swarms import (
    OpenAIChat,
    Conversation,
)

conv = Conversation(
    time_enabled=True,
)

# Load the environment variables
load_dotenv()

# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4")


# Run the language model in a loop
def interactive_conversation(llm):
    conv = Conversation()
    while True:
        user_input = input("User: ")
        conv.add("user", user_input)
        if user_input.lower() == "quit":
            break
        task = (
            conv.return_history_as_string()
        )  # Get the conversation history
        out = llm(task)
        conv.add("assistant", out)
        print(
            f"Assistant: {out}",
        )
    conv.display_conversation()
    conv.export_conversation("conversation.txt")


# Replace with your LLM instance
interactive_conversation(llm)
@@ -1,35 +0,0 @@
# Importing necessary modules
import os
from dotenv import load_dotenv
from swarms import Worker, OpenAIChat, tool

# Loading environment variables from .env file
load_dotenv()

# Retrieving the OpenAI API key from environment variables
api_key = os.getenv("OPENAI_API_KEY")


# Create a tool
@tool
def search_api(query: str):
    """Search the web for the given query (stub implementation)."""
    pass


# Creating a Worker instance
worker = Worker(
    name="My Worker",
    role="Worker",
    human_in_the_loop=False,
    tools=[search_api],
    temperature=0.5,
    llm=OpenAIChat(openai_api_key=api_key),
)

# Running the worker with a prompt
out = worker.run(
    "Hello, how are you? Create an image of how you are doing!"
)

# Printing the output
print(out)
@@ -1,12 +0,0 @@
# Import the model
from swarms import ZeroscopeTTV

# Initialize the model
zeroscope = ZeroscopeTTV()

# Specify the task
task = "A person is walking on the street."

# Generate the video!
video_path = zeroscope(task)
print(video_path)
@@ -1,186 +0,0 @@
import logging
import os
import uuid
from typing import Optional

import chromadb
from dotenv import load_dotenv

from swarms.utils.data_to_text import data_to_text
from swarms.utils.markdown_message import display_markdown_message
from swarms.memory.base_vectordb import BaseVectorDatabase

# Load environment variables
load_dotenv()


# Results storage using local ChromaDB
class ChromaDB(BaseVectorDatabase):
    """
    ChromaDB database

    Args:
        metric (str): The similarity metric to use.
        output_dir (str): The name of the collection to store the results in.
        limit_tokens (int, optional): The maximum number of tokens to use for the query. Defaults to 1000.
        n_results (int, optional): The number of results to retrieve. Defaults to 1.

    Methods:
        add: Add a document to the collection and return its ID.
        query: Retrieve documents matching a query string.

    Examples:
        >>> chromadb = ChromaDB(
        >>>     metric="cosine",
        >>>     output_dir="results",
        >>> )
        >>> doc_id = chromadb.add("This is a document")
        >>> chromadb.query("document")
    """

    def __init__(
        self,
        metric: str = "cosine",
        output_dir: str = "swarms",
        limit_tokens: Optional[int] = 1000,
        n_results: int = 1,
        docs_folder: Optional[str] = None,
        verbose: bool = False,
        *args,
        **kwargs,
    ):
        self.metric = metric
        self.output_dir = output_dir
        self.limit_tokens = limit_tokens
        self.n_results = n_results
        self.docs_folder = docs_folder
        self.verbose = verbose

        # Enable ChromaDB logging when verbose is set
        if verbose:
            logging.getLogger("chromadb").setLevel(logging.INFO)

        # Create a persistent Chroma client
        chroma_persist_dir = "chroma"
        chroma_client = chromadb.PersistentClient(
            settings=chromadb.config.Settings(
                persist_directory=chroma_persist_dir,
            ),
            *args,
            **kwargs,
        )

        # Create ChromaDB client (in-memory; the collection below uses
        # the persistent client)
        self.client = chromadb.Client()

        # Create Chroma collection
        self.collection = chroma_client.get_or_create_collection(
            name=output_dir,
            metadata={"hnsw:space": metric},
            *args,
            **kwargs,
        )
        display_markdown_message(
            "ChromaDB collection created:"
            f" {self.collection.name} with metric: {self.metric} and"
            f" output directory: {self.output_dir}"
        )

        # If a docs folder was provided, ingest it immediately
        if docs_folder:
            display_markdown_message(
                f"Traversing directory: {docs_folder}"
            )
            self.traverse_directory()

    def add(
        self,
        document: str,
        *args,
        **kwargs,
    ):
        """
        Add a document to the ChromaDB collection.

        Args:
            document (str): The document to be added.

        Returns:
            str: The ID of the added document.
        """
        try:
            doc_id = str(uuid.uuid4())
            self.collection.add(
                ids=[doc_id],
                documents=[document],
                *args,
                **kwargs,
            )
            print("-----------------")
            print("Document added successfully")
            print("-----------------")
            return doc_id
        except Exception as e:
            raise Exception(f"Failed to add document: {str(e)}")

    def query(
        self,
        query_text: str,
        *args,
        **kwargs,
    ) -> str:
        """
        Query documents from the ChromaDB collection.

        Args:
            query_text (str): The query string.

        Returns:
            str: The retrieved documents, concatenated into a string.
        """
        try:
            logging.info(f"Querying documents for: {query_text}")
            docs = self.collection.query(
                query_texts=[query_text],
                n_results=self.n_results,
                *args,
                **kwargs,
            )["documents"]

            # Convert into a string
            out = ""
            for doc in docs:
                out += f"{doc}\n"

            # Display the retrieved document
            display_markdown_message(f"Query: {query_text}")
            display_markdown_message(f"Retrieved Document: {out}")
            return out

        except Exception as e:
            raise Exception(f"Failed to query documents: {str(e)}")

    def traverse_directory(self):
        """
        Traverse every file in ``self.docs_folder`` and its subdirectories,
        adding the contents of each file to the collection.

        Returns:
            str: The ID returned by the last ``add`` call.
        """
        added_to_db = False

        for root, dirs, files in os.walk(self.docs_folder):
            for file in files:
                file_path = os.path.join(root, file)
                data = data_to_text(file_path)
                added_to_db = self.add(str(data))
                print(f"{file_path} added to Database")

        return added_to_db
@@ -1,14 +0,0 @@
from pymongo.mongo_client import MongoClient
from pymongo.server_api import ServerApi

uri = "mongodb+srv://kye:Kgx7d2FeLN7AyGNh@cluster0.ndu3b6d.mongodb.net/?retryWrites=true&w=majority&appName=Cluster0"

# Create a new client and connect to the server
client = MongoClient(uri, server_api=ServerApi("1"))

# Send a ping to confirm a successful connection
try:
    client.admin.command("ping")
    print("Pinged your deployment. You successfully connected to MongoDB!")
except Exception as e:
    print(e)
@@ -1,25 +0,0 @@
from langchain.document_loaders import CSVLoader

from swarms.memory import qdrant

loader = CSVLoader(
    file_path="../document_parsing/aipg/aipg.csv",
    encoding="utf-8-sig",
)
docs = loader.load()


# Initialize the Qdrant instance
# See the Qdrant documentation on how to run it locally
qdrant_client = qdrant.Qdrant(
    host="https://697ea26c-2881-4e17-8af4-817fcb5862e8.europe-west3-0.gcp.cloud.qdrant.io",
    collection_name="qdrant",
)
qdrant_client.add_vectors(docs)

# Perform a search
search_query = "Who is jojo"
search_results = qdrant_client.search_vectors(search_query)
print("Search Results:")
for result in search_results:
    print(result)
@@ -1,180 +0,0 @@
"""
Weaviate API Client
"""

from typing import Any, Dict, List, Optional

from swarms.memory.base_vectordb import BaseVectorDatabase

try:
    import weaviate
except ImportError:
    print("pip install weaviate-client")


class WeaviateDB(BaseVectorDatabase):
    """
    Weaviate API Client
    Interface to Weaviate, a vector database with a GraphQL API.

    Args:
        http_host (str): The HTTP host of the Weaviate server.
        http_port (str): The HTTP port of the Weaviate server.
        http_secure (bool): Whether to use HTTPS.
        grpc_host (Optional[str]): The gRPC host of the Weaviate server.
        grpc_port (Optional[str]): The gRPC port of the Weaviate server.
        grpc_secure (Optional[bool]): Whether to use gRPC over TLS.
        auth_client_secret (Optional[Any]): The authentication client secret.
        additional_headers (Optional[Dict[str, str]]): Additional headers to send with requests.
        additional_config (Optional[weaviate.AdditionalConfig]): Additional configuration for the client.

    Methods:
        create_collection: Create a new collection in Weaviate.
        add: Add an object to a specified collection.
        query: Query objects from a specified collection.
        update: Update an object in a specified collection.
        delete: Delete an object from a specified collection.

    Examples:
        >>> from swarms.memory import WeaviateDB
    """

    def __init__(
        self,
        http_host: str,
        http_port: str,
        http_secure: bool,
        grpc_host: Optional[str] = None,
        grpc_port: Optional[str] = None,
        grpc_secure: Optional[bool] = None,
        auth_client_secret: Optional[Any] = None,
        additional_headers: Optional[Dict[str, str]] = None,
        additional_config: Optional[Any] = None,
        connection_params: Optional[Dict[str, Any]] = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.http_host = http_host
        self.http_port = http_port
        self.http_secure = http_secure
        self.grpc_host = grpc_host
        self.grpc_port = grpc_port
        self.grpc_secure = grpc_secure
        self.auth_client_secret = auth_client_secret
        self.additional_headers = additional_headers
        self.additional_config = additional_config
        self.connection_params = connection_params

        # Build the connection params from the provided host/port settings
        connection_params = weaviate.ConnectionParams.from_params(
            http_host=http_host,
            http_port=http_port,
            http_secure=http_secure,
            grpc_host=grpc_host,
            grpc_port=grpc_port,
            grpc_secure=grpc_secure,
        )

        # Initialize the client with the connection params and the
        # optional auth, headers, and extra configuration
        self.client = weaviate.WeaviateClient(
            connection_params=connection_params,
            auth_client_secret=auth_client_secret,
            additional_headers=additional_headers,
            additional_config=additional_config,
        )

    def create_collection(
        self,
        name: str,
        properties: List[Dict[str, Any]],
        vectorizer_config: Any = None,
    ):
        """Create a new collection in Weaviate.

        Args:
            name (str): Name of the collection to create.
            properties (List[Dict[str, Any]]): Property definitions for the collection.
            vectorizer_config (Any, optional): Vectorizer configuration. Defaults to None.
        """
        try:
            out = self.client.collections.create(
                name=name,
                vectorizer_config=vectorizer_config,
                properties=properties,
            )
            print(out)
        except Exception as error:
            print(f"Error creating collection: {error}")
            raise

    def add(self, collection_name: str, properties: Dict[str, Any]):
        """Add an object to a specified collection.

        Args:
            collection_name (str): Name of the target collection.
            properties (Dict[str, Any]): Properties of the object to insert.

        Returns:
            The UUID of the inserted object.
        """
        try:
            collection = self.client.collections.get(collection_name)
            return collection.data.insert(properties)
        except Exception as error:
            print(f"Error adding object: {error}")
            raise

    def query(self, collection_name: str, query: str, limit: int = 10):
        """Query objects from a specified collection.

        Args:
            collection_name (str): Name of the collection to query.
            query (str): The BM25 query string.
            limit (int, optional): Maximum number of results. Defaults to 10.

        Returns:
            List[Dict[str, Any]]: Properties of the matching objects.
        """
        try:
            collection = self.client.collections.get(collection_name)
            response = collection.query.bm25(query=query, limit=limit)
            return [o.properties for o in response.objects]
        except Exception as error:
            print(f"Error querying objects: {error}")
            raise

    def update(
        self,
        collection_name: str,
        object_id: str,
        properties: Dict[str, Any],
    ):
        """Update an object in a specified collection.

        Args:
            collection_name (str): Name of the collection containing the object.
            object_id (str): ID of the object to update.
            properties (Dict[str, Any]): New property values for the object.
        """
        try:
            collection = self.client.collections.get(collection_name)
            collection.data.update(object_id, properties)
        except Exception as error:
            print(f"Error updating object: {error}")
            raise

    def delete(self, collection_name: str, object_id: str):
        """Delete an object from a specified collection.

        Args:
            collection_name (str): Name of the collection containing the object.
            object_id (str): ID of the object to delete.
        """
        try:
            collection = self.client.collections.get(collection_name)
            collection.data.delete_by_id(object_id)
        except Exception as error:
            print(f"Error deleting object: {error}")
            raise
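
# Example usage (a sketch: assumes a locally running Weaviate instance and
# illustrative collection/property values; adjust to your deployment):
if __name__ == "__main__":
    db = WeaviateDB(
        http_host="localhost",
        http_port="8080",
        http_secure=False,
    )
    db.create_collection(
        name="Articles",
        properties=[{"name": "title", "dataType": ["text"]}],
    )
    object_id = db.add("Articles", {"title": "Hello Weaviate"})
    print(db.query("Articles", "hello", limit=5))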
@@ -1,13 +0,0 @@
import asyncio

from swarms.models.distilled_whisperx import DistilWhisperModel

model_wrapper = DistilWhisperModel()

# Download an mp3 voice recording and set its path here
transcription = model_wrapper("path/to/audio.mp3")

# For async usage
transcription = asyncio.run(
    model_wrapper.async_transcribe("path/to/audio.mp3")
)
@@ -1,12 +0,0 @@
from swarms import Mistral

# Initialize the model
model = Mistral(
    model_name="miqudev/miqu-1-70b",
    max_length=500,
    use_flash_attention=True,
    load_in_4bit=True,
)

# Run the model and print the output
result = model.run("What is the meaning of life?")
print(result)
@@ -1,7 +0,0 @@
from swarms.models import Mistral

model = Mistral(device="cuda", use_flash_attention=True)

prompt = "My favourite condiment is"
result = model.run(prompt)
print(result)
@@ -1,9 +0,0 @@
from swarms.models.mpt import MPT

mpt_instance = MPT(
    "mosaicml/mpt-7b-storywriter",
    "EleutherAI/gpt-neox-20b",
    max_tokens=150,
)

mpt_instance.generate("Once upon a time in a land far, far away...")
@@ -1,7 +0,0 @@
from swarms.models.openai_chat import OpenAIChat

model = OpenAIChat()

out = model("Hello, how are you?")

print(out)
@@ -1,6 +1,10 @@
from swarms.models.openai_models import OpenAIChat
import os
from swarms.models import OpenAIChat

openai = OpenAIChat(openai_api_key="", verbose=False)
# Load the API key from the environment
openai = OpenAIChat(
    openai_api_key=os.getenv("OPENAI_API_KEY"), verbose=False
)

chat = openai("What are quantum fields?")
print(chat)
@@ -0,0 +1,88 @@
from swarms.structs.agent_registry import AgentRegistry
from swarms import Agent
from swarms.models import Anthropic


# Initialize the agents
growth_agent1 = Agent(
    agent_name="Marketing Specialist",
    system_prompt="You're the marketing specialist, your purpose is to help companies grow by improving their marketing strategies!",
    agent_description="Improve a company's marketing strategies!",
    llm=Anthropic(),
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="marketing_specialist.json",
    stopping_token="Stop!",
    interactive=True,
    context_length=1000,
)

growth_agent2 = Agent(
    agent_name="Sales Specialist",
    system_prompt="You're the sales specialist, your purpose is to help companies grow by improving their sales strategies!",
    agent_description="Improve a company's sales strategies!",
    llm=Anthropic(),
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="sales_specialist.json",
    stopping_token="Stop!",
    interactive=True,
    context_length=1000,
)

growth_agent3 = Agent(
    agent_name="Product Development Specialist",
    system_prompt="You're the product development specialist, your purpose is to help companies grow by improving their product development strategies!",
    agent_description="Improve a company's product development strategies!",
    llm=Anthropic(),
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="product_development_specialist.json",
    stopping_token="Stop!",
    interactive=True,
    context_length=1000,
)

growth_agent4 = Agent(
    agent_name="Customer Service Specialist",
    system_prompt="You're the customer service specialist, your purpose is to help companies grow by improving their customer service strategies!",
    agent_description="Improve a company's customer service strategies!",
    llm=Anthropic(),
    max_loops="auto",
    autosave=True,
    dashboard=False,
    verbose=True,
    streaming_on=True,
    saved_state_path="customer_service_specialist.json",
    stopping_token="Stop!",
    interactive=True,
    context_length=1000,
)


# Initialize the registry
registry = AgentRegistry()

# Register the agents
registry.add("Marketing Specialist", growth_agent1)
registry.add("Sales Specialist", growth_agent2)
registry.add("Product Development Specialist", growth_agent3)
registry.add("Customer Service Specialist", growth_agent4)


# Query the agents
registry.get("Marketing Specialist")
registry.get("Sales Specialist")
registry.get("Product Development Specialist")

# Get all the agents
registry.list_agents()
@@ -1,45 +0,0 @@
import os

from dotenv import load_dotenv

# Import the OpenAIChat model and the Agent struct
from swarms.models import OpenAIChat
from swarms.structs import Agent
from swarms.structs.autoscaler import AutoScaler

# Load the environment variables
load_dotenv()

# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(
    temperature=0.5,
    openai_api_key=api_key,
)


# Initialize the workflow
agent = Agent(llm=llm, max_loops=1, dashboard=True)


# Load the autoscaler
autoscaler = AutoScaler(
    initial_agents=2,
    scale_up_factor=1,
    idle_threshold=0.2,
    busy_threshold=0.7,
    agents=[agent],
    autoscale=True,
    min_agents=1,
    max_agents=5,
    custom_scale_strategy=None,
)
print(autoscaler)

# Run the workflow on a task
out = autoscaler.run(
    agent.id, "Generate a 10,000 word blog on health and wellness."
)
print(out)
@@ -1,14 +0,0 @@
import pandas as pd

from swarms import dataframe_to_text

# Example usage:
df = pd.DataFrame(
    {
        "A": [1, 2, 3],
        "B": [4, 5, 6],
        "C": [7, 8, 9],
    }
)

print(dataframe_to_text(df))
@@ -0,0 +1,7 @@
"""
A script that runs ruff, black, autopep8, and all other formatters in one Python script on a cron job.

- Perhaps make a GitHub workflow as well


"""
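
# A minimal sketch of the idea described above (assumes ruff, black, and
# autopep8 are installed; the target directory and flags are illustrative):
import subprocess

FORMATTERS = [
    ["ruff", "check", "--fix", "."],
    ["black", "."],
    ["autopep8", "--in-place", "--recursive", "."],
]


def run_formatters() -> None:
    """Run each formatter in sequence, continuing past individual failures."""
    for cmd in FORMATTERS:
        try:
            subprocess.run(cmd, check=True)
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
            print(f"{cmd[0]} failed: {e}")


if __name__ == "__main__":
    run_formatters()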