commit
48da719e9d
@ -1,15 +0,0 @@
|
||||
[package]
|
||||
name = "swarms-runtime" # The name of your project
|
||||
version = "0.1.0" # The current version, adhering to semantic versioning
|
||||
edition = "2021" # Specifies which edition of Rust you're using, e.g., 2018 or 2021
|
||||
authors = ["Your Name <your.email@example.com>"] # Optional: specify the package authors
|
||||
license = "MIT" # Optional: the license for your project
|
||||
description = "A brief description of my project" # Optional: a short description of your project
|
||||
|
||||
[dependencies]
|
||||
cpython = "0.5"
|
||||
rayon = "1.5"
|
||||
|
||||
[dependencies.pyo3]
|
||||
version = "0.20.3"
|
||||
features = ["extension-module", "auto-initialize"]
|
@ -0,0 +1,107 @@
|
||||
# How to Create A Custom Language Model
|
||||
|
||||
When working with advanced language models, there might come a time when you need a custom solution tailored to your specific needs. Inheriting from an `AbstractLLM` in a Python framework allows developers to create custom language model classes with ease. This developer guide will take you through the process step by step.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Before you begin, ensure that you have:
|
||||
|
||||
- A working knowledge of Python programming.
|
||||
- Basic understanding of object-oriented programming (OOP) in Python.
|
||||
- Familiarity with language models and natural language processing (NLP).
|
||||
- The appropriate Python framework installed, with access to `AbstractLLM`.
|
||||
|
||||
### Step-by-Step Guide
|
||||
|
||||
#### Step 1: Understand `AbstractLLM`
|
||||
|
||||
The `AbstractLLM` is an abstract base class that defines a set of methods and properties which your custom language model (LLM) should implement. Abstract classes in Python are not designed to be instantiated directly but are meant to be subclassed.
|
||||
|
||||
#### Step 2: Create a New Class
|
||||
|
||||
Start by defining a new class that inherits from `AbstractLLM`. This class will implement the required methods defined in the abstract base class.
|
||||
|
||||
```python
|
||||
from swarms import AbstractLLM
|
||||
|
||||
class vLLMLM(AbstractLLM):
|
||||
pass
|
||||
```
|
||||
|
||||
#### Step 3: Initialize Your Class
|
||||
|
||||
Implement the `__init__` method to initialize your custom LLM. You'll want to initialize the base class as well and define any additional parameters for your model.
|
||||
|
||||
```python
|
||||
class vLLMLM(AbstractLLM):
|
||||
def __init__(self, model_name='default_model', tensor_parallel_size=1, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
self.model_name = model_name
|
||||
self.tensor_parallel_size = tensor_parallel_size
|
||||
# Add any additional initialization here
|
||||
```
|
||||
|
||||
#### Step 4: Implement Required Methods
|
||||
|
||||
Implement the `run` method or any other abstract methods required by `AbstractLLM`. This is where you define how your model processes input and returns output.
|
||||
|
||||
```python
|
||||
class vLLMLM(AbstractLLM):
|
||||
# ... existing code ...
|
||||
|
||||
def run(self, task, *args, **kwargs):
|
||||
# Logic for running your model goes here
|
||||
return "Processed output"
|
||||
```
|
||||
|
||||
#### Step 5: Test Your Model
|
||||
|
||||
Instantiate your custom LLM and test it to ensure that it works as expected.
|
||||
|
||||
```python
|
||||
model = vLLMLM(model_name='my_custom_model', tensor_parallel_size=2)
|
||||
output = model.run("What are the symptoms of COVID-19?")
|
||||
print(output) # Outputs: "Processed output"
|
||||
```
|
||||
|
||||
#### Step 6: Integrate Additional Components
|
||||
|
||||
Depending on the requirements, you might need to integrate additional components such as database connections, parallel computing resources, or custom processing pipelines.
|
||||
|
||||
#### Step 7: Documentation
|
||||
|
||||
Write comprehensive docstrings for your class and its methods. Good documentation is crucial for maintaining the code and for other developers who might use your model.
|
||||
|
||||
```python
|
||||
class vLLMLM(AbstractLLM):
|
||||
"""
|
||||
A custom language model class that extends AbstractLLM.
|
||||
|
||||
... more detailed docstring ...
|
||||
"""
|
||||
# ... existing code ...
|
||||
```
|
||||
|
||||
#### Step 8: Best Practices
|
||||
|
||||
Follow best practices such as error handling, input validation, and resource management to ensure your model is robust and reliable.
|
||||
|
||||
#### Step 9: Packaging Your Model
|
||||
|
||||
Package your custom LLM class into a module or package that can be easily distributed and imported into other projects.
|
||||
|
||||
#### Step 10: Version Control and Collaboration
|
||||
|
||||
Use a version control system like Git to track changes to your model. This makes collaboration easier and helps you keep a history of your work.
|
||||
|
||||
### Conclusion
|
||||
|
||||
By following this guide, you should now have a custom model that extends the `AbstractLLM`. Remember that the key to a successful custom LLM is understanding the base functionalities, implementing necessary changes, and testing thoroughly. Keep iterating and improving based on feedback and performance metrics.
|
||||
|
||||
### Further Reading
|
||||
|
||||
- Official Python documentation on abstract base classes.
|
||||
- In-depth tutorials on object-oriented programming in Python.
|
||||
- Advanced NLP techniques and optimization strategies for language models.
|
||||
|
||||
This guide provides the fundamental steps to create custom models using `AbstractLLM`. For detailed implementation and advanced customization, it's essential to dive deeper into the specific functionalities and capabilities of the language model framework you are using.
|
@ -0,0 +1,9 @@
|
||||
"""
|
||||
Plan -> act in a loop until observation is met
|
||||
|
||||
|
||||
# Tools
|
||||
- Terminal
|
||||
- Text Editor
|
||||
- Browser
|
||||
"""
|
@ -0,0 +1,59 @@
|
||||
# NOTE(review): create_graph, weight_edges, create_user_list,
# find_most_similar_podcasts, add_most_similar_podcasts and repeat_steps
# are never imported in this chunk -- confirm the module under test is
# imported before this suite runs.
def test_create_graph():
    """
    Tests that a graph can be created.
    """
    graph = create_graph()
    # NOTE(review): test_weight_edges below iterates graph.edges, which a
    # plain dict does not provide -- confirm create_graph's return type.
    assert isinstance(graph, dict)


def test_weight_edges():
    """
    Tests that the edges of a graph can be weighted.
    """
    graph = create_graph()
    weight_edges(graph)
    # Every edge must carry an integer weight after weighting.
    for edge in graph.edges:
        assert isinstance(edge.weight, int)


def test_create_user_list():
    """
    Tests that a list of all the podcasts that the user has listened to can be created.
    """
    user_list = create_user_list()
    assert isinstance(user_list, list)


def test_find_most_similar_podcasts():
    """
    Tests that the most similar podcasts to a given podcast can be found.
    """
    graph = create_graph()
    weight_edges(graph)
    user_list = create_user_list()
    most_similar_podcasts = find_most_similar_podcasts(
        graph, user_list
    )
    assert isinstance(most_similar_podcasts, list)


def test_add_most_similar_podcasts():
    """
    Tests that the most similar podcasts to a given podcast can be added to the user's list.
    """
    graph = create_graph()
    weight_edges(graph)
    user_list = create_user_list()
    add_most_similar_podcasts(graph, user_list)
    # The user's list must grow (or at least be non-empty) afterwards.
    assert len(user_list) > 0


def test_repeat_steps():
    """
    Tests that steps 5-6 can be repeated until the user's list contains the desired number of podcasts.
    """
    graph = create_graph()
    weight_edges(graph)
    user_list = create_user_list()
    repeat_steps(graph, user_list)
    # NOTE(review): the target size 10 is hard-coded here -- confirm it
    # matches the default used by repeat_steps.
    assert len(user_list) == 10
|
@ -0,0 +1,46 @@
|
||||
import pytest
|
||||
|
||||
# Bug fix: each stub below had a body consisting only of comments, which is
# not a statement -- Python raised IndentationError ("expected an indented
# block") on import, so this whole test module failed to collect. Each stub
# now ends with `pass` so the file is importable while the Arrange/Act/Assert
# skeleton awaits a real implementation.
def test_create_youtube_account():
    """Placeholder: verify a YouTube account can be created."""
    # Arrange
    # Act
    # Assert
    pass


def test_install_video_editing_software():
    """Placeholder: verify video editing software can be installed."""
    # Arrange
    # Act
    # Assert
    pass


def test_write_script():
    """Placeholder: verify a video script can be written."""
    # Arrange
    # Act
    # Assert
    pass


def test_gather_footage():
    """Placeholder: verify footage can be gathered."""
    # Arrange
    # Act
    # Assert
    pass


def test_edit_video():
    """Placeholder: verify the video can be edited."""
    # Arrange
    # Act
    # Assert
    pass


def test_export_video():
    """Placeholder: verify the video can be exported."""
    # Arrange
    # Act
    # Assert
    pass


def test_upload_video_to_youtube():
    """Placeholder: verify the video can be uploaded to YouTube."""
    # Arrange
    # Act
    # Assert
    pass


def test_optimize_video_for_search():
    """Placeholder: verify the video can be optimized for search."""
    # Arrange
    # Act
    # Assert
    pass


def test_share_video():
    """Placeholder: verify the video can be shared."""
    # Arrange
    # Act
    # Assert
    pass
|
@ -0,0 +1,253 @@
|
||||
import concurrent
|
||||
import csv
|
||||
from swarms import Agent, OpenAIChat
|
||||
from swarms.memory import ChromaDB
|
||||
from dotenv import load_dotenv
|
||||
from swarms.utils.parse_code import extract_code_from_markdown
|
||||
from swarms.utils.file_processing import create_file
|
||||
from swarms.utils.loguru_logger import logger
|
||||
|
||||
|
||||
# Load ENV
load_dotenv()

# LLM shared by every agent created below.
# NOTE(review): the original comment labels this "Gemini", but the code
# constructs OpenAIChat -- confirm which provider is intended before
# relying on the variable name.
gemini = OpenAIChat()

# Long-term memory store shared by all agents.
memory = ChromaDB(output_dir="swarm_hackathon")
|
||||
|
||||
|
||||
def execute_concurrently(callable_functions: list, max_workers: int = 5):
    """
    Executes callable functions concurrently using multithreading.

    Parameters:
    - callable_functions: A list of tuples, each containing the callable function and its arguments.
      For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})]
    - max_workers: The maximum number of threads to use.

    Returns:
    - results: A list of results returned by the callable functions, in input
      order. If an error occurs in any function, the exception object is
      placed at the corresponding index in the list.
    """
    # Bug fix: the module only does `import concurrent`, which does NOT make
    # the `concurrent.futures` submodule available, so the original body
    # raised AttributeError at runtime. Import the submodule explicitly here.
    from concurrent import futures

    results = [None] * len(callable_functions)

    def worker(fn, args, kwargs, index):
        # Record either the return value or the raised exception in the
        # slot matching the input order.
        try:
            results[index] = fn(*args, **kwargs)
        except Exception as e:
            results[index] = e

    with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        pending = [
            executor.submit(worker, fn, args, kwargs, i)
            for i, (fn, args, kwargs) in enumerate(callable_functions)
        ]
        # Wait for all threads to complete before returning.
        futures.wait(pending)

    return results
|
||||
|
||||
|
||||
# Adjusting the function to extract specific column values
def extract_and_create_agents(
    csv_file_path: str, target_columns: list
):
    """
    Reads a CSV file, extracts "Project Name" and "Lightning Proposal" for each row,
    creates an Agent for each, and adds it to the swarm network.

    Parameters:
    - csv_file_path: The path to the CSV file.
    - target_columns: A list of column names to extract values from.
      target_columns[0] is used as the project name, target_columns[1]
      as the proposal text.

    Returns:
    - The list of created agents, or None if any exception occurred.
    """
    try:
        agents = []
        with open(csv_file_path, mode="r", encoding="utf-8") as file:
            reader = csv.DictReader(file)
            for row in reader:
                project_name = row[target_columns[0]]
                lightning_proposal = row[target_columns[1]]

                # Example of creating and adding an agent based on the project name and lightning proposal
                agent_name = f"{project_name} agent"
                print(agent_name)  # For demonstration

                # Create the agent
                logger.info("Creating agent...")

                # Design agent
                logger.info("Creating design agent...")
                design_agent = Agent(
                    llm=gemini,
                    agent_name="Design Agent",
                    max_loops=1,
                    stopping_token="<DONE?>",
                    sop=None,
                    system_prompt=(
                        "Transform an app idea into step by step very"
                        " simple algorithmic psuedocode so it can be"
                        " implemented simply."
                    ),
                    long_term_memory=memory,
                )

                # Log the agent
                logger.info(
                    f"Code Agent created: {agent_name} with long term"
                    " memory"
                )
                agent = Agent(
                    llm=gemini,
                    agent_name=agent_name,
                    max_loops=1,
                    code_interpreter=True,
                    stopping_token="<DONE?>",
                    sop=None,
                    system_prompt=(
                        "Transform an app idea into a very simple"
                        " python app in markdown. Return all the"
                        " python code in a single markdown file."
                        " Return only code and nothing else."
                    ),
                    long_term_memory=memory,
                )

                # Testing agent
                # NOTE(review): this rebinds `agent`, discarding the
                # code-writing agent built just above before it is ever run;
                # the "Create the code" call below therefore uses the TESTING
                # agent's system prompt. Confirm whether a separate variable
                # (e.g. `testing_agent`) was intended.
                logger.info(f"Testing_agent agent: {agent_name}")
                agent = Agent(
                    llm=gemini,
                    agent_name=agent_name + " testing",
                    max_loops=1,
                    stopping_token="<DONE?>",
                    sop=None,
                    system_prompt=(
                        "Create unit tests using pytest based on the"
                        " code you see, only return unit test code in"
                        " python using markdown, only return the code"
                        " and nothing else."
                    ),
                    long_term_memory=memory,
                )

                # Log the agent
                logger.info(
                    f"Agent created: {agent_name} with long term"
                    " memory"
                )
                agents.append(agent)

                # Design agent
                design_agent_output = design_agent.run(
                    (
                        "Create the algorithmic psuedocode for the"
                        f" {lightning_proposal} in markdown and"
                        " return it"
                    ),
                    None,
                )

                logger.info(
                    "Algorithmic psuedocode created:"
                    f" {design_agent_output}"
                )

                # Create the code for each project
                output = agent.run(
                    (
                        "Create the code for the"
                        f" {lightning_proposal} in python using the"
                        " algorithmic psuedocode"
                        f" {design_agent_output} and wrap it in"
                        " markdown and return it"
                    ),
                    None,
                )
                print(output)
                # Parse the output
                output = extract_code_from_markdown(output)
                # Create the file
                # NOTE(review): `output` is rebound to create_file's return
                # value here; the later "code {output}" prompt and the log
                # below therefore see that return value, not the code itself
                # -- confirm create_file's contract.
                output = create_file(output, f"{project_name}.py")

                # Testing agent
                testing_agent_output = agent.run(
                    (
                        "Create the unit tests for the"
                        f" {lightning_proposal} in python using the"
                        f" code {output} and wrap it in markdown and"
                        " return it"
                    ),
                    None,
                )
                print(testing_agent_output)

                # Parse the output
                testing_agent_output = extract_code_from_markdown(
                    testing_agent_output
                )
                # Create the file
                testing_agent_output = create_file(
                    testing_agent_output, f"test_{project_name}.py"
                )

                # Log the project created
                logger.info(
                    f"Project {project_name} created: {output} at"
                    f" file path {project_name}.py"
                )
                print(output)

                # Log the unit tests created
                logger.info(
                    f"Unit tests for {project_name} created:"
                    f" {testing_agent_output} at file path"
                    f" test_{project_name}.py"
                )

                print(
                    f"Agent {agent_name} created and added to the"
                    " swarm network"
                )

        return agents

    except Exception as e:
        logger.error(
            "An error occurred while extracting and creating"
            f" agents: {e}"
        )
        return None
|
||||
|
||||
|
||||
# CSV
csv_file = "presentation.csv"

# Specific columns to extract
target_columns = ["Project Name", "Project Description"]

# Use the adjusted function
# NOTE(review): extract_and_create_agents is executed here AND again inside
# execute_concurrently below, so every agent and generated file is produced
# twice -- confirm whether this direct call should be removed.
specific_column_values = extract_and_create_agents(
    csv_file, target_columns
)

# Display the extracted column values
print(specific_column_values)


# Concurrently execute the function
logger.info(
    "Concurrently executing the swarm for each hackathon project..."
)
output = execute_concurrently(
    [
        (extract_and_create_agents, (csv_file, target_columns), {}),
    ],
    max_workers=5,
)
print(output)
|
@ -0,0 +1,86 @@
|
||||
class MockApp:
    """In-memory stand-in for the presentation app exercised by the tests
    in this file: holds a running flag, the active session title, and an
    ordered slide deck."""

    def __init__(self):
        # Initial state: app is running, no talk started, empty deck.
        self.running = True
        self.session = None
        self.slides = []

    def main_menu(self):
        """Prompt for a main-menu choice and return the raw input string."""
        prompt = "Choose option: 1. Start, 2. Load, 3. Exit "
        return input(prompt)

    def start_new_talk(self, title):
        """Begin a fresh talk, discarding any previously loaded slides."""
        self.session = title
        self.slides = []

    def add_slide(self, content):
        """Append a slide to the end of the deck."""
        self.slides.append(content)

    def edit_slide(self, index, content):
        """Replace the slide at *index* with *content*."""
        self.slides[index] = content

    def delete_slide(self, index):
        """Remove the slide at *index* from the deck."""
        del self.slides[index]

    def reorder_slides(self, new_order):
        """Rearrange the deck so position i holds slide new_order[i]."""
        reordered = []
        for position in new_order:
            reordered.append(self.slides[position])
        self.slides = reordered

    def get_number_of_slides(self):
        """Return how many slides the deck currently holds."""
        return len(self.slides)

    # Function to simulate user actions
    def simulate_user_action(self, action):
        """Placeholder to simulate user interaction; not part of the
        actual app code."""
        pass
|
||||
|
||||
|
||||
# Testing starting a new talk
def test_start_new_talk():
    """start_new_talk sets the session title and clears the deck."""
    app = MockApp()
    app.start_new_talk("My New Talk")
    assert app.session == "My New Talk"
    assert app.slides == []


# Testing adding a slide
def test_add_slide():
    """add_slide appends the content to the deck."""
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    assert app.slides == ["Slide Content 1"]


# Testing editing a slide
def test_edit_slide():
    """edit_slide replaces the slide at the given index."""
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    app.edit_slide(0, "Updated Slide Content 1")
    assert app.slides == ["Updated Slide Content 1"]


# Testing deleting a slide
def test_delete_slide():
    """delete_slide removes the slide at the given index."""
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    app.add_slide("Slide Content 2")
    app.delete_slide(0)
    assert app.slides == ["Slide Content 2"]


# Testing reordering slides
def test_reorder_slides():
    """reorder_slides rearranges the deck by the given index order."""
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    app.add_slide("Slide Content 2")
    app.reorder_slides([1, 0])
    assert app.slides == ["Slide Content 2", "Slide Content 1"]


# Testing the number of slides
def test_slide_count():
    """get_number_of_slides reports the current deck length."""
    app = MockApp()
    app.start_new_talk("Talk 1")
    app.add_slide("Slide Content 1")
    app.add_slide("Slide Content 2")
    assert app.get_number_of_slides() == 2
|
|
@ -0,0 +1,38 @@
|
||||
# NOTE(review): "ai_acceleerated_learning" looks misspelled
# ("accelerated") -- confirm the actual package directory name before
# changing or shipping this import.
from ai_acceleerated_learning.Vocal import Vocal

# Single shared Vocal instance used by all tests below.
vocal = Vocal()


def test_pass():
    """generate_video succeeds for a valid prompt and a valid sport."""
    assert (
        vocal.generate_video(
            "I love to play basketball, and I am a very good player.",
            "basketball",
        )
        == "Successfully generated a YouTube video for your prompt: I"
        " love to play basketball, and I am a very good player."
    )


def test_invalid_sports():
    """generate_video rejects a prompt whose sport is not recognized."""
    assert (
        vocal.generate_video(
            "I just ate some delicious tacos", "tacos"
        )
        == "Invalid sports entered!! Please enter a valid sport."
    )


def test_invalid_prompt():
    """generate_video rejects a non-string prompt."""
    assert (
        vocal.generate_video(987, "basketball")
        == "Invalid prompt entered!! Please enter a valid prompt."
    )


def test_not_string():
    """generate_video rejects when both prompt and sport are non-strings."""
    assert (
        vocal.generate_video(789, 234)
        == "Invalid prompt and sports entered!! Please enter valid"
        " prompt and sport."
    )
|
@ -0,0 +1,86 @@
|
||||
# test_presentation_assistant.py
|
||||
|
||||
import pytest
|
||||
from presentation_assistant import (
|
||||
PresentationAssistant,
|
||||
SlideNotFoundError,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
def assistant():
    """Provide a PresentationAssistant preloaded with four slides."""
    slides = [
        "Welcome to our presentation!",
        "Here is the agenda for today.",
        "Let's dive into the first topic.",
        "Thank you for attending.",
    ]
    return PresentationAssistant(slides)


def test_init():
    """Constructor stores the slide list and starts at slide 0."""
    slides = ["Slide 1", "Slide 2"]
    pa = PresentationAssistant(slides)
    assert pa.slides == slides
    assert pa.current_slide == 0


def test_next_slide(assistant):
    """next_slide advances current_slide by one each call."""
    assistant.next_slide()
    assert assistant.current_slide == 1
    assistant.next_slide()
    assert assistant.current_slide == 2


def test_previous_slide(assistant):
    """previous_slide steps current_slide back by one each call."""
    assistant.current_slide = 2
    assistant.previous_slide()
    assert assistant.current_slide == 1
    assistant.previous_slide()
    assert assistant.current_slide == 0


def test_next_slide_at_end(assistant):
    """Advancing past the last slide raises SlideNotFoundError."""
    assistant.current_slide = len(assistant.slides) - 1
    with pytest.raises(SlideNotFoundError):
        assistant.next_slide()


def test_previous_slide_at_start(assistant):
    """Stepping back from the first slide raises SlideNotFoundError."""
    with pytest.raises(SlideNotFoundError):
        assistant.previous_slide()


def test_go_to_slide(assistant):
    """go_to_slide jumps directly to a valid index."""
    assistant.go_to_slide(2)
    assert assistant.current_slide == 2


def test_go_to_slide_out_of_range(assistant):
    """go_to_slide rejects an index one past the end."""
    with pytest.raises(SlideNotFoundError):
        assistant.go_to_slide(len(assistant.slides))


def test_go_to_slide_negative(assistant):
    """go_to_slide rejects negative indices."""
    with pytest.raises(SlideNotFoundError):
        assistant.go_to_slide(-1)


def test_current_slide_content(assistant):
    """current_slide_content returns the text of the active slide."""
    content = assistant.current_slide_content()
    assert content == assistant.slides[0]
    assistant.next_slide()
    content = assistant.current_slide_content()
    assert content == assistant.slides[1]


def test_show_slide(
    assistant, capsys
):  # capsys is a pytest fixture to capture stdout and stderr
    """show_slide prints the active slide's text to stdout."""
    assistant.show_slide()
    captured = capsys.readouterr()
    assert captured.out.strip() == assistant.slides[0]
    assistant.next_slide()
    assistant.show_slide()
    captured = capsys.readouterr()
    assert captured.out.strip() == assistant.slides[1]
|
|
@ -0,0 +1,89 @@
|
||||
from vllm import LLM
|
||||
from swarms import AbstractLLM, Agent, ChromaDB
|
||||
|
||||
|
||||
# Making an instance of the VLLM class
class vLLMLM(AbstractLLM):
    """
    This class represents a variant of the Language Model (LLM) called vLLMLM.
    It extends the AbstractLLM class and provides additional functionality.

    Args:
        model_name (str): The name of the LLM model to use. Defaults to "facebook/opt-13b".
        tensor_parallel_size (int): The size of the tensor parallelism. Defaults to 4.
        *args: Variable length argument list forwarded to AbstractLLM.
        **kwargs: Arbitrary keyword arguments forwarded to AbstractLLM.

    Attributes:
        model_name (str): The name of the LLM model.
        tensor_parallel_size (int): The size of the tensor parallelism.
        llm (LLM): An instance of the vllm LLM class.

    Methods:
        run(task: str, *args, **kwargs): Runs the LLM model to generate output for the given task.
    """

    def __init__(
        self,
        # Bug fix: the original default was "acebook/opt-13b" (missing
        # the leading 'f'); the usage site below passes the correctly
        # spelled "facebook/opt-13b".
        model_name: str = "facebook/opt-13b",
        tensor_parallel_size: int = 4,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.model_name = model_name
        self.tensor_parallel_size = tensor_parallel_size

        # vllm.LLM takes the checkpoint name via the `model` keyword; the
        # original passed `model_name=`, which vllm.LLM does not accept.
        self.llm = LLM(
            model=self.model_name,
            tensor_parallel_size=self.tensor_parallel_size,
        )

    def run(self, task: str, *args, **kwargs):
        """
        Runs the LLM model to generate output for the given task.

        Args:
            task (str): The task for which to generate output.
            *args: Variable length argument list (currently unused).
            **kwargs: Arbitrary keyword arguments (currently unused).

        Returns:
            The generated output for the given task, as produced by
            vllm's LLM.generate.
        """
        return self.llm.generate(task)
|
||||
|
||||
|
||||
# Initializing the agent with the vLLMLM instance and other parameters
# The model name is passed positionally, so it maps onto the first
# parameter (model_name).
model = vLLMLM(
    "facebook/opt-13b",
    tensor_parallel_size=4,
)

# Defining the task
task = "What are the symptoms of COVID-19?"

# Running the agent with the specified task
out = model.run(task)


# Integrate Agent
agent = Agent(
    agent_name="Doctor agent",
    agent_description=(
        "This agent provides information about COVID-19 symptoms."
    ),
    llm=model,
    max_loops="auto",
    autosave=True,
    verbose=True,
    # Long-term memory backed by ChromaDB; top-3 cosine-similarity hits.
    long_term_memory=ChromaDB(
        metric="cosine",
        n_results=3,
        output_dir="results",
        docs_folder="docs",
    ),
    stopping_condition="finish",
)
|
@ -0,0 +1,29 @@
|
||||
from swarms import Agent
|
||||
from swarms.models.base_llm import AbstractLLM
|
||||
|
||||
|
||||
class ExampleLLM(AbstractLLM):
    """
    Minimal AbstractLLM subclass used to show how a custom LLM plugs
    into an Agent.
    """

    def __init__(self):
        # Bug fix: the original signature was `def __init__():` with no
        # `self`, so `ExampleLLM()` raised TypeError at instantiation.
        # NOTE(review): matching the original's bare body, AbstractLLM's
        # __init__ is intentionally not invoked here -- confirm whether
        # super().__init__() is required by the base class.
        pass

    def run(self, task: str, *args, **kwargs):
        """Process *task*; this example stub intentionally returns None."""
        pass
|
||||
|
||||
|
||||
## Initialize the workflow
agent = Agent(
    llm=ExampleLLM(),
    max_loops="auto",
    autosave=True,
    dashboard=False,
    streaming_on=True,
    verbose=True,
    stopping_token="<DONE>",
    interactive=True,
)

# Run the workflow on a task; the prompt instructs the agent to emit the
# stopping token declared above so the "auto" loop can terminate.
agent(
    "Generate a transcript for a youtube video on what swarms are!"
    " Output a <DONE> token when done."
)
|
@ -0,0 +1,18 @@
|
||||
import os
from swarms import OpenAIChat, Agent
from dotenv import load_dotenv


# Load environment variables (expects OPENAI_API_KEY in a .env file)
load_dotenv()

# Create a chat instance
llm = OpenAIChat(
    api_key=os.getenv("OPENAI_API_KEY"),
)

# Create an agent backed by the chat model above
agent = Agent(
    agent_name="GPT-3",
    llm=llm,
)
|
@ -0,0 +1,61 @@
|
||||
import os
from swarms import Gemini, Agent
from swarms.structs.multi_process_workflow import MultiProcessWorkflow
from dotenv import load_dotenv

# Load the environment variables
load_dotenv()

# Gemini API key
api_key = os.getenv("GEMINI_API_KEY")

# Initialize LLM shared by all four agents
llm = Gemini(
    model_name="gemini-pro",
    api_key=api_key,
)

# Initialize the agents -- one per business domain, each single-loop
finance_agent = Agent(
    agent_name="Finance Agent",
    llm=llm,
    max_loops=1,
    system_prompt="Finance",
)

marketing_agent = Agent(
    agent_name="Marketing Agent",
    llm=llm,
    max_loops=1,
    system_prompt="Marketing",
)

product_agent = Agent(
    agent_name="Product Agent",
    llm=llm,
    max_loops=1,
    system_prompt="Product",
)

other_agent = Agent(
    agent_name="Other Agent",
    llm=llm,
    max_loops=1,
    system_prompt="Other",
)

# Swarm: fan the task out to all agents across worker processes
workflow = MultiProcessWorkflow(
    agents=[
        finance_agent,
        marketing_agent,
        product_agent,
        other_agent,
    ],
    max_workers=5,
    autosave=True,
)


# Run the workflow
# NOTE(review): the task string "What" looks truncated -- confirm the
# intended prompt before running this example.
results = workflow.run("What")
|
@ -1,6 +1,4 @@
|
||||
# Import necessary libraries
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
from swarms import ToolAgent
|
||||
|
||||
# Load the pre-trained model and tokenizer
|
@ -0,0 +1,183 @@
|
||||
import inspect
|
||||
import os
|
||||
import threading
|
||||
from typing import Callable, List
|
||||
|
||||
from swarms.prompts.documentation import DOCUMENTATION_WRITER_SOP
|
||||
from swarms import Agent, OpenAIChat
|
||||
from swarms.utils.loguru_logger import logger
|
||||
import concurrent
|
||||
|
||||
#########
|
||||
from swarms.utils.file_processing import (
|
||||
load_json,
|
||||
sanitize_file_path,
|
||||
zip_workspace,
|
||||
create_file_in_folder,
|
||||
zip_folders,
|
||||
)
|
||||
|
||||
|
||||
class PythonDocumentationSwarm:
    """
    A class for automating the documentation process for Python classes.

    Args:
        agents (List[Agent]): A list of agents used for processing the documentation.
        max_loops (int, optional): The maximum number of loops to run. Defaults to 4.
        docs_module_name (str, optional): The name of the module where the documentation will be saved. Defaults to "swarms.utils".
        docs_directory (str, optional): The directory where the documentation will be saved. Defaults to "docs/swarms/utils".

    Attributes:
        agents (List[Agent]): A list of agents used for processing the documentation.
        max_loops (int): The maximum number of loops to run.
        docs_module_name (str): The name of the module where the documentation will be saved.
        docs_directory (str): The directory where the documentation will be saved.
    """

    def __init__(
        self,
        agents: List[Agent],
        max_loops: int = 4,
        docs_module_name: str = "swarms.utils",
        docs_directory: str = "docs/swarms/utils",
        *args,
        **kwargs,
    ):
        # NOTE(review): this class derives only from object, so forwarding
        # *args/**kwargs to super().__init__ raises TypeError whenever extra
        # arguments are actually passed -- confirm the intended base class.
        super().__init__(*args, **kwargs)
        self.agents = agents
        self.max_loops = max_loops
        self.docs_module_name = docs_module_name
        self.docs_directory = docs_directory

        # Initialize agent name logging
        # NOTE(review): agents elsewhere in this repo are constructed with
        # `agent_name=`; verify Agent exposes a `.name` attribute.
        logger.info(
            "Agents used for documentation:"
            f" {', '.join([agent.name for agent in agents])}"
        )

        # Create the directory if it doesn't exist
        dir_path = self.docs_directory
        os.makedirs(dir_path, exist_ok=True)
        logger.info(f"Documentation directory created at {dir_path}.")

    def process_documentation(self, item):
        """
        Process the documentation for a given class using OpenAI model and save it in a Markdown file.

        Args:
            item: The class or function for which the documentation needs to be processed.
        """
        try:
            doc = inspect.getdoc(item)
            source = inspect.getsource(item)
            is_class = inspect.isclass(item)
            item_type = "Class Name" if is_class else "Name"
            input_content = (
                f"{item_type}:"
                f" {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
                f" Code:\n{source}"
            )

            # Process with OpenAI model (assuming the model's __call__ method takes this input and returns processed content)
            # NOTE(review): only the LAST agent's output survives this loop;
            # earlier agents' results are overwritten -- confirm intended.
            for agent in self.agents:
                processed_content = agent(
                    DOCUMENTATION_WRITER_SOP(
                        input_content, self.docs_module_name
                    )
                )

            doc_content = f"{processed_content}\n"

            # Create the directory if it doesn't exist
            dir_path = self.docs_directory
            os.makedirs(dir_path, exist_ok=True)

            # Write the processed documentation to a Markdown file
            file_path = os.path.join(
                dir_path, f"{item.__name__.lower()}.md"
            )
            with open(file_path, "w") as file:
                file.write(doc_content)

            logger.info(
                f"Documentation generated for {item.__name__}."
            )
        except Exception as e:
            logger.error(
                f"Error processing documentation for {item.__name__}."
            )
            logger.error(e)

    def run(self, python_items: List[Callable]):
        """
        Run the documentation process for a list of Python items.

        Args:
            python_items (List[Callable]): A list of Python classes or functions for which the documentation needs to be generated.
        """
        try:
            # One thread per item; process_documentation handles its own
            # errors, so a failing item does not abort the others.
            threads = []
            for item in python_items:
                thread = threading.Thread(
                    target=self.process_documentation, args=(item,)
                )
                threads.append(thread)
                thread.start()

            # Wait for all threads to complete
            for thread in threads:
                thread.join()

            logger.info(
                "Documentation generated in 'swarms.structs'"
                " directory."
            )
        except Exception as e:
            logger.error("Error running documentation process.")
            logger.error(e)

    def run_concurrently(self, python_items: List[Callable]):
        """
        Run the documentation process for the given items via a thread pool.

        Args:
            python_items (List[Callable]): Classes or functions to document.
        """
        try:
            # NOTE(review): this file only does `import concurrent`, which
            # does not import the `concurrent.futures` submodule; unless some
            # other module has already imported it, this line raises
            # AttributeError (caught and logged below). Consider
            # `import concurrent.futures` at the top of the file.
            with concurrent.futures.ThreadPoolExecutor() as executor:
                executor.map(self.process_documentation, python_items)

            logger.info(
                "Documentation generated in 'swarms.structs'"
                " directory."
            )
        except Exception as e:
            logger.error("Error running documentation process.")
            logger.error(e)
||||
|
||||
def run_concurrently(self, python_items: List[Callable]):
|
||||
try:
|
||||
with concurrent.futures.ThreadPoolExecutor() as executor:
|
||||
executor.map(self.process_documentation, python_items)
|
||||
|
||||
logger.info(
|
||||
"Documentation generated in 'swarms.structs'"
|
||||
" directory."
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error("Error running documentation process.")
|
||||
logger.error(e)
|
||||
|
||||
|
||||
# Example usage
# Initialize the agents
agent = Agent(
    llm=OpenAIChat(max_tokens=3000),
    agent_name="Documentation Agent",
    system_prompt=(
        "You write documentation for Python items functions and"
        " classes, return in markdown"
    ),
    max_loops=1,
)

# Initialize the documentation swarm
doc_swarm = PythonDocumentationSwarm(
    agents=[agent],
    max_loops=1,
    docs_module_name="swarms.structs",
    docs_directory="docs/swarms/tokenizers",
)

# Run the documentation process: one thread per file-processing helper below
doc_swarm.run(
    [
        load_json,
        sanitize_file_path,
        zip_workspace,
        create_file_in_folder,
        zip_folders,
    ]
)
|
@ -0,0 +1,21 @@
|
||||
import os
|
||||
import shutil
|
||||
|
||||
# Create a new directory for the log files if it doesn't exist
os.makedirs("artifacts", exist_ok=True)

# Resolve the destination once so it can be skipped during the walk.
artifacts_dir = os.path.abspath("artifacts")

# Walk through the current directory
for dirpath, dirnames, filenames in os.walk("."):
    # Bug fix: the walk also visits 'artifacts' itself, so on a second run
    # (or once the first file lands there) shutil.move raised
    # "Destination path ... already exists" trying to move a log onto
    # itself. Skip the destination directory.
    if os.path.abspath(dirpath) == artifacts_dir:
        continue
    for filename in filenames:
        # If the file is a log file
        if filename.endswith(".log"):
            # Construct the full file path
            file_path = os.path.join(dirpath, filename)
            # Move the log file to the 'artifacts' directory
            shutil.move(file_path, "artifacts")

print(
    "Moved all log files into the 'artifacts' directory and deleted"
    " their original location."
)
|
@ -1,46 +0,0 @@
|
||||
import inspect
|
||||
from typing import Callable
|
||||
|
||||
from termcolor import colored
|
||||
|
||||
|
||||
def scrape_tool_func_docs(fn: Callable) -> str:
    """
    Scrape the docstring and parameters of a function decorated with `tool`
    and return them as one formatted string.

    Args:
        fn (Callable): The function to scrape.

    Returns:
        str: The function's name, docstring, and one line per parameter
        giving the parameter's name, default value, and annotation.
    """
    try:
        # Tool decorators wrap the target; unwrap to the original function.
        if hasattr(fn, "func"):
            fn = fn.func

        signature = inspect.signature(fn)
        parameter_lines = []
        for param_name, param in signature.parameters.items():
            default_repr = (
                param.default
                if param.default is not param.empty
                else "None"
            )
            annotation_repr = (
                param.annotation
                if param.annotation is not param.empty
                else "None"
            )
            parameter_lines.append(
                f"Name: {param_name}, Type: {default_repr},"
                f" Annotation: {annotation_repr}"
            )
        parameters_str = "\n".join(parameter_lines)
        return (
            f"Function: {fn.__name__}\n"
            f"Docstring: {inspect.getdoc(fn)}\n"
            f"Parameters:\n{parameters_str}"
        )
    except Exception as error:
        message = (
            f"Error scraping tool function docs {error} try"
            " optimizing your inputs with different"
            " variables and attempt once more."
        )
        print(colored(message, "red"))
|
Loading…
Reference in new issue