cleanup operation

pull/762/head
Kye Gomez 2 months ago
parent de0ed03454
commit 5e8a3d7860

@ -1,23 +0,0 @@
# Use an official CUDA runtime as a parent image
FROM nvidia/cuda:11.4.2-runtime-ubuntu20.04

# Set the working directory in the container to /app
WORKDIR /app

# Install OS packages first; update+install live in one layer to avoid the
# stale-apt-cache problem, and the lists are removed in the same layer so
# they never persist in the image.
RUN apt-get update && apt-get install -y \
    python3-pip \
    && rm -rf /var/lib/apt/lists/*

# Copy only the dependency manifest before the rest of the source so the
# pip layer stays cached until requirements.txt itself changes (previously
# `COPY . /app` ran first and any source edit re-installed every package).
COPY requirements.txt /app/
RUN pip3 install --no-cache-dir -r requirements.txt

# Copy the current directory contents into the container at /app
COPY . /app

# Make port 80 available to the world outside this container (documentation
# only; ports must still be published at `docker run`)
EXPOSE 80

# Define environment variable
# ENV NAME World

# Run example.py when the container launches (exec form keeps it PID 1)
CMD ["python3", "example.py"]

@ -1,82 +0,0 @@
###### VERSION 2
import inspect
import os
import threading
from dotenv import load_dotenv
from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from swarm_models import OpenAIChat
from swarms.structs.majority_voting import MajorityVoting
from swarms.structs.stackoverflow_swarm import StackOverflowSwarm
from swarms.structs.task_queue_base import TaskQueueBase
##########
####################
# Load OPENAI_API_KEY from a local .env file if present (None otherwise).
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# Chat model shared by every documentation worker thread below.
model = OpenAIChat(
    openai_api_key=api_key,
    max_tokens=4000,
)
def process_documentation(cls):
    """
    Generate Markdown documentation for ``cls`` via the OpenAI model and
    write it to ``docs/swarms/tokenizers/<classname>.md``.
    """
    # Bundle the class name, docstring, and source into one prompt body.
    docstring = inspect.getdoc(cls)
    class_source = inspect.getsource(cls)
    prompt_body = (
        "Class Name:"
        f" {cls.__name__}\n\nDocumentation:\n{docstring}\n\nSource"
        f" Code:\n{class_source}"
    )
    # Ask the shared model for the finished documentation text.
    generated = model(
        DOCUMENTATION_WRITER_SOP(prompt_body, "swarms.structs")
    )
    # doc_content = f"# {cls.__name__}\n\n{generated}\n"
    markdown = f"{generated}\n"
    # NOTE(review): output dir is docs/swarms/tokenizers although the prompt
    # module is "swarms.structs" — looks like a copy-paste leftover; confirm.
    output_dir = "docs/swarms/tokenizers"
    os.makedirs(output_dir, exist_ok=True)
    target = os.path.join(output_dir, f"{cls.__name__.lower()}.md")
    with open(target, "w") as handle:
        handle.write(markdown)
    print(f"Documentation generated for {cls.__name__}.")
def main():
    """Document each target struct class on its own worker thread."""
    targets = [
        MajorityVoting,
        StackOverflowSwarm,
        TaskQueueBase,
    ]
    # One thread per class; start them all, then wait for every one.
    workers = [
        threading.Thread(target=process_documentation, args=(target,))
        for target in targets
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Documentation generated in 'swarms.structs' directory.")


if __name__ == "__main__":
    main()

@ -1,77 +0,0 @@
import inspect
import os
import sys
import threading
from dotenv import load_dotenv
from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from swarm_models import OpenAIChat
# Load OPENAI_API_KEY from a local .env file if present (None otherwise).
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# GPT-4 chat model shared by all documentation worker threads below.
model = OpenAIChat(
    model_name="gpt-4",
    openai_api_key=api_key,
    max_tokens=4000,
)
def process_documentation(item):
    """
    Generate Markdown documentation for ``item`` with the OpenAI model and
    save it under ``docs/swarms/utils/<name>.md``.
    """
    docstring = inspect.getdoc(item)
    item_source = inspect.getsource(item)
    prompt_body = (
        f"Name: {item.__name__}\n\nDocumentation:\n{docstring}\n\nSource"
        f" Code:\n{item_source}"
    )
    # Echo the prompt body for visibility while the worker threads run.
    print(prompt_body)
    # Ask the shared model for the finished documentation text.
    generated = model(
        DOCUMENTATION_WRITER_SOP(prompt_body, "swarms.utils")
    )
    markdown = f"# {item.__name__}\n\n{generated}\n"
    output_dir = "docs/swarms/utils"
    os.makedirs(output_dir, exist_ok=True)
    target = os.path.join(output_dir, f"{item.__name__.lower()}.md")
    with open(target, "w") as handle:
        handle.write(markdown)
def main():
    """Generate docs for every function in the already-imported swarms.utils."""
    # Collect every plain function the module exposes.
    module = sys.modules["swarms.utils"]
    functions = [
        member
        for _, member in inspect.getmembers(module, inspect.isfunction)
    ]
    # One worker thread per function; start all, then wait for all.
    workers = [
        threading.Thread(target=process_documentation, args=(func,))
        for func in functions
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Documentation generated in 'docs/swarms/utils' directory.")


if __name__ == "__main__":
    main()

@ -1,84 +0,0 @@
import inspect
import os
import threading
from dotenv import load_dotenv
from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from swarm_models import OpenAIChat
###########
###############

# Load OPENAI_API_KEY from a local .env file if present (None otherwise).
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# GPT-4 Turbo (1106 preview) model shared by the worker threads below.
model = OpenAIChat(
    model_name="gpt-4-1106-preview",
    openai_api_key=api_key,
    max_tokens=4000,
)
def process_documentation(
    item,
    module: str = "swarms.structs",
    docs_folder_path: str = "docs/swarms/structs",
):
    """
    Generate Markdown documentation for a class or function with the OpenAI
    model and save it under ``docs_folder_path``.

    Args:
        item: The class or function to document.
        module (str): Module name interpolated into the prompt.
        docs_folder_path (str): Directory the ``.md`` file is written into.
    """
    docstring = inspect.getdoc(item)
    item_source = inspect.getsource(item)
    # Classes get a slightly different prompt header than functions.
    label = "Class Name" if inspect.isclass(item) else "Name"
    prompt_body = (
        f"{label}:"
        f" {item.__name__}\n\nDocumentation:\n{docstring}\n\nSource"
        f" Code:\n{item_source}"
    )
    # Ask the shared model for the finished documentation text.
    generated = model(
        DOCUMENTATION_WRITER_SOP(prompt_body, module)
    )
    markdown = f"# {item.__name__}\n\n{generated}\n"
    os.makedirs(docs_folder_path, exist_ok=True)
    target = os.path.join(
        docs_folder_path, f"{item.__name__.lower()}.md"
    )
    with open(target, "w") as handle:
        handle.write(markdown)
    print(
        f"Processed documentation for {item.__name__}. at {target}"
    )
def main(module: str = "docs/swarms/structs"):
    """
    Spawn one worker thread per entry in ``items`` and wait for them all.

    NOTE(review): ``items`` is empty, so this currently starts no threads and
    only prints the completion message — populate the list or confirm this is
    an intentional stub.
    """
    items = []
    threads = []
    for item in items:
        thread = threading.Thread(
            target=process_documentation, args=(item,)
        )
        threads.append(thread)
        thread.start()
    # Wait for all threads to complete
    for thread in threads:
        thread.join()
    print(f"Documentation generated in {module} directory.")


if __name__ == "__main__":
    main()

@ -1,103 +0,0 @@
import inspect
import os
import re
import threading
from dotenv import load_dotenv
from swarms_memory import DictInternalMemory, DictSharedMemory
from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT
from swarm_models import OpenAIChat
# Load OPENAI_API_KEY from a local .env file if present (None otherwise).
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# Chat model shared by every test-writer worker thread below.
model = OpenAIChat(
    openai_api_key=api_key,
    max_tokens=4000,
)

# Disabled Agent wrapper kept for reference; the bare model is used instead.
# agent = Agent(
#     llm=model,
#     agent_name="Unit Testing Agent",
#     agent_description=(
#         "This agent is responsible for generating unit tests for"
#         " the swarms package."
#     ),
#     autosave=True,
#     system_prompt=None,
#     max_loops=1,
# )
def extract_code_from_markdown(markdown_content: str):
    """
    Pull every fenced code block out of a Markdown string.

    Args:
        markdown_content (str): Markdown text that may contain ```-fenced
            code blocks, with or without a language tag after the fence.

    Returns:
        str: The stripped contents of all code blocks joined by newlines
            (empty string when there are no fenced blocks).
    """
    # The optional word-plus-newline after the opening fence is the
    # language tag; only the group (the block body) is captured.
    fence = re.compile(r"```(?:\w+\n)?(.*?)```", re.DOTALL)
    blocks = [block.strip() for block in fence.findall(markdown_content)]
    return "\n".join(blocks)
def create_test(cls):
    """
    Generate a unit-test file for ``cls`` with the OpenAI model and save it
    as ``tests/memory/<classname>.py``.
    """
    # Assemble the prompt from the class name, docstring, and source code.
    docstring = inspect.getdoc(cls)
    class_source = inspect.getsource(cls)
    prompt_body = (
        "Class Name:"
        f" {cls.__name__}\n\nDocumentation:\n{docstring}\n\nSource"
        f" Code:\n{class_source}"
    )
    # The model answers in Markdown; keep only the fenced code blocks.
    raw_answer = model(
        TEST_WRITER_SOP_PROMPT(
            prompt_body, "swarms", "swarms.memory"
        )
    )
    test_code = extract_code_from_markdown(raw_answer)
    # Prepend a "# <ClassName>" comment header to the generated module.
    contents = f"# {cls.__name__}\n\n{test_code}\n"
    output_dir = "tests/memory"
    os.makedirs(output_dir, exist_ok=True)
    target = os.path.join(output_dir, f"{cls.__name__.lower()}.py")
    with open(target, "w") as handle:
        handle.write(contents)
def main():
    """Generate tests for each memory class on its own worker thread."""
    targets = [
        DictInternalMemory,
        DictSharedMemory,
    ]
    # One thread per class; start them all, then wait for every one.
    workers = [
        threading.Thread(target=create_test, args=(target,))
        for target in targets
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Tests generated in 'tests/memory' directory.")


if __name__ == "__main__":
    main()

@ -1,82 +0,0 @@
import inspect
import os
import sys
import threading
from dotenv import load_dotenv
from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT
from swarm_models import OpenAIChat
from swarms.utils.parse_code import extract_code_from_markdown
# Load OPENAI_API_KEY from a local .env file if present (None otherwise).
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

# GPT-4 chat model shared by every test-writer worker thread below.
model = OpenAIChat(
    model_name="gpt-4",
    openai_api_key=api_key,
    max_tokens=4000,
)
def process_documentation(item):
    """
    Generate a unit-test file for ``item`` with the OpenAI model and save it
    as ``tests/utils/<name>.py``.
    """
    docstring = inspect.getdoc(item)
    item_source = inspect.getsource(item)
    prompt_body = (
        f"Name: {item.__name__}\n\nDocumentation:\n{docstring}\n\nSource"
        f" Code:\n{item_source}"
    )
    # print(input_content)
    # The model answers in Markdown; keep only the fenced code blocks.
    raw_answer = model(
        TEST_WRITER_SOP_PROMPT(
            prompt_body, "swarms.utils", "swarms.utils"
        )
    )
    test_code = extract_code_from_markdown(raw_answer)
    print(test_code)
    contents = f"{test_code}"
    output_dir = "tests/utils"
    os.makedirs(output_dir, exist_ok=True)
    target = os.path.join(output_dir, f"{item.__name__.lower()}.py")
    with open(target, "w") as handle:
        handle.write(contents)
def main():
    """Generate tests for every function in the already-imported swarms.utils."""
    # Collect every plain function the module exposes.
    module = sys.modules["swarms.utils"]
    functions = [
        member
        for _, member in inspect.getmembers(module, inspect.isfunction)
    ]
    # One worker thread per function; start all, then wait for all.
    workers = [
        threading.Thread(target=process_documentation, args=(func,))
        for func in functions
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Tests generated in 'tests/utils' directory.")


if __name__ == "__main__":
    main()

@ -1,202 +0,0 @@
def DOCUMENTATION_WRITER_SOP(
    task: str,
    module: str,
):
    """
    Build the standard-operating-procedure prompt that instructs the model
    to write pytorch-style Markdown documentation.

    Args:
        task (str): The source code to be documented; injected verbatim at
            the end of the prompt.
        module (str): Library/module name interpolated into the prompt text.

    Returns:
        str: The fully rendered prompt.
    """
    # The prompt text below is sent to the model verbatim; do not edit it
    # without re-validating the generated documentation quality.
    documentation = f"""Create multi-page long and explicit professional pytorch-like documentation for the {module} code below follow the outline for the {module} library,
    provide many examples and teach the user about the code, provide examples for every function, make the documentation 10,000 words,
    provide many usage examples and note this is markdown docs, create the documentation for the code to document,
    put the arguments and methods in a table in markdown to make it visually seamless
    Now make the professional documentation for this code, provide the architecture and how the class works and why it works that way,
    it's purpose, provide args, their types, 3 ways of usage examples, in examples show all the code like imports main example etc
    BE VERY EXPLICIT AND THOROUGH, MAKE IT DEEP AND USEFUL
    ########
    Step 1: Understand the purpose and functionality of the module or framework
    Read and analyze the description provided in the documentation to understand the purpose and functionality of the module or framework.
    Identify the key features, parameters, and operations performed by the module or framework.
    Step 2: Provide an overview and introduction
    Start the documentation by providing a brief overview and introduction to the module or framework.
    Explain the importance and relevance of the module or framework in the context of the problem it solves.
    Highlight any key concepts or terminology that will be used throughout the documentation.
    Step 3: Provide a class or function definition
    Provide the class or function definition for the module or framework.
    Include the parameters that need to be passed to the class or function and provide a brief description of each parameter.
    Specify the data types and default values for each parameter.
    Step 4: Explain the functionality and usage
    Provide a detailed explanation of how the module or framework works and what it does.
    Describe the steps involved in using the module or framework, including any specific requirements or considerations.
    Provide code examples to demonstrate the usage of the module or framework.
    Explain the expected inputs and outputs for each operation or function.
    Step 5: Provide additional information and tips
    Provide any additional information or tips that may be useful for using the module or framework effectively.
    Address any common issues or challenges that developers may encounter and provide recommendations or workarounds.
    Step 6: Include references and resources
    Include references to any external resources or research papers that provide further information or background on the module or framework.
    Provide links to relevant documentation or websites for further exploration.
    Example Template for the given documentation:
    # Module/Function Name: MultiheadAttention
    class torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
    ```
    Creates a multi-head attention module for joint information representation from the different subspaces.
    Parameters:
    - embed_dim (int): Total dimension of the model.
    - num_heads (int): Number of parallel attention heads. The embed_dim will be split across num_heads.
    - dropout (float): Dropout probability on attn_output_weights. Default: 0.0 (no dropout).
    - bias (bool): If specified, adds bias to input/output projection layers. Default: True.
    - add_bias_kv (bool): If specified, adds bias to the key and value sequences at dim=0. Default: False.
    - add_zero_attn (bool): If specified, adds a new batch of zeros to the key and value sequences at dim=1. Default: False.
    - kdim (int): Total number of features for keys. Default: None (uses kdim=embed_dim).
    - vdim (int): Total number of features for values. Default: None (uses vdim=embed_dim).
    - batch_first (bool): If True, the input and output tensors are provided as (batch, seq, feature). Default: False.
    - device (torch.device): If specified, the tensors will be moved to the specified device.
    - dtype (torch.dtype): If specified, the tensors will have the specified dtype.
    ```
    def forward(query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None, average_attn_weights=True, is_causal=False):
    ```
    Forward pass of the multi-head attention module.
    Parameters:
    - query (Tensor): Query embeddings of shape (L, E_q) for unbatched input, (L, N, E_q) when batch_first=False, or (N, L, E_q) when batch_first=True.
    - key (Tensor): Key embeddings of shape (S, E_k) for unbatched input, (S, N, E_k) when batch_first=False, or (N, S, E_k) when batch_first=True.
    - value (Tensor): Value embeddings of shape (S, E_v) for unbatched input, (S, N, E_v) when batch_first=False, or (N, S, E_v) when batch_first=True.
    - key_padding_mask (Optional[Tensor]): If specified, a mask indicating elements to be ignored in key for attention computation.
    - need_weights (bool): If specified, returns attention weights in addition to attention outputs. Default: True.
    - attn_mask (Optional[Tensor]): If specified, a mask preventing attention to certain positions.
    - average_attn_weights (bool): If true, returns averaged attention weights per head. Otherwise, returns attention weights separately per head. Note that this flag only has an effect when need_weights=True. Default: True.
    - is_causal (bool): If specified, applies a causal mask as the attention mask. Default: False.
    Returns:
    Tuple[Tensor, Optional[Tensor]]:
    - attn_output (Tensor): Attention outputs of shape (L, E) for unbatched input, (L, N, E) when batch_first=False, or (N, L, E) when batch_first=True.
    - attn_output_weights (Optional[Tensor]): Attention weights of shape (L, S) when unbatched or (N, L, S) when batched. Optional, only returned when need_weights=True.
    ```
    # Implementation of the forward pass of the attention module goes here
    return attn_output, attn_output_weights
    ```
    # Usage example:
    multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
    attn_output, attn_output_weights = multihead_attn(query, key, value)
    Note:
    The above template includes the class or function definition, parameters, description, and usage example.
    To replicate the documentation for any other module or framework, follow the same structure and provide the specific details for that module or framework.
    ############# DOCUMENT THE FOLLOWING CODE ########
    {task}
    """
    return documentation
def TEST_WRITER_SOP_PROMPT(
    task: str, module: str, path: str, *args, **kwargs
):
    """
    Build the standard-operating-procedure prompt that instructs the model
    to write extensive pytest tests for a piece of code.

    Args:
        task (str): The source code to write tests for; injected verbatim
            at the end of the prompt.
        module (str): Module name interpolated into the prompt text.
        path (str): File path interpolated into the prompt text.
        *args: Unused; accepted for call-site flexibility.
        **kwargs: Unused; accepted for call-site flexibility.

    Returns:
        str: The fully rendered prompt.
    """
    # The prompt text below is sent to the model verbatim; do not edit it
    # without re-validating the generated test quality.
    TESTS_PROMPT = f"""
    Create 5,000 lines of extensive and thorough tests for the code below using the guide, do not worry about your limits you do not have any
    just write the best tests possible, the module is {module}, the file path is {path} return all of the code in one file, make sure to test all the functions and methods in the code.
    ######### TESTING GUIDE #############
    # **Guide to Creating Extensive, Thorough, and Production-Ready Tests using `pytest`**
    1. **Preparation**:
    - Install pytest: `pip install pytest`.
    - Structure your project so that tests are in a separate `tests/` directory.
    - Name your test files with the prefix `test_` for pytest to recognize them.
    2. **Writing Basic Tests**:
    - Use clear function names prefixed with `test_` (e.g., `test_check_value()`).
    - Use assert statements to validate results.
    3. **Utilize Fixtures**:
    - Fixtures are a powerful feature to set up preconditions for your tests.
    - Use `@pytest.fixture` decorator to define a fixture.
    - Pass fixture name as an argument to your test to use it.
    4. **Parameterized Testing**:
    - Use `@pytest.mark.parametrize` to run a test multiple times with different inputs.
    - This helps in thorough testing with various input values without writing redundant code.
    5. **Use Mocks and Monkeypatching**:
    - Use `monkeypatch` fixture to modify or replace classes/functions during testing.
    - Use `unittest.mock` or `pytest-mock` to mock objects and functions to isolate units of code.
    6. **Exception Testing**:
    - Test for expected exceptions using `pytest.raises(ExceptionType)`.
    7. **Test Coverage**:
    - Install pytest-cov: `pip install pytest-cov`.
    - Run tests with `pytest --cov=my_module` to get a coverage report.
    8. **Environment Variables and Secret Handling**:
    - Store secrets and configurations in environment variables.
    - Use libraries like `python-decouple` or `python-dotenv` to load environment variables.
    - For tests, mock or set environment variables temporarily within the test environment.
    9. **Grouping and Marking Tests**:
    - Use `@pytest.mark` decorator to mark tests (e.g., `@pytest.mark.slow`).
    - This allows for selectively running certain groups of tests.
    10. **Use Plugins**:
    - Utilize the rich ecosystem of pytest plugins (e.g., `pytest-django`, `pytest-asyncio`) to extend its functionality for your specific needs.
    11. **Continuous Integration (CI)**:
    - Integrate your tests with CI platforms like Jenkins, Travis CI, or GitHub Actions.
    - Ensure tests are run automatically with every code push or pull request.
    12. **Logging and Reporting**:
    - Use `pytest`'s inbuilt logging.
    - Integrate with tools like `Allure` for more comprehensive reporting.
    13. **Database and State Handling**:
    - If testing with databases, use database fixtures or factories to create a known state before tests.
    - Clean up and reset state post-tests to maintain consistency.
    14. **Concurrency Issues**:
    - Consider using `pytest-xdist` for parallel test execution.
    - Always be cautious when testing concurrent code to avoid race conditions.
    15. **Clean Code Practices**:
    - Ensure tests are readable and maintainable.
    - Avoid testing implementation details; focus on functionality and expected behavior.
    16. **Regular Maintenance**:
    - Periodically review and update tests.
    - Ensure that tests stay relevant as your codebase grows and changes.
    17. **Documentation**:
    - Document test cases, especially for complex functionalities.
    - Ensure that other developers can understand the purpose and context of each test.
    18. **Feedback Loop**:
    - Use test failures as feedback for development.
    - Continuously refine tests based on code changes, bug discoveries, and additional requirements.
    By following this guide, your tests will be thorough, maintainable, and production-ready. Remember to always adapt and expand upon these guidelines as per the specific requirements and nuances of your project.
    ######### CREATE TESTS FOR THIS CODE: #######
    {task}
    """
    return TESTS_PROMPT

@ -1,31 +0,0 @@
import os
def generate_file_list(directory, output_file):
    """
    Write an mkdocs-style bullet list of every Markdown file found under
    ``directory`` to ``output_file``.

    Each ``.md`` file produces one line of the form
    ``- <name>: "swarms/utils/<dotted.relative.path>"``.

    Args:
        directory (str): The directory tree to scan for ``.md`` files.
        output_file (str): The file to write the listing to.
    """
    with open(output_file, "w") as f:
        for root, dirs, files in os.walk(directory):
            for file in files:
                if not file.endswith(".md"):
                    continue
                # Path relative to the scanned directory, with separators
                # turned into dots. os.path.relpath is robust to trailing
                # slashes and non-normalized inputs, unlike the previous
                # `replace(directory + "/", "")` string surgery.
                rel = os.path.relpath(os.path.join(root, file), directory)
                file_path = rel.replace(os.sep, ".")
                # Drop the extension to get the display name.
                file_name, _ = os.path.splitext(file)
                # NOTE(review): the "swarms/utils/" prefix is hard-coded even
                # though the call below scans docs/swarms/structs — confirm.
                f.write(
                    f'- {file_name}: "swarms/utils/{file_path}"\n'
                )
# Use the function to generate the file list.
# NOTE(review): this scans docs/swarms/structs while the emitted links are
# prefixed "swarms/utils/" inside generate_file_list — confirm the mismatch.
generate_file_list("docs/swarms/structs", "file_list.txt")

@ -1,64 +0,0 @@
import yaml
def update_mkdocs(
    class_names,
    base_path="docs/zeta/nn/modules",
    mkdocs_file="mkdocs.yml",
):
    """
    Update the mkdocs.yml nav with documentation links for the given classes.

    Args:
        class_names: A list of class names for which documentation is generated.
        base_path: The base path where documentation Markdown files are stored.
        mkdocs_file: The path to the mkdocs.yml file.
    """
    with open(mkdocs_file) as file:
        mkdocs_config = yaml.safe_load(file)

    # Find or create the 'zeta.nn.modules' section in 'nav'.
    zeta_modules_section = None
    for section in mkdocs_config.get("nav", []):
        if "zeta.nn.modules" in section:
            zeta_modules_section = section["zeta.nn.modules"]
            break
    if zeta_modules_section is None:
        zeta_modules_section = {}
        # setdefault guards against a config that has no 'nav' key at all;
        # the previous direct mkdocs_config["nav"].append raised KeyError
        # in that case.
        mkdocs_config.setdefault("nav", []).append(
            {"zeta.nn.modules": zeta_modules_section}
        )

    # Map each class to its Markdown page under base_path.
    for class_name in class_names:
        doc_path = f"{base_path}/{class_name.lower()}.md"
        zeta_modules_section[class_name] = doc_path

    # Write the updated content back, preserving key order.
    with open(mkdocs_file, "w") as file:
        yaml.safe_dump(mkdocs_config, file, sort_keys=False)
# Example usage
# Zeta nn.modules classes whose generated docs pages get linked in mkdocs.yml.
classes = [
    "DenseBlock",
    "HighwayLayer",
    "MultiScaleBlock",
    "FeedbackBlock",
    "DualPathBlock",
    "RecursiveBlock",
    "PytorchGELUTanh",
    "NewGELUActivation",
    "GELUActivation",
    "FastGELUActivation",
    "QuickGELUActivation",
    "ClippedGELUActivation",
    "AccurateGELUActivation",
    "MishActivation",
    "LinearActivation",
    "LaplaceActivation",
    "ReLUSquaredActivation",
]
update_mkdocs(classes)

@ -1,20 +0,0 @@
#!/bin/bash
# Formatter/linter pipeline. Order matters: autopep8 first, then black,
# then ruff autofix, then yapf.

# Navigate to the directory containing the 'tests' folder
# cd /path/to/your/code/directory

# Run autopep8 with max aggressiveness (-aaa) and in-place modification (-i)
# on all Python files under the 'swarms' directory (not 'tests').
autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes swarms/

# Run black with default settings, since black does not have an aggressiveness level.
# Black will format all Python files it finds from the repository root.
black .

# Run ruff autofix on the current directory, then clear ruff's cache.
# Add any additional flags if needed according to your version of ruff.
ruff . --fix
ruff clean

# YAPF (google style, parallel) over the tests tree only.
yapf --recursive --in-place --verbose --style=google --parallel tests

@ -1,25 +0,0 @@
#!/bin/bash
# Remove Python build/runtime artifacts from the current tree.

# Directory artifacts: bytecode caches, build output, ruff cache, egg metadata.
for pattern in "__pycache__" "dist" ".ruff" "*.egg-info"; do
    find . -type d -name "$pattern" -exec rm -r {} +
done

# File artifacts: compiled/optimized bytecode and native extension binaries.
for pattern in "*.pyc" "*.pyo" "*.pyd" "*.so"; do
    find . -type f -name "$pattern" -delete
done

@ -1,21 +0,0 @@
import os
import shutil

# Make sure the destination directory for collected log files exists.
if not os.path.exists("artifacts_five"):
    os.makedirs("artifacts_five")

# Walk the tree rooted at the current directory and relocate every *.log file.
for dirpath, dirnames, filenames in os.walk("."):
    log_names = [name for name in filenames if name.endswith(".log")]
    for name in log_names:
        # shutil.move removes the file from its original location.
        shutil.move(os.path.join(dirpath, name), "artifacts_five")

print(
    "Moved all log files into the 'artifacts_five' directory and"
    " deleted their original location."
)

@ -1,10 +0,0 @@
#!/bin/bash
# Collect every *.log file on the machine into /artifacts_logs.
# NOTE(review): this scans the ENTIRE filesystem from / — confirm that scope.

# Create the new directory if it doesn't exist
sudo mkdir -p /artifacts_logs

# Find all .log files in the root directory and its subdirectories;
# -print0 with `read -d ''` keeps paths containing spaces/newlines intact.
find / -name "*.log" -print0 | while IFS= read -r -d '' file; do
    # Use sudo to move the file to the new directory
    sudo mv "$file" /artifacts_logs/
done

@ -1,33 +0,0 @@
# ==================================
# Use an official Python runtime as a parent image
FROM python:3.11-slim

# Set environment variables (key=value form; the legacy space-separated
# `ENV key value` syntax is deprecated)
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

# Set the working directory in the container
WORKDIR /usr/src/swarms

# Install Python dependencies
# COPY requirements.txt and pyproject.toml if you're using poetry for dependency management
COPY requirements.txt .
# Upgrade pip and install requirements in one layer; --no-cache-dir keeps
# pip's download cache out of the image.
RUN pip install --no-cache-dir --upgrade pip \
    && pip install --no-cache-dir -r requirements.txt

# Install the 'swarms' package, assuming it's available on PyPI
RUN pip install --no-cache-dir -U swarms

# Copy the rest of the application
COPY . .

# Expose port if your application has a web interface
# EXPOSE 5000

# # Define environment variable for the swarm to work
# ENV OPENAI_API_KEY=your_swarm_api_key_here

# If you're using `CMD` to execute a Python script, make sure it's executable
# RUN chmod +x example.py

@ -1,52 +0,0 @@
#!/bin/bash
# Concatenate all documentation files under ../docs into one text file,
# logging progress to docs_compilation.log.

# Set up logging
LOG_FILE="docs_compilation.log"
OUTPUT_FILE="combined_docs.txt"

# Initialize log file
echo "$(date): Starting documentation compilation" > "$LOG_FILE"

# Create/clear output file
> "$OUTPUT_FILE"

# Append one documentation file to the output, or log that it was skipped.
process_file() {
    local file="$1"

    # Get file extension
    extension="${file##*.}"

    echo "$(date): Processing $file" >> "$LOG_FILE"

    case "$extension" in
        md|markdown|txt)
            # All supported formats get identical handling (the md and txt
            # branches previously duplicated this block verbatim).
            echo "# $(basename "$file")" >> "$OUTPUT_FILE"
            cat "$file" >> "$OUTPUT_FILE"
            echo -e "\n\n" >> "$OUTPUT_FILE"
            ;;
        *)
            echo "$(date): Skipping $file - unsupported format" >> "$LOG_FILE"
            return
            ;;
    esac

    echo "$(date): Successfully processed $file" >> "$LOG_FILE"
}

# Find and process all documentation files
find ../docs -type f \( -name "*.md" -o -name "*.txt" -o -name "*.markdown" \) | while read -r file; do
    process_file "$file"
done

# Log completion
echo "$(date): Documentation compilation complete" >> "$LOG_FILE"
echo "$(date): Output saved to $OUTPUT_FILE" >> "$LOG_FILE"

# Print summary
echo "Documentation compilation complete. Check $LOG_FILE for details."

@ -1,39 +0,0 @@
import pkg_resources
def get_package_versions(requirements_path, output_path):
    """
    Read a pip requirements file and write the locally installed version of
    each listed package to ``output_path``.

    Each requirement produces one ``name==version`` line, or
    ``name: not installed`` when the distribution is absent.

    Args:
        requirements_path (str): Path to the requirements file to read.
        output_path (str): File that receives one line per requirement.
    """
    import re  # local: only needed for name extraction below

    try:
        with open(requirements_path) as file:
            requirements = file.readlines()
    except FileNotFoundError:
        print(f"Error: The file '{requirements_path}' was not found.")
        return

    package_versions = []
    for requirement in requirements:
        line = requirement.strip()
        # Skip empty lines and comments
        if line == "" or line.startswith("#"):
            continue
        # Extract the bare package name: stop at any version specifier
        # (==, >=, ~=, ...), extras bracket, or environment marker.
        # The previous split("==") mishandled every specifier except '=='.
        match = re.match(r"[A-Za-z0-9._-]+", line)
        if match is None:
            continue
        package_name = match.group(0)
        try:
            version = pkg_resources.get_distribution(
                package_name
            ).version
            package_versions.append(f"{package_name}=={version}")
        except pkg_resources.DistributionNotFound:
            package_versions.append(f"{package_name}: not installed")

    with open(output_path, "w") as file:
        for package_version in package_versions:
            file.write(package_version + "\n")
    print(f"Versions written to {output_path}")
# Usage
# Snapshot the installed versions of everything in requirements.txt.
get_package_versions("requirements.txt", "installed_versions.txt")

@ -1,33 +0,0 @@
#!/bin/bash
# Append "_example" to every .py file under examples/ that does not
# already end in _example.

# Define the directory to search
dir="examples"

# Check if the directory exists
if [ -d "$dir" ]
then
    # NUL-delimited iteration keeps paths with spaces intact; the previous
    # unquoted `for file in $(find ...)` word-split such paths. A distinct
    # variable (filedir) also avoids shadowing the outer $dir.
    find "$dir" -name "*.py" -print0 | while IFS= read -r -d '' file
    do
        # Extract the file name and directory
        base=$(basename "$file" .py)
        filedir=$(dirname "$file")

        # Check if the file name already contains _example
        if [[ $base == *_example ]]
        then
            echo "Skipping $file as it already contains _example"
            continue
        fi

        # Append _example to the file name
        newname="${base}_example.py"

        # Rename the file
        mv "$file" "$filedir/$newname"
        echo "Renamed $file to $filedir/$newname"
    done
else
    echo "Directory $dir does not exist."
fi

@ -1,40 +0,0 @@
import pkg_resources
import toml
def update_pyproject_versions(pyproject_path):
    """
    Rewrite each Poetry dependency in ``pyproject_path`` to the version
    currently installed in this environment.

    Args:
        pyproject_path (str): Path to a pyproject.toml with a
            [tool.poetry.dependencies] table.
    """
    try:
        with open(pyproject_path) as file:
            data = toml.load(file)
    except FileNotFoundError:
        print(f"Error: The file '{pyproject_path}' was not found.")
        return
    except toml.TomlDecodeError:
        print(
            f"Error: The file '{pyproject_path}' is not a valid TOML"
            " file."
        )
        return

    dependencies = (
        data.get("tool", {}).get("poetry", {}).get("dependencies", {})
    )

    for package in dependencies:
        if package.lower() == "python":
            continue  # Skip the Python version dependency
        try:
            version = pkg_resources.get_distribution(package).version
        except pkg_resources.DistributionNotFound:
            print(f"Warning: Package '{package}' not installed.")
            continue
        current = dependencies[package]
        if isinstance(current, dict):
            # Table-style dependency (e.g. {extras = [...], version = "..."}):
            # update only the version so extras/markers survive. Previously
            # the whole table was clobbered with a bare version string.
            current["version"] = version
        else:
            dependencies[package] = version

    with open(pyproject_path, "w") as file:
        toml.dump(data, file)
    print(f"Updated versions written to {pyproject_path}")
# Usage
# Pin the project's pyproject.toml to the currently installed versions.
update_pyproject_versions("pyproject.toml")

@ -1,22 +0,0 @@
#!/bin/bash
# Run every example script once, recording successes so later runs skip them.

# Define a file to keep track of successfully executed scripts
SUCCESS_LOG="successful_runs.log"

# Ensure the log exists so the grep below does not error on a fresh run.
touch "$SUCCESS_LOG"

# NOTE(review): interpreter path and examples glob are machine-specific —
# parameterize before reuse.
for f in /swarms/examples/examples/example_*.py; do
    # Check if the script has been logged as successful
    if grep -Fxq "$f" "$SUCCESS_LOG"; then
        echo "Skipping ${f} as it ran successfully in a previous run."
    else
        # Run the script if not previously successful
        if /home/kye/miniconda3/envs/swarms/bin/python "$f" 2>>errors.txt; then
            echo "(${f}) ran successfully without errors."
            # Log the successful script execution
            echo "$f" >> "$SUCCESS_LOG"
        else
            echo "Error encountered in ${f}. Check errors.txt for details."
            break
        fi
    fi
    echo "##############################################################################"
done

@ -1,9 +0,0 @@
#!/bin/bash
# Prefix every test file under ./tests with "test_" so pytest discovers it.

# NUL-delimited iteration with `read -r` keeps backslashes, leading spaces,
# and odd file names intact (a bare `while read` mangles them).
find ./tests -name "*.py" -type f -print0 | while IFS= read -r -d '' file
do
    filename=$(basename "$file")
    dir=$(dirname "$file")
    if [[ $filename != test_* ]]; then
        mv "$file" "$dir/test_$filename"
        printf "\e[1;34mRenamed: \e[0m$file \e[1;32mto\e[0m $dir/test_$filename\n"
    fi
done

@ -1 +0,0 @@
# Run pytest on each test file individually (one pytest session per file,
# so a crash in one file cannot abort the others).
find ./tests -name '*.py' -exec pytest {} \;
Loading…
Cancel
Save