parent 63236dbee3
commit c5ba940e47
@@ -0,0 +1,101 @@
###### VERSION 2
import inspect
import os
import threading

from zeta import OpenAIChat
from scripts.auto_tests_docs.docs import DOCUMENTATION_WRITER_SOP
from zeta.nn.modules._activations import (
    AccurateGELUActivation,
    ClippedGELUActivation,
    FastGELUActivation,
    GELUActivation,
    LaplaceActivation,
    LinearActivation,
    MishActivation,
    NewGELUActivation,
    PytorchGELUTanh,
    QuickGELUActivation,
    ReLUSquaredActivation,
)
from zeta.nn.modules.dense_connect import DenseBlock
from zeta.nn.modules.dual_path_block import DualPathBlock
from zeta.nn.modules.feedback_block import FeedbackBlock
from zeta.nn.modules.highway_layer import HighwayLayer
from zeta.nn.modules.multi_scale_block import MultiScaleBlock
from zeta.nn.modules.recursive_block import RecursiveBlock
from dotenv import load_dotenv

load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")

model = OpenAIChat(
    model_name="gpt-4",
    openai_api_key=api_key,
    max_tokens=4000,
)


def process_documentation(cls):
    """
    Process the documentation for a given class using the OpenAI model and save it to a Markdown file.
    """
    doc = inspect.getdoc(cls)
    source = inspect.getsource(cls)
    input_content = (
        f"Class Name: {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource"
        f" Code:\n{source}"
    )
    print(input_content)

    # Process with the OpenAI model (the model's __call__ method takes this input and returns the processed content)
    processed_content = model(DOCUMENTATION_WRITER_SOP(input_content, "zeta"))

    doc_content = f"# {cls.__name__}\n\n{processed_content}\n"

    # Create the directory if it doesn't exist
    dir_path = "docs/zeta/nn/modules"
    os.makedirs(dir_path, exist_ok=True)

    # Write the processed documentation to a Markdown file
    file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.md")
    with open(file_path, "w") as file:
        file.write(doc_content)


def main():
    classes = [
        DenseBlock,
        HighwayLayer,
        MultiScaleBlock,
        FeedbackBlock,
        DualPathBlock,
        RecursiveBlock,
        PytorchGELUTanh,
        NewGELUActivation,
        GELUActivation,
        FastGELUActivation,
        QuickGELUActivation,
        ClippedGELUActivation,
        AccurateGELUActivation,
        MishActivation,
        LinearActivation,
        LaplaceActivation,
        ReLUSquaredActivation,
    ]

    threads = []
    for cls in classes:
        thread = threading.Thread(target=process_documentation, args=(cls,))
        threads.append(thread)
        thread.start()

    # Wait for all threads to complete
    for thread in threads:
        thread.join()

    print("Documentation generated in 'docs/zeta/nn/modules' directory.")


if __name__ == "__main__":
    main()
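The script above starts one thread per class, so every GPT-4 request runs at once. A minimal sketch, assuming the `process_documentation` function defined above, of how the same fan-out could be bounded with a thread pool; the `max_workers` value of 4 is an arbitrary choice, not part of the commit:

```python
# Hypothetical variant of main(): cap how many documentation jobs run at once.
from concurrent.futures import ThreadPoolExecutor, as_completed


def main_pooled(classes, max_workers: int = 4):
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Submit one job per class, but only `max_workers` run concurrently.
        futures = {
            executor.submit(process_documentation, cls): cls for cls in classes
        }
        for future in as_completed(futures):
            cls = futures[future]
            try:
                future.result()  # re-raise any exception from the worker
            except Exception as error:
                print(f"[ERROR][{cls.__name__}] {error}")
```

Unlike bare `threading.Thread`, the pool also surfaces worker exceptions through `future.result()`.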
@@ -0,0 +1,122 @@
import inspect
import os
import re
import threading

from swarms import OpenAIChat
from scripts.auto_tests_docs.docs import TEST_WRITER_SOP_PROMPT
from zeta.nn.modules._activations import (
    AccurateGELUActivation,
    ClippedGELUActivation,
    FastGELUActivation,
    GELUActivation,
    LaplaceActivation,
    LinearActivation,
    MishActivation,
    NewGELUActivation,
    PytorchGELUTanh,
    QuickGELUActivation,
    ReLUSquaredActivation,
)
from zeta.nn.modules.dense_connect import DenseBlock
from zeta.nn.modules.dual_path_block import DualPathBlock
from zeta.nn.modules.feedback_block import FeedbackBlock
from zeta.nn.modules.highway_layer import HighwayLayer
from zeta.nn.modules.multi_scale_block import MultiScaleBlock
from zeta.nn.modules.recursive_block import RecursiveBlock
from dotenv import load_dotenv

load_dotenv()

api_key = os.getenv("OPENAI_API_KEY")

model = OpenAIChat(
    model_name="gpt-4",
    openai_api_key=api_key,
    max_tokens=4000,
)


def extract_code_from_markdown(markdown_content: str):
    """
    Extracts code blocks from a Markdown string and returns them as a single string.

    Args:
    - markdown_content (str): The Markdown content as a string.

    Returns:
    - str: A single string containing all the code blocks separated by newlines.
    """
    # Regular expression for fenced code blocks
    pattern = r"```(?:\w+\n)?(.*?)```"
    matches = re.findall(pattern, markdown_content, re.DOTALL)

    # Concatenate all code blocks separated by newlines
    return "\n".join(code.strip() for code in matches)


def create_test(cls):
    """
    Generate tests for a given class using the OpenAI model and save them to a Python file.
    """
    doc = inspect.getdoc(cls)
    source = inspect.getsource(cls)
    input_content = (
        f"Class Name: {cls.__name__}\n\nDocumentation:\n{doc}\n\nSource"
        f" Code:\n{source}"
    )
    print(input_content)

    # Process with the OpenAI model (the model's __call__ method takes this input and returns the processed content)
    processed_content = model(
        TEST_WRITER_SOP_PROMPT(input_content, "zeta", "zeta.nn")
    )
    processed_content = extract_code_from_markdown(processed_content)

    doc_content = f"# {cls.__name__}\n\n{processed_content}\n"

    # Create the directory if it doesn't exist
    dir_path = "tests/nn/modules"
    os.makedirs(dir_path, exist_ok=True)

    # Write the generated tests to a Python file
    file_path = os.path.join(dir_path, f"{cls.__name__.lower()}.py")
    with open(file_path, "w") as file:
        file.write(doc_content)


def main():
    classes = [
        DenseBlock,
        HighwayLayer,
        MultiScaleBlock,
        FeedbackBlock,
        DualPathBlock,
        RecursiveBlock,
        PytorchGELUTanh,
        NewGELUActivation,
        GELUActivation,
        FastGELUActivation,
        QuickGELUActivation,
        ClippedGELUActivation,
        AccurateGELUActivation,
        MishActivation,
        LinearActivation,
        LaplaceActivation,
        ReLUSquaredActivation,
    ]

    threads = []
    for cls in classes:
        thread = threading.Thread(target=create_test, args=(cls,))
        threads.append(thread)
        thread.start()

    # Wait for all threads to complete
    for thread in threads:
        thread.join()

    print("Tests generated in 'tests/nn/modules' directory.")


if __name__ == "__main__":
    main()
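For reference, a small sketch of what the `extract_code_from_markdown` helper above yields on a typical model reply. The sample reply is invented, and the fence characters are assembled programmatically only so they can be shown inline here:

```python
# Hypothetical demonstration of extract_code_from_markdown on a made-up reply.
fence = "```"
sample_reply = (
    "Sure, here are the tests:\n\n"
    f"{fence}python\n"
    "import pytest\n\n"
    "def test_example():\n"
    "    assert 1 + 1 == 2\n"
    f"{fence}\n\n"
    "Let me know if you need more.\n"
)

# Only the fenced block survives; the surrounding prose is stripped.
print(extract_code_from_markdown(sample_reply))
```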
@@ -0,0 +1,199 @@
def DOCUMENTATION_WRITER_SOP(
    task: str,
    module: str,
):
    documentation = f"""Create multi-page, long and explicit professional PyTorch-like documentation for the {module} code below. Follow the outline for the {module} library,
provide many examples and teach the user about the code, provide examples for every function, make the documentation 10,000 words,
provide many usage examples and note this is markdown docs, create the documentation for the code to document,
put the arguments and methods in a table in markdown to make it visually seamless

Now make the professional documentation for this code, provide the architecture and how the class works and why it works that way,
its purpose, provide args, their types, 3 ways of usage examples, in examples show all the code like imports main example etc

BE VERY EXPLICIT AND THOROUGH, MAKE IT DEEP AND USEFUL

########
Step 1: Understand the purpose and functionality of the module or framework

Read and analyze the description provided in the documentation to understand the purpose and functionality of the module or framework.
Identify the key features, parameters, and operations performed by the module or framework.

Step 2: Provide an overview and introduction

Start the documentation by providing a brief overview and introduction to the module or framework.
Explain the importance and relevance of the module or framework in the context of the problem it solves.
Highlight any key concepts or terminology that will be used throughout the documentation.

Step 3: Provide a class or function definition

Provide the class or function definition for the module or framework.
Include the parameters that need to be passed to the class or function and provide a brief description of each parameter.
Specify the data types and default values for each parameter.

Step 4: Explain the functionality and usage

Provide a detailed explanation of how the module or framework works and what it does.
Describe the steps involved in using the module or framework, including any specific requirements or considerations.
Provide code examples to demonstrate the usage of the module or framework.
Explain the expected inputs and outputs for each operation or function.

Step 5: Provide additional information and tips

Provide any additional information or tips that may be useful for using the module or framework effectively.
Address any common issues or challenges that developers may encounter and provide recommendations or workarounds.

Step 6: Include references and resources

Include references to any external resources or research papers that provide further information or background on the module or framework.
Provide links to relevant documentation or websites for further exploration.

Example Template for the given documentation:

# Module/Function Name: MultiheadAttention

class torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
    ```
    Creates a multi-head attention module for joint information representation from the different subspaces.

    Parameters:
    - embed_dim (int): Total dimension of the model.
    - num_heads (int): Number of parallel attention heads. The embed_dim will be split across num_heads.
    - dropout (float): Dropout probability on attn_output_weights. Default: 0.0 (no dropout).
    - bias (bool): If specified, adds bias to input/output projection layers. Default: True.
    - add_bias_kv (bool): If specified, adds bias to the key and value sequences at dim=0. Default: False.
    - add_zero_attn (bool): If specified, adds a new batch of zeros to the key and value sequences at dim=1. Default: False.
    - kdim (int): Total number of features for keys. Default: None (uses kdim=embed_dim).
    - vdim (int): Total number of features for values. Default: None (uses vdim=embed_dim).
    - batch_first (bool): If True, the input and output tensors are provided as (batch, seq, feature). Default: False.
    - device (torch.device): If specified, the tensors will be moved to the specified device.
    - dtype (torch.dtype): If specified, the tensors will have the specified dtype.
    ```

    def forward(query, key, value, key_padding_mask=None, need_weights=True, attn_mask=None, average_attn_weights=True, is_causal=False):
    ```
    Forward pass of the multi-head attention module.

    Parameters:
    - query (Tensor): Query embeddings of shape (L, E_q) for unbatched input, (L, N, E_q) when batch_first=False, or (N, L, E_q) when batch_first=True.
    - key (Tensor): Key embeddings of shape (S, E_k) for unbatched input, (S, N, E_k) when batch_first=False, or (N, S, E_k) when batch_first=True.
    - value (Tensor): Value embeddings of shape (S, E_v) for unbatched input, (S, N, E_v) when batch_first=False, or (N, S, E_v) when batch_first=True.
    - key_padding_mask (Optional[Tensor]): If specified, a mask indicating elements to be ignored in key for attention computation.
    - need_weights (bool): If specified, returns attention weights in addition to attention outputs. Default: True.
    - attn_mask (Optional[Tensor]): If specified, a mask preventing attention to certain positions.
    - average_attn_weights (bool): If true, returns averaged attention weights per head. Otherwise, returns attention weights separately per head. Note that this flag only has an effect when need_weights=True. Default: True.
    - is_causal (bool): If specified, applies a causal mask as the attention mask. Default: False.

    Returns:
    Tuple[Tensor, Optional[Tensor]]:
    - attn_output (Tensor): Attention outputs of shape (L, E) for unbatched input, (L, N, E) when batch_first=False, or (N, L, E) when batch_first=True.
    - attn_output_weights (Optional[Tensor]): Attention weights of shape (L, S) when unbatched or (N, L, S) when batched. Optional, only returned when need_weights=True.
    ```

    # Implementation of the forward pass of the attention module goes here

    return attn_output, attn_output_weights

    ```
    # Usage example:

    multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
    attn_output, attn_output_weights = multihead_attn(query, key, value)
    ```

Note:

The above template includes the class or function definition, parameters, description, and usage example.
To replicate the documentation for any other module or framework, follow the same structure and provide the specific details for that module or framework.


############# DOCUMENT THE FOLLOWING CODE ########
{task}
"""
    return documentation


def TEST_WRITER_SOP_PROMPT(task: str, module: str, path: str, *args, **kwargs):
    TESTS_PROMPT = f"""

Create 5,000 lines of extensive and thorough tests for the code below using the guide, do not worry about your limits you do not have any
just write the best tests possible, the module is {module}, the file path is {path}


######### TESTING GUIDE #############

# **Guide to Creating Extensive, Thorough, and Production-Ready Tests using `pytest`**

1. **Preparation**:
    - Install pytest: `pip install pytest`.
    - Structure your project so that tests are in a separate `tests/` directory.
    - Name your test files with the prefix `test_` for pytest to recognize them.

2. **Writing Basic Tests**:
    - Use clear function names prefixed with `test_` (e.g., `test_check_value()`).
    - Use assert statements to validate results.

3. **Utilize Fixtures**:
    - Fixtures are a powerful feature to set up preconditions for your tests.
    - Use `@pytest.fixture` decorator to define a fixture.
    - Pass fixture name as an argument to your test to use it.

4. **Parameterized Testing**:
    - Use `@pytest.mark.parametrize` to run a test multiple times with different inputs.
    - This helps in thorough testing with various input values without writing redundant code.

5. **Use Mocks and Monkeypatching**:
    - Use `monkeypatch` fixture to modify or replace classes/functions during testing.
    - Use `unittest.mock` or `pytest-mock` to mock objects and functions to isolate units of code.

6. **Exception Testing**:
    - Test for expected exceptions using `pytest.raises(ExceptionType)`.

7. **Test Coverage**:
    - Install pytest-cov: `pip install pytest-cov`.
    - Run tests with `pytest --cov=my_module` to get a coverage report.

8. **Environment Variables and Secret Handling**:
    - Store secrets and configurations in environment variables.
    - Use libraries like `python-decouple` or `python-dotenv` to load environment variables.
    - For tests, mock or set environment variables temporarily within the test environment.

9. **Grouping and Marking Tests**:
    - Use `@pytest.mark` decorator to mark tests (e.g., `@pytest.mark.slow`).
    - This allows for selectively running certain groups of tests.

10. **Use Plugins**:
    - Utilize the rich ecosystem of pytest plugins (e.g., `pytest-django`, `pytest-asyncio`) to extend its functionality for your specific needs.

11. **Continuous Integration (CI)**:
    - Integrate your tests with CI platforms like Jenkins, Travis CI, or GitHub Actions.
    - Ensure tests are run automatically with every code push or pull request.

12. **Logging and Reporting**:
    - Use `pytest`'s inbuilt logging.
    - Integrate with tools like `Allure` for more comprehensive reporting.

13. **Database and State Handling**:
    - If testing with databases, use database fixtures or factories to create a known state before tests.
    - Clean up and reset state post-tests to maintain consistency.

14. **Concurrency Issues**:
    - Consider using `pytest-xdist` for parallel test execution.
    - Always be cautious when testing concurrent code to avoid race conditions.

15. **Clean Code Practices**:
    - Ensure tests are readable and maintainable.
    - Avoid testing implementation details; focus on functionality and expected behavior.

16. **Regular Maintenance**:
    - Periodically review and update tests.
    - Ensure that tests stay relevant as your codebase grows and changes.

17. **Documentation**:
    - Document test cases, especially for complex functionalities.
    - Ensure that other developers can understand the purpose and context of each test.

18. **Feedback Loop**:
    - Use test failures as feedback for development.
    - Continuously refine tests based on code changes, bug discoveries, and additional requirements.

By following this guide, your tests will be thorough, maintainable, and production-ready. Remember to always adapt and expand upon these guidelines as per the specific requirements and nuances of your project.


######### CREATE TESTS FOR THIS CODE: #######
{task}

"""

    return TESTS_PROMPT
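A minimal, hypothetical sketch of how the two prompt builders above are intended to be called; the snippet, module name, and path are placeholders rather than values taken from the commit:

```python
# Hypothetical usage of the prompt builders defined above.
snippet = "class DenseBlock:\n    ..."

doc_prompt = DOCUMENTATION_WRITER_SOP(snippet, module="zeta.nn.modules")
test_prompt = TEST_WRITER_SOP_PROMPT(
    snippet, module="zeta.nn.modules", path="tests/nn/modules"
)

# Both return plain strings that the generation scripts above pass to the
# chat model, e.g. model(doc_prompt).
print(len(doc_prompt), len(test_prompt))
```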
@@ -0,0 +1,60 @@
import yaml


def update_mkdocs(
    class_names, base_path="docs/zeta/nn/modules", mkdocs_file="mkdocs.yml"
):
    """
    Update the mkdocs.yml file with new documentation links.

    Args:
    - class_names: A list of class names for which documentation is generated.
    - base_path: The base path where documentation Markdown files are stored.
    - mkdocs_file: The path to the mkdocs.yml file.
    """
    with open(mkdocs_file, "r") as file:
        mkdocs_config = yaml.safe_load(file)

    # Find or create the 'zeta.nn.modules' section in 'nav'
    zeta_modules_section = None
    for section in mkdocs_config.get("nav", []):
        if "zeta.nn.modules" in section:
            zeta_modules_section = section["zeta.nn.modules"]
            break

    if zeta_modules_section is None:
        zeta_modules_section = {}
        mkdocs_config["nav"].append({"zeta.nn.modules": zeta_modules_section})

    # Add the documentation paths to the 'zeta.nn.modules' section
    for class_name in class_names:
        doc_path = f"{base_path}/{class_name.lower()}.md"
        zeta_modules_section[class_name] = doc_path

    # Write the updated content back to mkdocs.yml
    with open(mkdocs_file, "w") as file:
        yaml.safe_dump(mkdocs_config, file, sort_keys=False)


# Example usage
classes = [
    "DenseBlock",
    "HighwayLayer",
    "MultiScaleBlock",
    "FeedbackBlock",
    "DualPathBlock",
    "RecursiveBlock",
    "PytorchGELUTanh",
    "NewGELUActivation",
    "GELUActivation",
    "FastGELUActivation",
    "QuickGELUActivation",
    "ClippedGELUActivation",
    "AccurateGELUActivation",
    "MishActivation",
    "LinearActivation",
    "LaplaceActivation",
    "ReLUSquaredActivation",
]

update_mkdocs(classes)
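`update_mkdocs` above treats the `zeta.nn.modules` nav section as a mapping of class names to paths. In many `mkdocs.yml` files the nav is instead a list of single-entry mappings, so a hedged, list-style sketch of the same update might look like this; the function name and structure are assumptions, not part of the commit:

```python
import yaml


def update_mkdocs_list_nav(
    class_names, base_path="docs/zeta/nn/modules", mkdocs_file="mkdocs.yml"
):
    """Hypothetical variant for a nav section stored as a list of {title: path} entries."""
    with open(mkdocs_file, "r") as file:
        config = yaml.safe_load(file)

    # Locate (or create) the 'zeta.nn.modules' section as a list.
    section = None
    for item in config.get("nav", []):
        if isinstance(item, dict) and "zeta.nn.modules" in item:
            section = item["zeta.nn.modules"] or []
            item["zeta.nn.modules"] = section
            break
    if section is None:
        section = []
        config.setdefault("nav", []).append({"zeta.nn.modules": section})

    # Append one {ClassName: path} entry per generated page.
    for name in class_names:
        section.append({name: f"{base_path}/{name.lower()}.md"})

    with open(mkdocs_file, "w") as file:
        yaml.safe_dump(config, file, sort_keys=False)
```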
@@ -1,19 +1,19 @@
#!/bin/bash

-# Navigate to the directory containing the 'swarms' folder
+# Navigate to the directory containing the 'tests' folder
# cd /path/to/your/code/directory

# Run autopep8 with max aggressiveness (-aaa) and in-place modification (-i)
-# on all Python files (*.py) under the 'swarms' directory.
-autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes swarms/
+# on all Python files (*.py) under the 'tests' directory.
+autopep8 --in-place --aggressive --aggressive --recursive --experimental --list-fixes zeta/

# Run black with default settings, since black does not have an aggressiveness level.
-# Black will format all Python files it finds in the 'swarms' directory.
-black --experimental-string-processing swarms/
+# Black will format all Python files it finds in the 'tests' directory.
+black --experimental-string-processing zeta/

-# Run ruff on the 'swarms' directory.
+# Run ruff on the 'tests' directory.
# Add any additional flags if needed according to your version of ruff.
-ruff --unsafe_fix
+ruff zeta/ --fix

# YAPF
-yapf --recursive --in-place --verbose --style=google --parallel swarms
+yapf --recursive --in-place --verbose --style=google --parallel tests
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-# Find all __pycache__ directories and delete them
-find . -type d -name "__pycache__" -exec rm -rf {} +
@@ -0,0 +1,66 @@
from typing import Optional, Any

import torch
from diffusers import AutoPipelineForText2Image
from swarms.models.base_multimodal_model import BaseMultiModalModel


class OpenDalle(BaseMultiModalModel):
    """OpenDalle model class

    Attributes:
        model_name (str): The name or path of the model to be used. Defaults to "dataautogpt3/OpenDalleV1.1".
        torch_dtype (torch.dtype): The torch data type to be used. Defaults to torch.float16.
        device (str): The device to be used for computation. Defaults to "cuda".

    Examples:
        >>> from swarms.models.open_dalle import OpenDalle
        >>> od = OpenDalle()
        >>> od.run("A picture of a cat")

    """

    def __init__(
        self,
        model_name: str = "dataautogpt3/OpenDalleV1.1",
        torch_dtype: Any = torch.float16,
        device: str = "cuda",
        *args,
        **kwargs,
    ):
        """
        Initializes the OpenDalle model.

        Args:
            model_name (str, optional): The name or path of the model to be used. Defaults to "dataautogpt3/OpenDalleV1.1".
            torch_dtype (torch.dtype, optional): The torch data type to be used. Defaults to torch.float16.
            device (str, optional): The device to be used for computation. Defaults to "cuda".
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.
        """
        self.pipeline = AutoPipelineForText2Image.from_pretrained(
            model_name, torch_dtype=torch_dtype, *args, **kwargs
        ).to(device)

    def run(self, task: Optional[str] = None, *args, **kwargs):
        """Run the OpenDalle model

        Args:
            task (str, optional): The text prompt to generate an image for. Defaults to None.
            *args: Variable length argument list.
            **kwargs: Arbitrary keyword arguments.

        Returns:
            The first image generated for the prompt (a PIL image with the default pipeline settings).
        """
        try:
            if task is None:
                raise ValueError("Task cannot be None")
            if not isinstance(task, str):
                raise TypeError("Task must be a string")
            if len(task) < 1:
                raise ValueError("Task cannot be empty")
            return self.pipeline(task, *args, **kwargs).images[0]
        except Exception as error:
            print(f"[ERROR][OpenDalle] {error}")
            raise error
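A slightly fuller usage sketch than the docstring example, assuming the `swarms.models.open_dalle` module path from this diff; the CPU fallback and the output filename are illustrative:

```python
# Hypothetical usage sketch; model weights are downloaded on first use.
import torch

from swarms.models.open_dalle import OpenDalle

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

od = OpenDalle(device=device, torch_dtype=dtype)
image = od.run("A watercolor painting of a lighthouse at dusk")
image.save("lighthouse.png")  # diffusers pipelines return PIL images by default
```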
@@ -1,89 +1,94 @@
-TESTS_PROMPT = """
-Create 5,000 lines of extensive and thorough tests for the code below using the guide, do not worry about your limits you do not have any
-just write the best tests possible:
+def TEST_WRITER_SOP_PROMPT(task: str, module: str, path: str, *args, **kwargs):
+    TESTS_PROMPT = f"""
+
+Create 5,000 lines of extensive and thorough tests for the code below using the guide, do not worry about your limits you do not have any
+just write the best tests possible, the module is {module}, the file path is {path}


######### TESTING GUIDE #############

# **Guide to Creating Extensive, Thorough, and Production-Ready Tests using `pytest`**

1. **Preparation**:
    - Install pytest: `pip install pytest`.
    - Structure your project so that tests are in a separate `tests/` directory.
    - Name your test files with the prefix `test_` for pytest to recognize them.

2. **Writing Basic Tests**:
    - Use clear function names prefixed with `test_` (e.g., `test_check_value()`).
    - Use assert statements to validate results.

3. **Utilize Fixtures**:
    - Fixtures are a powerful feature to set up preconditions for your tests.
    - Use `@pytest.fixture` decorator to define a fixture.
    - Pass fixture name as an argument to your test to use it.

4. **Parameterized Testing**:
    - Use `@pytest.mark.parametrize` to run a test multiple times with different inputs.
    - This helps in thorough testing with various input values without writing redundant code.

5. **Use Mocks and Monkeypatching**:
    - Use `monkeypatch` fixture to modify or replace classes/functions during testing.
    - Use `unittest.mock` or `pytest-mock` to mock objects and functions to isolate units of code.

6. **Exception Testing**:
    - Test for expected exceptions using `pytest.raises(ExceptionType)`.

7. **Test Coverage**:
    - Install pytest-cov: `pip install pytest-cov`.
    - Run tests with `pytest --cov=my_module` to get a coverage report.

8. **Environment Variables and Secret Handling**:
    - Store secrets and configurations in environment variables.
    - Use libraries like `python-decouple` or `python-dotenv` to load environment variables.
    - For tests, mock or set environment variables temporarily within the test environment.

9. **Grouping and Marking Tests**:
    - Use `@pytest.mark` decorator to mark tests (e.g., `@pytest.mark.slow`).
    - This allows for selectively running certain groups of tests.

10. **Use Plugins**:
    - Utilize the rich ecosystem of pytest plugins (e.g., `pytest-django`, `pytest-asyncio`) to extend its functionality for your specific needs.

11. **Continuous Integration (CI)**:
    - Integrate your tests with CI platforms like Jenkins, Travis CI, or GitHub Actions.
    - Ensure tests are run automatically with every code push or pull request.

12. **Logging and Reporting**:
    - Use `pytest`'s inbuilt logging.
    - Integrate with tools like `Allure` for more comprehensive reporting.

13. **Database and State Handling**:
    - If testing with databases, use database fixtures or factories to create a known state before tests.
    - Clean up and reset state post-tests to maintain consistency.

14. **Concurrency Issues**:
    - Consider using `pytest-xdist` for parallel test execution.
    - Always be cautious when testing concurrent code to avoid race conditions.

15. **Clean Code Practices**:
    - Ensure tests are readable and maintainable.
    - Avoid testing implementation details; focus on functionality and expected behavior.

16. **Regular Maintenance**:
    - Periodically review and update tests.
    - Ensure that tests stay relevant as your codebase grows and changes.

17. **Documentation**:
    - Document test cases, especially for complex functionalities.
    - Ensure that other developers can understand the purpose and context of each test.

18. **Feedback Loop**:
    - Use test failures as feedback for development.
    - Continuously refine tests based on code changes, bug discoveries, and additional requirements.

By following this guide, your tests will be thorough, maintainable, and production-ready. Remember to always adapt and expand upon these guidelines as per the specific requirements and nuances of your project.


######### CREATE TESTS FOR THIS CODE: #######
+{task}


"""
+
+    return TESTS_PROMPT
@@ -1,11 +1,11 @@
from swarms.structs.autoscaler import AutoScaler
-from swarms.swarms.god_mode import GodMode
+from swarms.swarms.god_mode import ModelParallelizer
from swarms.swarms.multi_agent_collab import MultiAgentCollaboration
from swarms.swarms.base import AbstractSwarm

__all__ = [
    "AutoScaler",
-    "GodMode",
+    "ModelParallelizer",
    "MultiAgentCollaboration",
    "AbstractSwarm",
]
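For downstream code, the rename above implies an import change along these lines, assuming the class was renamed in place rather than removed:

```python
# Before this commit:
# from swarms.swarms import GodMode
# After this commit:
from swarms.swarms import ModelParallelizer
```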
@@ -0,0 +1,59 @@
import pytest
import torch
from swarms.models.open_dalle import OpenDalle


def test_init():
    od = OpenDalle()
    assert isinstance(od, OpenDalle)


def test_init_custom_model():
    od = OpenDalle(model_name="custom_model")
    assert od.pipeline.model_name == "custom_model"


def test_init_custom_dtype():
    od = OpenDalle(torch_dtype=torch.float32)
    assert od.pipeline.torch_dtype == torch.float32


def test_init_custom_device():
    od = OpenDalle(device="cpu")
    assert od.pipeline.device == "cpu"


def test_run():
    od = OpenDalle()
    result = od.run("A picture of a cat")
    assert isinstance(result, torch.Tensor)


def test_run_no_task():
    od = OpenDalle()
    with pytest.raises(ValueError, match="Task cannot be None"):
        od.run(None)


def test_run_non_string_task():
    od = OpenDalle()
    with pytest.raises(TypeError, match="Task must be a string"):
        od.run(123)


def test_run_empty_task():
    od = OpenDalle()
    with pytest.raises(ValueError, match="Task cannot be empty"):
        od.run("")


def test_run_custom_args():
    od = OpenDalle()
    result = od.run("A picture of a cat", custom_arg="custom_value")
    assert isinstance(result, torch.Tensor)


def test_run_error():
    od = OpenDalle()
    with pytest.raises(Exception):
        od.run("A picture of a cat", raise_error=True)
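The tests above construct real pipelines (which downloads model weights) and assert that `run` returns a `torch.Tensor`, although `OpenDalle.run` actually returns `pipeline(...).images[0]`, a PIL image for a default diffusers pipeline. A minimal sketch, assuming pytest's built-in `monkeypatch` fixture, of an isolated variant that avoids the download; the fake classes are hypothetical stand-ins:

```python
# Hypothetical isolated test: stub out the pipeline so nothing is downloaded.
from swarms.models.open_dalle import OpenDalle


class _FakeOutput:
    def __init__(self):
        self.images = ["fake-image"]


class _FakePipeline:
    def to(self, device):
        return self

    def __call__(self, task, *args, **kwargs):
        return _FakeOutput()


def test_run_with_mocked_pipeline(monkeypatch):
    # Replace the heavyweight from_pretrained call with a cheap stub.
    monkeypatch.setattr(
        "swarms.models.open_dalle.AutoPipelineForText2Image.from_pretrained",
        lambda *args, **kwargs: _FakePipeline(),
    )
    od = OpenDalle(device="cpu")
    assert od.run("A picture of a cat") == "fake-image"
```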