parent 414adf0af8
commit ef1990ad98
File diff suppressed because one or more lines are too long
@@ -1,22 +0,0 @@
-mkdocs
-mkdocs-material
-mkdocs-glightbox
-mkdocs-git-authors-plugin
-mkdocs-git-revision-date-plugin
-mkdocs-git-committers-plugin
-mkdocstrings
-mike
-mkdocs-jupyter
-mkdocs-git-committers-plugin-2
-mkdocs-git-revision-date-localized-plugin
-mkdocs-redirects
-mkdocs-material-extensions
-mkdocs-simple-hooks
-mkdocs-awesome-pages-plugin
-mkdocs-versioning
-mkdocs-mermaid2-plugin
-mkdocs-include-markdown-plugin
-mkdocs-enumerate-headings-plugin
-mkdocs-autolinks-plugin
-mkdocs-minify-html-plugin
-mkdocs-autolinks-plugin
@@ -1,5 +0,0 @@
-h1: 404
-Published on Unknown
-Authors:
-Abstract
-Abstract not found
File diff suppressed because it is too large
@@ -1,35 +0,0 @@
-python>=3.9,<4.0
-torch>=2.1.1,<3.0
-transformers>=4.39.0,<5.0.0
-asyncio>=3.4.3,<4.0
-langchain-community==0.0.29
-langchain-experimental==0.0.55
-backoff==2.2.1
-toml
-pypdf==4.1.0
-ratelimit==2.2.1
-loguru==0.7.2
-pydantic==2.7.1
-tenacity==8.2.3
-Pillow==10.3.0
-psutil
-sentry-sdk
-python-dotenv
-accelerate==0.28.0
-opencv-python>=4.9.0.80,<4.10.0
-PyYAML
-docstring_parser==0.16
-
-# Lint dependencies
-black>=23.1.0,<24.0.0
-ruff>=0.0.249,<0.3.5
-types-toml>=0.10.8.1,<0.10.9
-types-pytz>=2023.3.0.0,<2023.4.0
-types-chardet>=5.0.4.6,<5.0.5
-mypy-protobuf>=3.0.0,<4.0.0
-
-# Test dependencies
-pytest>=8.1.1,<9.0.0
-termcolor>=2.4.0,<2.5.0
-pandas>=2.2.2,<2.3.0
-fastapi>=0.110.1,<0.111.0
@@ -0,0 +1,28 @@
+import os
+import shutil
+
+def cleanup_json_logs():
+    # Define the root directory and the target directory
+    root_dir = os.getcwd()
+    target_dir = os.path.join(root_dir, 'artifacts5')
+
+    # Create the target directory if it doesn't exist
+    if not os.path.exists(target_dir):
+        os.makedirs(target_dir)
+
+    # Walk through the root directory
+    for dirpath, dirnames, filenames in os.walk(root_dir):
+        for filename in filenames:
+            # If the file is a JSON log file, .log file or .txt file
+            if filename.endswith('.json') or filename.endswith('.log') or filename.endswith('.txt'):
+                # Construct the full file paths
+                file_path = os.path.join(dirpath, filename)
+                target_path = os.path.join(target_dir, filename)
+
+                # Move the file to the target directory
+                shutil.move(file_path, target_path)
+
+    print(f"All JSON, LOG and TXT files have been moved to {target_dir}")
+
+# Call the function
+cleanup_json_logs()
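A minimal usage sketch for the script above, assuming cleanup_json_logs is defined in the current session (importing the file as a module would already trigger the call at the bottom); the repository path is a placeholder:

    import os

    # Run the cleanup from a chosen working directory; 'artifacts5' is created
    # next to wherever the process is started.
    os.chdir('/path/to/repo')  # hypothetical repository root
    cleanup_json_logs()        # moves every *.json, *.log and *.txt file under it into ./artifacts5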
@@ -0,0 +1,130 @@
+try:
+    import ray
+except ImportError:
+    print(
+        "Please install Ray. You can install it by running 'pip install ray'"
+    )
+    raise
+
+
+def track_gpu_resources():
+    """
+    Retrieves and prints information about the total and available GPUs.
+
+    Returns:
+        tuple: A tuple containing the total number of GPUs and the number of available GPUs.
+    """
+    if not ray.is_initialized():
+        ray.init()
+
+    resources = ray.cluster_resources()
+    available_resources = ray.available_resources()
+
+    # Total GPUs in the cluster
+    total_gpus = resources.get("GPU", 0)
+    print(f"Total GPUs: {total_gpus}")
+
+    # Available GPUs (kept separate so the full resources mapping is not shadowed)
+    available_gpus = available_resources.get("GPU", 0)
+    print(f"Available GPUs: {available_gpus}")
+    print(f"Used GPUs: {total_gpus - available_gpus}")
+
+    return total_gpus, available_gpus
+
+
+def track_all_resources():
+    """
+    Prints detailed information about all resources in the Ray cluster.
+
+    This function initializes Ray if it is not already initialized, then retrieves
+    the total and available resources in the cluster and prints the resource name
+    and quantity for each.
+
+    Note: This function requires the Ray library to be installed.
+
+    Example usage:
+        track_all_resources()
+    """
+    if not ray.is_initialized():
+        ray.init()
+
+    resources = ray.cluster_resources()
+    available_resources = ray.available_resources()
+
+    print("Total Resources:")
+    for resource, quantity in resources.items():
+        print(f"  {resource}: {quantity}")
+
+    print("\nAvailable Resources:")
+    for resource, quantity in available_resources.items():
+        print(f"  {resource}: {quantity}")
+
+
+def execute_callable_on_gpu(
+    num_cpus: int = None,
+    num_gpus: int = None,
+    pre_post_process=None,
+    execute_before: bool = True,
+    *args,
+    **kwargs,
+):
+    """
+    A decorator to execute functions with specified Ray resources, with optional pre/post processing.
+
+    Args:
+        num_cpus (int, optional): The number of CPUs to allocate for the function execution. Defaults to None.
+        num_gpus (int, optional): The number of GPUs to allocate for the function execution. Defaults to None.
+        pre_post_process (callable, optional): A callable to be executed before or after the main function. Defaults to None.
+        execute_before (bool, optional): Whether pre_post_process runs before (True) or after (False) the main function. Defaults to True.
+        *args: Variable length argument list.
+        **kwargs: Arbitrary keyword arguments.
+
+    Returns:
+        The result of the main function execution.
+
+    Example:
+        >>> @execute_callable_on_gpu(num_gpus=1)
+        ... def add(a, b):
+        ...     return a + b
+        >>> add(1, 2)
+        3
+    """
+
+    def decorator(func):
+        # Initialize Ray, if not already done.
+        if not ray.is_initialized():
+            ray.init()
+
+        # Register the function as a Ray remote function with the specified resources.
+        remote_func = ray.remote(num_cpus=num_cpus, num_gpus=num_gpus)(
+            func
+        )
+
+        # Optionally, register the pre/post-processing callable if provided.
+        if pre_post_process:
+            remote_callable = ray.remote(
+                num_cpus=num_cpus, num_gpus=num_gpus
+            )(pre_post_process)
+
+        def wrapper(*args, **kwargs):
+            # Execute the callable before or after the main function, based on 'execute_before'
+            if pre_post_process and execute_before:
+                # Execute the callable and wait for its result before the main function
+                callable_result = remote_callable.remote(*args, **kwargs)
+                ray.get(callable_result)
+
+            # Execute the main function
+            result_ref = remote_func.remote(*args, **kwargs)
+            result = ray.get(result_ref)
+
+            if pre_post_process and not execute_before:
+                # Execute the callable and wait for its result after the main function
+                callable_result = remote_callable.remote(*args, **kwargs)
+                ray.get(callable_result)
+
+            return result
+
+        return wrapper
+
+    return decorator
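A minimal usage sketch for the helpers above, assuming the definitions are in scope (the file path is not shown in this diff), Ray is installed, and at least one CPU and one GPU are free; `square` is a hypothetical workload:

    import ray

    # Report cluster capacity before scheduling anything.
    total_gpus, available_gpus = track_gpu_resources()
    track_all_resources()

    # Run a hypothetical workload as a Ray task pinned to one CPU and one GPU.
    @execute_callable_on_gpu(num_cpus=1, num_gpus=1)
    def square(x):
        return x * x

    print(square(4))  # executed remotely; prints 16

    ray.shutdown()  # optional: stop the local Ray instance when done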