[LOGGING CLEANUP]

pull/634/head
Your Name 2 months ago
parent 4a9a0ba3ef
commit 54dfa692a8

@ -1,6 +1,5 @@
from typing import Any
from loguru import logger
from tenacity import retry, stop_after_attempt, wait_exponential
from swarms.prompts.prompt_generator import (
@ -9,6 +8,9 @@ from swarms.prompts.prompt_generator import (
from swarms.prompts.prompt_generator_optimizer import (
prompt_generator_sys_prompt,
)
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="ape_agent")
@retry(

@ -2,12 +2,15 @@ import os
from typing import Any, Callable, Dict, List, Tuple, Union
import yaml
from loguru import logger
from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter
logger = initialize_logger(log_folder="create_agents_from_yaml")
def create_agents_from_yaml(
model: Callable = None,
yaml_file: str = "agents.yaml",

@ -3,13 +3,16 @@ import os
import time
from typing import Dict
from loguru import logger
from swarms.utils.loguru_logger import initialize_logger
from swarms.telemetry.capture_sys_data import (
capture_system_data,
log_agent_data,
)
logger = initialize_logger(log_folder="onboarding_process")
class OnboardingProcess:
"""

@ -4,7 +4,6 @@ import time
import uuid
from typing import Any, Callable, List
from loguru import logger
from pydantic import (
BaseModel,
Field,
@ -17,6 +16,9 @@ from swarms.telemetry.capture_sys_data import (
log_agent_data,
)
from swarms.tools.base_tool import BaseTool
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(file_name="prompts")
class Prompt(BaseModel):

@ -26,7 +26,6 @@ from clusterops import (
execute_on_gpu,
execute_with_cpu_cores,
)
from loguru import logger
from pydantic import BaseModel
from swarm_models.tiktoken_wrapper import TikTokenizer
from termcolor import colored
@ -54,6 +53,9 @@ from swarms.utils.data_to_text import data_to_text
from swarms.utils.file_processing import create_file_in_folder
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.artifacts.main_artifact import Artifact
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="agents")
# Utils

@ -1,10 +1,12 @@
from typing import List, Optional
import chromadb
from loguru import logger
from tenacity import retry, stop_after_attempt, wait_exponential
from typing import Union, Callable, Any
from swarms import Agent
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="agent_router")
class AgentRouter:

@ -1,6 +1,22 @@
import os
from typing import List, Any
from loguru import logger
from swarms.structs.agent import Agent
from loguru import logger
import uuid
# WORKSPACE_DIR may be unset, in which case os.getenv returns None and
# os.path.join(None, ...) below raises TypeError — fall back to the
# current directory so module import never crashes.
# NOTE(review): confirm the intended default workspace location.
WORKSPACE_DIR = os.getenv("WORKSPACE_DIR") or os.getcwd()

# Unique suffix so concurrent runs write to distinct log files.
uuid_for_log = str(uuid.uuid4())
logger.add(
    os.path.join(
        WORKSPACE_DIR,
        "agents_available",
        f"agents-available-{uuid_for_log}.log",
    ),
    level="INFO",
    colorize=True,
    backtrace=True,
    diagnose=True,
)
def get_agent_name(agent: Any) -> str:

@ -8,6 +8,9 @@ from swarm_models import OpenAIFunctionCaller, OpenAIChat
from swarms.structs.agent import Agent
from swarms.structs.swarm_router import SwarmRouter
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="auto_swarm_builder")
class AgentConfig(BaseModel):

@ -5,7 +5,6 @@ from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
from loguru import logger
from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_exponential
@ -19,6 +18,9 @@ from clusterops import (
execute_on_multiple_gpus,
list_available_gpus,
)
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="concurrent_workflow")
class AgentOutputSchema(BaseModel):

@ -1,7 +1,7 @@
from typing import List, Any
from loguru import logger
from pydantic import BaseModel, Field
from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.base_swarm import BaseSwarm
from swarms.structs.agent import Agent
from swarms.structs.concat import concat_strings
@ -9,6 +9,7 @@ from swarms.structs.agent_registry import AgentRegistry
from swarm_models.base_llm import BaseLLM
from swarms.structs.conversation import Conversation
logger = initialize_logger(log_folder="hiearchical_swarm")
# Example usage:
HIEARCHICAL_AGENT_SYSTEM_PROMPT = """

@ -1,26 +1,14 @@
import concurrent.futures
import re
import sys
from collections import Counter
from typing import Any, Callable, List, Optional
from loguru import logger
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.utils.file_processing import create_file
from swarms.utils.loguru_logger import initialize_logger
# Configure loguru logger with advanced settings
logger.remove()
logger.add(
sys.stderr,
colorize=True,
format="<green>{time}</green> <level>{message}</level>",
backtrace=True,
diagnose=True,
enqueue=True,
catch=True,
)
logger = initialize_logger(log_folder="majority_voting")
def extract_last_python_code_block(text):

@ -2,13 +2,15 @@ import asyncio
import time
from typing import Any, Dict, List, Optional
from loguru import logger
from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
from swarms.telemetry.capture_sys_data import log_agent_data
from swarms.schemas.agent_step_schemas import ManySteps
from swarms.prompts.ag_prompt import aggregator_system_prompt
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="mixture_of_agents")
time_stamp = time.strftime("%Y-%m-%d %H:%M:%S")

@ -6,13 +6,15 @@ import uuid
from typing import List, Union
import aiofiles
from loguru import logger
from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm
from swarms.utils.file_processing import create_file_in_folder
from swarms.telemetry.capture_sys_data import log_agent_data
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="spreadsheet_swarm")
time = datetime.datetime.now().isoformat()
uuid_hex = uuid.uuid4().hex

@ -3,9 +3,11 @@ import numpy as np
import torch
from transformers import AutoTokenizer, AutoModel
from pydantic import BaseModel, Field
from loguru import logger
import json
from tenacity import retry, stop_after_attempt, wait_exponential
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="swarm_matcher")
class SwarmType(BaseModel):

@ -3,7 +3,6 @@ from datetime import datetime
from typing import Any, Callable, Dict, List, Literal, Union
from doc_master import doc_master
from loguru import logger
from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_fixed
@ -18,6 +17,9 @@ from swarms.structs.swarm_matcher import swarm_matcher
from swarms.utils.wrapper_clusterop import (
exec_callable_with_clusterops,
)
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="swarm_router")
SwarmType = Literal[
"AgentRearrange",

@ -2,11 +2,13 @@ import asyncio
import math
from typing import List, Union
from loguru import logger
from pydantic import BaseModel
from swarms.structs.agent import Agent
from swarms.structs.omni_agent_types import AgentListType
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="swarming_architectures")
# Define Pydantic schema for logging agent responses

@ -3,11 +3,13 @@ from collections import Counter
from datetime import datetime
from typing import Any, List, Optional
from loguru import logger
from pydantic import BaseModel, Field
from sentence_transformers import SentenceTransformer, util
from swarms import Agent
from swarms.structs.agent import Agent
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="tree_swarm")
# Pretrained model for embeddings
embedding_model = SentenceTransformer(

@ -1,9 +1,10 @@
import subprocess
from loguru import logger
from swarms.utils.loguru_logger import initialize_logger
from swarms.telemetry.check_update import check_for_update
logger = initialize_logger(log_folder="auto_upgrade_swarms")
def auto_update():
"""auto update swarms"""
@ -13,7 +14,15 @@ def auto_update():
logger.info(
"There is a new version of swarms available! Downloading..."
)
subprocess.run(["pip", "install", "-U", "swarms"])
try:
subprocess.run(
["pip", "install", "-U", "swarms"], check=True
)
except subprocess.CalledProcessError:
logger.info("Attempting to install with pip3...")
subprocess.run(
["pip3", "install", "-U", "swarms"], check=True
)
else:
logger.info("swarms is up to date!")
except Exception as e:

@ -2,10 +2,13 @@ import platform
import socket
import psutil
import uuid
from loguru import logger
from typing import Dict
import requests
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="capture_sys_data")
def capture_system_data() -> Dict[str, str]:
"""

@ -1,9 +1,11 @@
import os
from typing import Any, Callable, Dict, List, Optional
import time
from loguru import logger
from pydantic import BaseModel, Field
from concurrent.futures import ThreadPoolExecutor, as_completed
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="tool_registry")
class ToolMetadata(BaseModel):

@ -17,7 +17,6 @@ from swarms.tools.prebuilt.math_eval import math_eval
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.utils.try_except_wrapper import try_except_wrapper
from swarms.utils.concurrent_utils import execute_concurrently
from swarms.utils.calculate_func_metrics import profile_func
@ -37,6 +36,5 @@ __all__ = [
"extract_code_from_markdown",
"pdf_to_text",
"try_except_wrapper",
"execute_concurrently",
"profile_func",
]

@ -1,10 +1,14 @@
from typing import Any, List, Optional, Union
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from loguru import logger
from typing import Any, List, Optional, Union
from doc_master import doc_master
from concurrent.futures import ThreadPoolExecutor, as_completed
from tenacity import retry, stop_after_attempt, wait_exponential
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="add_docs_to_agents")
@retry(
stop=stop_after_attempt(3),

@ -1,49 +0,0 @@
import concurrent.futures
from typing import List, Tuple, Any, Dict, Union, Callable
def execute_concurrently(
    callable_functions: List[
        Tuple[Callable, Tuple[Any, ...], Dict[str, Any]]
    ],
    max_workers: int = 5,
) -> List[Union[Any, Exception]]:
    """
    Run a batch of callables concurrently on a thread pool.

    Parameters:
    - callable_functions: A list of tuples, each containing the callable
      function and its arguments.
      For example: [(function1, (arg1, arg2), {'kwarg1': val1}), (function2, (), {})]
    - max_workers: The maximum number of threads to use.

    Returns:
    - A list aligned with the input order holding each callable's return
      value; if a callable raised, the exception object sits at the
      corresponding index instead.
    """
    with concurrent.futures.ThreadPoolExecutor(
        max_workers=max_workers
    ) as executor:
        futures = [
            executor.submit(fn, args_, kwargs_) if False else executor.submit(fn, *args_, **kwargs_)
            for fn, args_, kwargs_ in callable_functions
        ]

    # Leaving the ``with`` block shuts the pool down and waits for every
    # future, so each one is done; collect outcomes in submission order.
    outcomes: List[Union[Any, Exception]] = []
    for future in futures:
        try:
            outcomes.append(future.result())
        except Exception as exc:
            outcomes.append(exc)
    return outcomes

@ -1,80 +0,0 @@
import functools
import logging
import threading
import warnings
def retry_decorator(max_retries: int = 5):
    """
    Decorator that retries a function if it raises, making up to
    *max_retries* total attempts.

    Fix: the original looped ``max_retries`` times swallowing failures and
    then made ONE EXTRA, unlogged call outside the loop — up to
    ``max_retries + 1`` calls, contradicting its own docstring. This
    version performs exactly ``max_retries`` attempts (minimum one) and
    re-raises the final exception.

    Args:
        max_retries (int): The maximum number of attempts; at least one
            call is always made.

    Returns:
        function: The decorated function.
    """

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            attempts = max(max_retries, 1)
            for attempt in range(1, attempts + 1):
                try:
                    return func(*args, **kwargs)
                except Exception as error:
                    logging.error(
                        f" Error in {func.__name__}:"
                        f" {str(error)} Retrying ...."
                    )
                    # Out of attempts: surface the original exception
                    # instead of silently swallowing it.
                    if attempt == attempts:
                        raise

        return wrapper

    return decorator
def singleton_decorator(cls):
    """Class decorator: construct ``cls`` at most once and always hand
    back that single shared instance."""
    _instances = {}

    def get_instance(*args, **kwargs):
        # EAFP: hit the cache first; only construct on the first miss.
        try:
            return _instances[cls]
        except KeyError:
            _instances[cls] = cls(*args, **kwargs)
            return _instances[cls]

    return get_instance
def synchronized_decorator(func):
    """
    Serialize calls to *func* behind a per-function lock so only one
    thread executes it at a time.

    The lock lives on the function object itself (``__lock__``), as in
    the original. Fix: ``functools.wraps`` is added so the wrapper keeps
    the wrapped function's name and docstring, consistent with the other
    decorators in this module.
    """
    func.__lock__ = threading.Lock()

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with func.__lock__:
            return func(*args, **kwargs)

    return wrapper
def deprecated_decorator(func):
    """Emit a ``DeprecationWarning`` on every call to *func*, then
    delegate to it unchanged."""

    @functools.wraps(func)
    def inner(*args, **kwargs):
        warnings.warn(
            f"{func.__name__} is deprecated",
            category=DeprecationWarning,
        )
        return func(*args, **kwargs)

    return inner
def validate_inputs_decorator(validator):
    """Build a decorator that runs *validator* over the call's arguments
    and raises ``ValueError`` when the validator rejects them."""

    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            # Guard clause: only forward the call when validation passes.
            if validator(*args, **kwargs):
                return func(*args, **kwargs)
            raise ValueError("Invalid Inputs")

        return inner

    return decorator

@ -1,23 +1,33 @@
import os
import uuid
from loguru import logger
WORKSPACE_DIR = os.getenv("WORKSPACE_DIR")
def initialize_logger(log_folder: str = "logs"):
    """
    Attach a per-module file sink to the shared loguru logger and return it.

    Creates ``$WORKSPACE_DIR/<log_folder>/`` and adds a uniquely named
    (UUID-suffixed) log file there so concurrent runs do not clobber each
    other's logs. Sinks rotate out after 10 days and are zip-compressed.

    Args:
        log_folder: Subfolder (and file-name prefix) for this module's logs.

    Returns:
        The shared loguru ``logger`` with the new sink attached.
    """
    # Fix: os.getenv returns None when WORKSPACE_DIR is unset, which
    # would crash os.path.join below — fall back to ./workspace.
    # NOTE(review): confirm the intended default workspace location.
    workspace_dir = os.getenv("WORKSPACE_DIR") or os.path.join(
        os.getcwd(), "workspace"
    )
    # exist_ok avoids the check-then-create race when several modules
    # initialize their loggers at the same time.
    os.makedirs(workspace_dir, exist_ok=True)

    # Create a folder within the workspace dir for this module's logs.
    log_folder_path = os.path.join(workspace_dir, log_folder)
    os.makedirs(log_folder_path, exist_ok=True)

    # Generate a unique identifier for the log file.
    log_file_path = os.path.join(
        log_folder_path, f"{log_folder}_{uuid.uuid4()}.log"
    )

    logger.add(
        log_file_path,
        level="INFO",
        colorize=True,
        backtrace=True,
        diagnose=True,
        enqueue=True,
        retention="10 days",
        compression="zip",
    )
    return logger

@ -1,11 +1,13 @@
import subprocess
from typing import Any, Dict, List
from loguru import logger
from swarms.utils.loguru_logger import initialize_logger
from pydantic import BaseModel
from swarms.structs.agent import Agent
logger = initialize_logger(log_folder="pandas_utils")
try:
import pandas as pd

@ -1,108 +0,0 @@
import datetime
import os
import platform
import traceback
from loguru import logger
# Remove default logger configuration (all previously registered loguru
# sinks), so only the sink added below emits.
logger.remove()

# Define the path for the log folder: ./errors under the process's
# current working directory.
log_folder = os.path.join(os.getcwd(), "errors")

try:
    # Create the log folder if it doesn't exist.
    os.makedirs(log_folder, exist_ok=True)
except PermissionError:
    logger.error(f"Permission denied: '{log_folder}'")
except Exception as e:
    logger.error(
        f"An error occurred while creating the log folder: {e}"
    )
else:
    # If the folder was created successfully, add a new logger.
    # ``{time}`` in the file name lets loguru timestamp each log file.
    logger.add(
        os.path.join(log_folder, "error_{time}.log"),
        level="ERROR",
        format="<red>{time}</red> - <level>{level}</level> - <level>{message}</level>",
    )
def report_error(error: Exception):
    """
    Logs an error message and provides instructions for reporting the issue on Swarms GitHub
    or joining the community on Discord for real-time support.

    Args:
        error (Exception): The exception that occurred.

    Returns:
        None

    Raises:
        None
    """
    # Gather extensive context information.
    # NOTE(review): traceback.format_exc() only carries a real stack
    # trace when this runs inside the ``except`` block handling *error*;
    # elsewhere it yields "NoneType: None" — confirm call sites.
    context_info = {
        "exception_type": type(error).__name__,
        "exception_message": str(error),
        "stack_trace": traceback.format_exc(),
        "timestamp": datetime.datetime.now().isoformat(),
        "python_version": platform.python_version(),
        "platform": platform.platform(),
        "machine": platform.machine(),
        "processor": platform.processor(),
        # USER is the usual POSIX variable, USERNAME the Windows one.
        "user": os.getenv("USER") or os.getenv("USERNAME"),
        "current_working_directory": os.getcwd(),
    }
    # Assemble a banner-style report: error, stack trace, environment
    # context, then support links.
    error_message = (
        f"\n"
        f"------------------Error: {error}-----------------------\n"
        f"#########################################\n"
        f"# #\n"
        f"# ERROR DETECTED! #\n"
        f"# #\n"
        f"# #\n"
        f"# #\n"
        f"# #\n"
        f"#########################################\n"
        f"\n"
        f"Error Message: {context_info['exception_message']} ({context_info['exception_type']})\n"
        f"\n"
        f"Stack Trace:\n{context_info['stack_trace']}\n"
        f"\n"
        f"Context Information:\n"
        f"-----------------------------------------\n"
        f"Timestamp: {context_info['timestamp']}\n"
        f"Python Version: {context_info['python_version']}\n"
        f"Platform: {context_info['platform']}\n"
        f"Machine: {context_info['machine']}\n"
        f"Processor: {context_info['processor']}\n"
        f"User: {context_info['user']}\n"
        f"Current Working Directory: {context_info['current_working_directory']}\n"
        f"-----------------------------------------\n"
        f"\n"
        "Support"
        f"\n"
        f"\n"
        f"To report this issue, please visit the Swarms GitHub Issues page:\n"
        f"https://github.com/kyegomez/swarms/issues\n"
        f"\n"
        f"You can also join the Swarms community on Discord for real-time support:\n"
        f"https://discord.com/servers/agora-999382051935506503\n"
        f"\n"
        f"#########################################\n"
        f"-----------------------------------------\n"
    )
    # logger.error returns None, so the function returns None as documented.
    return logger.error(error_message)
# # Example usage:
# try:
# # Simulate an error
# raise ValueError("An example error")
# except Exception as e:
# report_error(e)

@ -1,9 +1,12 @@
import os
import psutil
from typing import Callable, Any
from loguru import logger
import functools
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="run_on_cpu")
def run_on_cpu(func: Callable) -> Callable:
"""

@ -1,34 +0,0 @@
from typing import Union, Dict, List
from swarms.artifacts.main_artifact import Artifact
def handle_artifact_outputs(
    file_path: str,
    data: Union[str, Dict, List],
    output_type: str = "txt",
    folder_path: str = "./artifacts",
) -> str:
    """
    Handle different types of data and create files in various formats.

    Args:
        file_path: Path where the file should be saved
        data: Input data that can be string, dict or list
        output_type: Type of output file (txt, md, pdf, csv, json)
        folder_path: Folder to save artifacts

    Returns:
        str: Path to the created file
    """
    # Create artifact with appropriate file type.
    artifact = Artifact(
        folder_path=folder_path,
        file_path=file_path,
        file_type=output_type,
        contents=data,
        edit_count=0,
    )

    # Persist in the requested format.
    artifact.save_as(output_format=output_type)

    # Fix: the signature and docstring promise the created file's path,
    # but the original implicitly returned None — return it explicitly.
    return file_path

@ -1,6 +1,9 @@
from loguru import logger
from typing import List, Union, Callable, Optional
from typing import Callable, List, Optional, Union
from swarms.structs.agent import Agent
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="swarm_reliability_checks")
def reliability_check(

@ -1,13 +1,16 @@
import os
from typing import Any
from clusterops import (
execute_on_gpu,
execute_on_multiple_gpus,
execute_with_cpu_cores,
list_available_gpus,
)
from loguru import logger
from swarms.utils.loguru_logger import initialize_logger
logger = initialize_logger(log_folder="clusterops_wrapper")
def exec_callable_with_clusterops(

Loading…
Cancel
Save