From e738b32259fc03659afcd6ec2ebfe92fb5d7eb1c Mon Sep 17 00:00:00 2001
From: mike dupont
Date: Wed, 11 Dec 2024 18:18:17 -0500
Subject: [PATCH] adding hunter telehack

---
 api/main.py                             |  34 +++++-
 api/test2.sh                            |   1 +
 api/uvicorn_runner.py                   |  16 ++-
 swarms/telemetry/auto_upgrade_swarms.py |  41 +++++++
 swarms/telemetry/bootup.py              |   6 +-
 swarms/telemetry/capture_sys_data.py    |  35 ++++--
 swarms/telemetry/sentry_active.py       |  20 +++-
 swarms/utils/disable_logging.py         | 153 +++++++++++++-----------
 swarms/utils/loguru_logger.py           |   5 +-
 9 files changed, 216 insertions(+), 95 deletions(-)
 create mode 100644 swarms/telemetry/auto_upgrade_swarms.py

diff --git a/api/main.py b/api/main.py
index fa61e475..e71a150e 100644
--- a/api/main.py
+++ b/api/main.py
@@ -9,10 +9,10 @@ from pathlib import Path
 from typing import Any, Dict, List, Optional
 from uuid import UUID, uuid4
 
+import hunter
 import uvicorn
 from dotenv import load_dotenv
-from fastapi import (BackgroundTasks, Depends, FastAPI, Header, HTTPException,
-                     Query, Request, status)
+from fastapi import BackgroundTasks, FastAPI, HTTPException, Query, status
 from fastapi.middleware.cors import CORSMiddleware
 from loguru import logger
 from pydantic import BaseModel, Field
@@ -20,6 +20,36 @@ from pydantic import BaseModel, Field
 
 from swarms.structs.agent import Agent
 
+# Rough instrumentation: count trace events by module and event kind.
+seen = {}
+kind = {}
+call_printer = hunter.CallPrinter()
+
+
+def foo(x):
+    # Print each new event kind and each new module once.
+    m = x.module
+    k = x.kind
+
+    if k not in kind:
+        print("KIND", x)
+        kind[k] = 1
+
+    if "swarms" in m:
+        # hunter actions are callables; invoke the shared printer on the event.
+        call_printer(x)
+
+    if m not in seen:
+        print("MOD", m)
+        seen[m] = 1
+    else:
+        seen[m] = seen[m] + 1
+
+hunter.trace(
+    stdlib=False,
+    action=foo,
+)
+
 #print("starting")
 # Load environment variables
 load_dotenv()
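Note: hunter invokes the `action` callable once per trace event; each event exposes `.kind`, `.module`, and `.function`, and `hunter.stop()` detaches the tracer. A minimal, self-contained sketch of the callback pattern used above (`counts`, `on_event`, and `work` are illustrative names, not part of this patch):

    import hunter

    counts = {}

    def on_event(event):
        # Print each module the first time it appears, then just count it.
        m = event.module
        if m not in counts:
            print("MOD", m)
            counts[m] = 1
        else:
            counts[m] += 1

    hunter.trace(stdlib=False, action=on_event)

    def work():
        return sum(range(3))

    work()
    hunter.stop()  # detach the tracer before inspecting the results
    print(counts)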
diff --git a/api/test2.sh b/api/test2.sh
index 2435b5ee..adadf7f6 100644
--- a/api/test2.sh
+++ b/api/test2.sh
@@ -6,4 +6,5 @@
 
 #/mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/uvicorn \
 . /mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/activate
+pip install hunter
 /mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/python3 ~mdupont/2024/05/swarms/api/uvicorn_runner.py

diff --git a/api/uvicorn_runner.py b/api/uvicorn_runner.py
index e3f61c06..fff31c0e 100644
--- a/api/uvicorn_runner.py
+++ b/api/uvicorn_runner.py
@@ -4,12 +4,6 @@ import re
 import sys
 import pdb
 import logging
-for logger_name in logging.root.manager.loggerDict.keys():
-    print(logger_name)
-    override_logger = logging.getLogger(logger_name)
-for handler in override_logger.handlers:
-    print(handler)
-    handler.setFormatter(formatter)
 
 # from uvicorn.main import main
 # if __name__ == '__main__':
@@ -27,6 +21,9 @@
 import sys
 import uvicorn
 from uvicorn.config import LOGGING_CONFIG
+import hunter
+hunter.trace(stdlib=False,
+             action=hunter.CallPrinter)
 
 def main():
     #root_path = ''
@@ -41,6 +38,13 @@ def main():
     LOGGING_CONFIG["formatters"]["default"]["datefmt"] = date_fmt
     LOGGING_CONFIG["formatters"]["access"]["datefmt"] = date_fmt
     ##
+    for logger_name in logging.root.manager.loggerDict.keys():
+        print(logger_name)
+        override_logger = logging.getLogger(logger_name)
+        for handler in override_logger.handlers:
+            print(handler)
+            handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s", datefmt=date_fmt))  # assumed format string; `formatter` was previously undefined here
+
     uvicorn.run(
         "main:app",
         host="127.0.0.1",
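Note: `logging.root.manager.loggerDict` maps every registered logger name to a Logger (or PlaceHolder) object; `logging.getLogger(name)` is safe for both. A sketch of the reformat-all-handlers idea used in the runner above, with an assumed format string since the patch never defines one:

    import logging

    formatter = logging.Formatter(
        "%(asctime)s %(levelname)s %(name)s %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    for name in logging.root.manager.loggerDict:
        for handler in logging.getLogger(name).handlers:
            handler.setFormatter(formatter)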
+ # ) + # try: + # subprocess.run( + # ["pip", "install", "-U", "swarms"], check=True + # ) + # except subprocess.CalledProcessError: + # logger.info("Attempting to install with pip3...") + # subprocess.run( + # ["pip3", "install", "-U", "swarms"], check=True + # ) + # else: + # logger.info("swarms is up to date!") + # except Exception as e: + # logger.error(e) diff --git a/swarms/telemetry/bootup.py b/swarms/telemetry/bootup.py index 5e38c3ea..628a8d84 100644 --- a/swarms/telemetry/bootup.py +++ b/swarms/telemetry/bootup.py @@ -8,8 +8,8 @@ from swarms.utils.disable_logging import disable_logging def bootup(): """Bootup swarms""" try: - logging.disable(logging.CRITICAL) - os.environ["WANDB_SILENT"] = "true" + #logging.disable(logging.CRITICAL) + #os.environ["WANDB_SILENT"] = "true" # Auto set workspace directory workspace_dir = os.path.join(os.getcwd(), "agent_workspace") @@ -17,7 +17,7 @@ def bootup(): os.makedirs(workspace_dir, exist_ok=True) os.environ["WORKSPACE_DIR"] = workspace_dir - warnings.filterwarnings("ignore", category=DeprecationWarning) + #warnings.filterwarnings("ignore", category=DeprecationWarning) # Use ThreadPoolExecutor to run disable_logging and auto_update concurrently with ThreadPoolExecutor(max_workers=1) as executor: diff --git a/swarms/telemetry/capture_sys_data.py b/swarms/telemetry/capture_sys_data.py index 9ef52976..d1f504a2 100644 --- a/swarms/telemetry/capture_sys_data.py +++ b/swarms/telemetry/capture_sys_data.py @@ -67,13 +67,32 @@ def log_agent_data(data_dict: dict) -> dict | None: logger.error("Empty data dictionary provided") raise ValueError("data_dict cannot be empty") - url = "https://swarms.world/api/get-agents/log-agents" - headers = { - "Content-Type": "application/json", - "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869", - } +# url = "https://swarms.world/api/get-agents/log-agents" +# headers = { +# "Content-Type": "application/json", +# "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869", +# } - requests.post(url, json=data_dict, headers=headers, timeout=10) - # response.raise_for_status() + print(data_dict) + #try: + # response = requests.post( + # url, json=data_dict, headers=headers, timeout=10 + # ) + # response.raise_for_status() - return None + # result = response.json() + # return result + + # except requests.exceptions.Timeout: + # logger.warning("Request timed out") + + # except requests.exceptions.HTTPError as e: + # logger.error(f"HTTP error occurred: {e}") + # if response.status_code == 401: + # logger.error("Authentication failed - check API key") + + # except requests.exceptions.RequestException as e: + # logger.error(f"Error logging agent data: {e}") + + #logger.error("Failed to log agent data") + return {} diff --git a/swarms/telemetry/sentry_active.py b/swarms/telemetry/sentry_active.py index e83fa7ec..f6a60fff 100644 --- a/swarms/telemetry/sentry_active.py +++ b/swarms/telemetry/sentry_active.py @@ -10,14 +10,30 @@ def activate_sentry_async(): use_telementry = os.getenv("USE_TELEMETRY") if use_telementry == "True": + #sentry_sdk.init( + # #dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928", + #) sentry_sdk.init( - dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928", + dsn="https://4fd91d75ad5635da55cdd3069e8fdd97@o4508452173840384.ingest.de.sentry.io/4508452178493520", + # Set traces_sample_rate to 1.0 to capture 100% + # of transactions for 
diff --git a/swarms/telemetry/sentry_active.py b/swarms/telemetry/sentry_active.py
index e83fa7ec..f6a60fff 100644
--- a/swarms/telemetry/sentry_active.py
+++ b/swarms/telemetry/sentry_active.py
@@ -10,14 +10,30 @@ def activate_sentry_async():
     use_telementry = os.getenv("USE_TELEMETRY")
 
     if use_telementry == "True":
+        #sentry_sdk.init(
+        #    #dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928",
+        #)
         sentry_sdk.init(
-            dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928",
+            dsn="https://4fd91d75ad5635da55cdd3069e8fdd97@o4508452173840384.ingest.de.sentry.io/4508452178493520",
+            # Set traces_sample_rate to 1.0 to capture 100%
+            # of transactions for tracing.
             traces_sample_rate=1.0,
+            #traces_sample_rate=1.0,
             profiles_sample_rate=1.0,
             enable_tracing=True,
-            debug=False,  # Set debug to False
+            debug=True,  # verbose SDK output enabled while testing
+
+            _experiments={
+                # Set continuous_profiling_auto_start to True
+                # to start the profiler automatically when possible.
+                "continuous_profiling_auto_start": True,
+            },
         )
 
+#asgi_app = SentryAsgiMiddleware(asgi_app)
+
 
 def activate_sentry():
     t = threading.Thread(target=activate_sentry_async)
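Note: a sketch of the opt-in Sentry initialization above, with the DSN read from the environment instead of hard-coded (SENTRY_DSN is an assumed variable name, not used by this patch; an empty DSN leaves the SDK disabled):

    import os
    import sentry_sdk

    if os.getenv("USE_TELEMETRY") == "True":
        sentry_sdk.init(
            dsn=os.getenv("SENTRY_DSN", ""),  # empty string disables sending
            traces_sample_rate=1.0,           # capture 100% of transactions
            profiles_sample_rate=1.0,
            enable_tracing=True,
            debug=True,                       # verbose SDK output while testing
        )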
diff --git a/swarms/utils/disable_logging.py b/swarms/utils/disable_logging.py
index c2cc4ab5..986f748f 100644
--- a/swarms/utils/disable_logging.py
+++ b/swarms/utils/disable_logging.py
@@ -6,81 +6,90 @@ from threading import Thread
 
 
 def disable_langchain():
-    """
-    Disables the LangChain deprecation warning.
-    """
-    from langchain_core._api.deprecation import (
-        LangChainDeprecationWarning,
-    )
+    pass
+    # """
+    # Disables the LangChain deprecation warning.
+    # """
+    # from langchain_core._api.deprecation import (
+    #     LangChainDeprecationWarning,
+    # )
 
-    # Ignore LangChainDeprecationWarning
-    warnings.filterwarnings(
-        "ignore", category=LangChainDeprecationWarning
-    )
+    # # Ignore LangChainDeprecationWarning
+    # warnings.filterwarnings(
+    #     "ignore", category=LangChainDeprecationWarning
+    # )
 
 
 def disable_logging():
-    """
-    Disables logging for specific modules and sets up file and stream handlers.
-    Runs in a separate thread to avoid blocking the main thread.
-    """
-    os.environ["WORKSPACE_DIR"] = "agent_workspace"
-
-    warnings.filterwarnings("ignore", category=UserWarning)
-
-    # disable tensorflow warnings
-    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
-
-    # Set the logging level for the entire module
-    logging.basicConfig(level=logging.ERROR)
-
-    try:
-        log = logging.getLogger("pytorch")
-        log.propagate = False
-        log.setLevel(logging.ERROR)
-    except Exception as error:
-        print(f"Pytorch logging not disabled: {error}")
-
-    logger_names = [
-        "tensorflow",
-        "h5py",
-        "numexpr",
-        "git",
-        "wandb.docker.auth",
-        "langchain",
-        "distutils",
-        "urllib3",
-        "elasticsearch",
-        "packaging",
-    ]
-
-    # Use concurrent futures to set the level for each logger concurrently
-    with concurrent.futures.ThreadPoolExecutor() as executor:
-        executor.map(set_logger_level, logger_names)
-
-    # Remove all existing handlers
-    logging.getLogger().handlers = []
-
-    # Get the workspace directory from the environment variables
-    workspace_dir = os.environ["WORKSPACE_DIR"]
-
-    # Check if the workspace directory exists, if not, create it
-    if not os.path.exists(workspace_dir):
-        os.makedirs(workspace_dir)
-
-    # Create a file handler to log errors to the file
-    file_handler = logging.FileHandler(
-        os.path.join(workspace_dir, "error.txt")
-    )
-    file_handler.setLevel(logging.ERROR)
-    logging.getLogger().addHandler(file_handler)
-
-    # Create a stream handler to log errors to the terminal
-    stream_handler = logging.StreamHandler()
-    stream_handler.setLevel(logging.ERROR)
-    logging.getLogger().addHandler(stream_handler)
-
-    disable_langchain()
+    # Original body kept below (commented out); just reformat the live loggers.
+    for logger_name in logging.root.manager.loggerDict.keys():
+        print("LOGGER", logger_name)
+        override_logger = logging.getLogger(logger_name)
+        for handler in override_logger.handlers:
+            print(handler)
+            handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s"))  # assumed format; `formatter` was undefined here
+
+    # """
+    # Disables logging for specific modules and sets up file and stream handlers.
+    # Runs in a separate thread to avoid blocking the main thread.
+    # """
+    # os.environ["WORKSPACE_DIR"] = "agent_workspace"
+
+    # warnings.filterwarnings("ignore", category=UserWarning)
+
+    # # disable tensorflow warnings
+    # os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+
+    # # Set the logging level for the entire module
+    # logging.basicConfig(level=logging.ERROR)
+
+    # try:
+    #     log = logging.getLogger("pytorch")
+    #     log.propagate = False
+    #     log.setLevel(logging.ERROR)
+    # except Exception as error:
+    #     print(f"Pytorch logging not disabled: {error}")
+
+    # logger_names = [
+    #     "tensorflow",
+    #     "h5py",
+    #     "numexpr",
+    #     "git",
+    #     "wandb.docker.auth",
+    #     "langchain",
+    #     "distutils",
+    #     "urllib3",
+    #     "elasticsearch",
+    #     "packaging",
+    # ]
+
+    # # Use concurrent futures to set the level for each logger concurrently
+    # with concurrent.futures.ThreadPoolExecutor() as executor:
+    #     executor.map(set_logger_level, logger_names)
+
+    # # Remove all existing handlers
+    # logging.getLogger().handlers = []
+
+    # # Get the workspace directory from the environment variables
+    # workspace_dir = os.environ["WORKSPACE_DIR"]
+
+    # # Check if the workspace directory exists, if not, create it
+    # if not os.path.exists(workspace_dir):
+    #     os.makedirs(workspace_dir)
+
+    # # Create a file handler to log errors to the file
+    # file_handler = logging.FileHandler(
+    #     os.path.join(workspace_dir, "error.txt")
+    # )
+    # file_handler.setLevel(logging.ERROR)
+    # logging.getLogger().addHandler(file_handler)
+
+    # # Create a stream handler to log errors to the terminal
+    # stream_handler = logging.StreamHandler()
+    # stream_handler.setLevel(logging.ERROR)
+    # logging.getLogger().addHandler(stream_handler)
+
+    # disable_langchain()
 
 
 def set_logger_level(logger_name: str) -> None:
@@ -91,7 +100,7 @@ def set_logger_level(logger_name: str) -> None:
         logger_name (str): The name of the logger to modify.
     """
     logger = logging.getLogger(logger_name)
-    logger.setLevel(logging.CRITICAL)
+    logger.setLevel(logging.DEBUG)  # note: stdlib logging has no TRACE level
 
 
 def start_disable_logging_in_thread():

diff --git a/swarms/utils/loguru_logger.py b/swarms/utils/loguru_logger.py
index af5c7239..e36a1933 100644
--- a/swarms/utils/loguru_logger.py
+++ b/swarms/utils/loguru_logger.py
@@ -21,12 +21,13 @@ def initialize_logger(log_folder: str = "logs"):
     # Generate a unique identifier for the log file
     uuid_for_log = str(uuid.uuid4())
     log_file_path = os.path.join(
-        log_folder_path, f"{log_folder}_{uuid_for_log}.log"
+        #log_folder_path, f"{log_folder}_{uuid_for_log}.log"
+        log_folder_path, f"{log_folder}.log"
     )
 
     logger.add(
         log_file_path,
-        level="INFO",
+        level="TRACE",
         colorize=True,
         backtrace=True,
         diagnose=True,
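Note: loguru, unlike the stdlib logging module, ships a built-in TRACE level, so level="TRACE" is valid for a loguru sink even though logging.TRACE does not exist. A sketch of the fixed-name, TRACE-level sink configured above (the path is illustrative; colorize is omitted because ANSI codes are rarely wanted in a file):

    import os
    from loguru import logger

    log_path = os.path.join("agent_workspace", "logs", "example.log")
    os.makedirs(os.path.dirname(log_path), exist_ok=True)
    logger.add(log_path, level="TRACE", backtrace=True, diagnose=True)
    logger.trace("emitted only to sinks that accept TRACE")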