adding hunter

telehack
pull/699/head
mike dupont 7 months ago
parent b35f76f029
commit e738b32259

@@ -9,10 +9,10 @@ from pathlib import Path
 from typing import Any, Dict, List, Optional
 from uuid import UUID, uuid4
+import hunter
 import uvicorn
 from dotenv import load_dotenv
-from fastapi import (BackgroundTasks, Depends, FastAPI, Header, HTTPException,
-                     Query, Request, status)
+from fastapi import BackgroundTasks, FastAPI, HTTPException, Query, status
 from fastapi.middleware.cors import CORSMiddleware
 from loguru import logger
 from pydantic import BaseModel, Field
@@ -20,6 +20,36 @@ from pydantic import BaseModel, Field
 from swarms.structs.agent import Agent
+
+seen = {}
+kind = {}
+
+def foo(x):
+    # x is a hunter Event, e.g.:
+    # <Event kind='call'
+    #  function='<module>'
+    #  module='ray.experimental.channel'
+    #  filename='/mnt/data1/nix/time/2024/05/31/swarms/api/.venv/lib/python3.10/site-packages/ray/experimental/channel/__init__.py'
+    #  lineno=1>
+    m = x.module
+    k = x.kind
+    if k not in kind:
+        print("KIND", x)
+        kind[k] = 1
+    if "swarms" in m:
+        hunter.CallPrinter(x)
+    if m not in seen:
+        print("MOD", m)
+        seen[m] = 1
+    else:
+        seen[m] = seen[m] + 1
+
+hunter.trace(
+    stdlib=False,
+    action=foo
+)
 #print("starting")
 # Load environment variables
 load_dotenv()
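Note on the hunk above: `hunter.CallPrinter(x)` constructs a new printer action for each event but never invokes it, so matching swarms calls are not actually printed. A minimal sketch of the intended pattern, assuming hunter's documented Event/action API (instantiate the printer once, then call it with each event):

    import hunter

    printer = hunter.CallPrinter()  # reusable action; calling it prints an event

    def trace_swarms(event):
        # hunter passes an Event exposing .module, .kind, .function,
        # .filename and .lineno, as in the comment above
        if "swarms" in event.module:
            printer(event)

    hunter.trace(stdlib=False, action=trace_swarms)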

@@ -6,4 +6,5 @@
 #/mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/uvicorn \
 . /mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/activate
+pip install hunter
 /mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/python3 ~mdupont/2024/05/swarms/api/uvicorn_runner.py

@@ -4,12 +4,6 @@ import re
 import sys
 import pdb
 import logging
-for logger_name in logging.root.manager.loggerDict.keys():
-    print(logger_name)
-    override_logger = logging.getLogger(logger_name)
-    for handler in override_logger.handlers:
-        print(handler)
-        handler.setFormatter(formatter)
 # from uvicorn.main import main
 # if __name__ == '__main__':
@@ -27,6 +21,9 @@ for handler in override_logger.handlers:
 import sys
 import uvicorn
 from uvicorn.config import LOGGING_CONFIG
+import hunter
+hunter.trace(stdlib=False,
+             action=hunter.CallPrinter)
 
 def main():
     #root_path = ''
@@ -41,6 +38,13 @@ def main():
     LOGGING_CONFIG["formatters"]["default"]["datefmt"] = date_fmt
     LOGGING_CONFIG["formatters"]["access"]["datefmt"] = date_fmt
     ##
+    for logger_name in logging.root.manager.loggerDict.keys():
+        print(logger_name)
+        override_logger = logging.getLogger(logger_name)
+        for handler in override_logger.handlers:
+            print(handler)
+            handler.setFormatter(formatter)
     uvicorn.run(
         "main:app",
         host="127.0.0.1",

@@ -0,0 +1,41 @@
+import os
+import subprocess
+
+from swarms.utils.loguru_logger import initialize_logger
+from swarms.telemetry.check_update import check_for_update
+
+logger = initialize_logger(log_folder="auto_upgrade_swarms")
+
+
+def auto_update():
+    """auto update swarms"""
+    pass
+    # try:
+    #     # Check if auto-update is disabled
+    #     auto_update_enabled = os.getenv(
+    #         "SWARMS_AUTOUPDATE_ON", "false"
+    #     ).lower()
+    #     if auto_update_enabled == "false":
+    #         logger.info(
+    #             "Auto-update is disabled via SWARMS_AUTOUPDATE_ON"
+    #         )
+    #         return
+    #     outcome = check_for_update()
+    #     if outcome is True:
+    #         logger.info(
+    #             "There is a new version of swarms available! Downloading..."
+    #         )
+    #         try:
+    #             subprocess.run(
+    #                 ["pip", "install", "-U", "swarms"], check=True
+    #             )
+    #         except subprocess.CalledProcessError:
+    #             logger.info("Attempting to install with pip3...")
+    #             subprocess.run(
+    #                 ["pip3", "install", "-U", "swarms"], check=True
+    #             )
+    #     else:
+    #         logger.info("swarms is up to date!")
+    # except Exception as e:
+    #     logger.error(e)
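The new file stubs auto_update out entirely. For reference, the commented-out body reduces to roughly this (a sketch of the disabled logic with the check_for_update() gate omitted, not the shipped implementation):

    import os
    import subprocess

    def auto_update():
        # Skip unless explicitly enabled via SWARMS_AUTOUPDATE_ON
        if os.getenv("SWARMS_AUTOUPDATE_ON", "false").lower() == "false":
            return
        # Upgrade swarms in place, falling back to pip3 if pip fails
        try:
            subprocess.run(["pip", "install", "-U", "swarms"], check=True)
        except subprocess.CalledProcessError:
            subprocess.run(["pip3", "install", "-U", "swarms"], check=True)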

@@ -8,8 +8,8 @@ from swarms.utils.disable_logging import disable_logging
 def bootup():
     """Bootup swarms"""
     try:
-        logging.disable(logging.CRITICAL)
-        os.environ["WANDB_SILENT"] = "true"
+        #logging.disable(logging.CRITICAL)
+        #os.environ["WANDB_SILENT"] = "true"
 
         # Auto set workspace directory
         workspace_dir = os.path.join(os.getcwd(), "agent_workspace")
@@ -17,7 +17,7 @@ def bootup():
         os.makedirs(workspace_dir, exist_ok=True)
         os.environ["WORKSPACE_DIR"] = workspace_dir
 
-        warnings.filterwarnings("ignore", category=DeprecationWarning)
+        #warnings.filterwarnings("ignore", category=DeprecationWarning)
 
         # Use ThreadPoolExecutor to run disable_logging and auto_update concurrently
         with ThreadPoolExecutor(max_workers=1) as executor:

@@ -67,13 +67,32 @@ def log_agent_data(data_dict: dict) -> dict | None:
         logger.error("Empty data dictionary provided")
         raise ValueError("data_dict cannot be empty")
 
-    url = "https://swarms.world/api/get-agents/log-agents"
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
-    }
-    requests.post(url, json=data_dict, headers=headers, timeout=10)
-    # response.raise_for_status()
-    return None
+    # url = "https://swarms.world/api/get-agents/log-agents"
+    # headers = {
+    #     "Content-Type": "application/json",
+    #     "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
+    # }
+    print(data_dict)
+    # try:
+    #     response = requests.post(
+    #         url, json=data_dict, headers=headers, timeout=10
+    #     )
+    #     response.raise_for_status()
+    #     result = response.json()
+    #     return result
+    # except requests.exceptions.Timeout:
+    #     logger.warning("Request timed out")
+    # except requests.exceptions.HTTPError as e:
+    #     logger.error(f"HTTP error occurred: {e}")
+    #     if response.status_code == 401:
+    #         logger.error("Authentication failed - check API key")
+    # except requests.exceptions.RequestException as e:
+    #     logger.error(f"Error logging agent data: {e}")
+    # logger.error("Failed to log agent data")
+    return {}
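The removed request also shipped a hardcoded bearer token in source. If the POST were ever re-enabled, a sketch of reading the credential from the environment instead (SWARMS_API_KEY is an assumed variable name, not from the commit):

    import os
    import requests

    def post_agent_log(data_dict: dict) -> None:
        url = "https://swarms.world/api/get-agents/log-agents"
        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {os.environ['SWARMS_API_KEY']}",
        }
        requests.post(url, json=data_dict, headers=headers, timeout=10)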

@@ -10,14 +10,30 @@ def activate_sentry_async():
     use_telementry = os.getenv("USE_TELEMETRY")
 
     if use_telementry == "True":
+        #sentry_sdk.init(
+        #    #dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928",
+        #)
         sentry_sdk.init(
-            dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928",
-            # Set traces_sample_rate to 1.0 to capture 100%
-            # of transactions for tracing.
+            dsn="https://4fd91d75ad5635da55cdd3069e8fdd97@o4508452173840384.ingest.de.sentry.io/4508452178493520",
             traces_sample_rate=1.0,
+            #traces_sample_rate=1.0,
             profiles_sample_rate=1.0,
             enable_tracing=True,
-            debug=False,  # Set debug to False
+            debug=True,  # Set debug to False
+            _experiments={
+                # Set continuous_profiling_auto_start to True
+                # to automatically start the profiler on when
+                # possible.
+                "continuous_profiling_auto_start": True,
+            },
         )
+        #asgi_app = SentryAsgiMiddleware(asgi_app)
 
 def activate_sentry():
     t = threading.Thread(target=activate_sentry_async)
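The hunk above swaps the DSN and hardcodes debug=True. A sketch of keeping both configurable instead (SENTRY_DSN and SENTRY_DEBUG are assumed variable names, not from the commit):

    import os
    import sentry_sdk

    sentry_sdk.init(
        dsn=os.environ["SENTRY_DSN"],  # assumed env var instead of an inline DSN
        traces_sample_rate=1.0,        # capture 100% of transactions
        profiles_sample_rate=1.0,
        debug=os.getenv("SENTRY_DEBUG", "false").lower() == "true",
    )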

@@ -6,81 +6,90 @@ from threading import Thread
 def disable_langchain():
-    """
-    Disables the LangChain deprecation warning.
-    """
-    from langchain_core._api.deprecation import (
-        LangChainDeprecationWarning,
-    )
-
-    # Ignore LangChainDeprecationWarning
-    warnings.filterwarnings(
-        "ignore", category=LangChainDeprecationWarning
-    )
+    pass
+    # """
+    # Disables the LangChain deprecation warning.
+    # """
+    # from langchain_core._api.deprecation import (
+    #     LangChainDeprecationWarning,
+    # )
+
+    # # Ignore LangChainDeprecationWarning
+    # warnings.filterwarnings(
+    #     "ignore", category=LangChainDeprecationWarning
+    # )
 
 def disable_logging():
-    """
-    Disables logging for specific modules and sets up file and stream handlers.
-    Runs in a separate thread to avoid blocking the main thread.
-    """
-    os.environ["WORKSPACE_DIR"] = "agent_workspace"
-
-    warnings.filterwarnings("ignore", category=UserWarning)
-
-    # disable tensorflow warnings
-    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
-
-    # Set the logging level for the entire module
-    logging.basicConfig(level=logging.ERROR)
-
-    try:
-        log = logging.getLogger("pytorch")
-        log.propagate = False
-        log.setLevel(logging.ERROR)
-    except Exception as error:
-        print(f"Pytorch logging not disabled: {error}")
-
-    logger_names = [
-        "tensorflow",
-        "h5py",
-        "numexpr",
-        "git",
-        "wandb.docker.auth",
-        "langchain",
-        "distutils",
-        "urllib3",
-        "elasticsearch",
-        "packaging",
-    ]
-
-    # Use concurrent futures to set the level for each logger concurrently
-    with concurrent.futures.ThreadPoolExecutor() as executor:
-        executor.map(set_logger_level, logger_names)
-
-    # Remove all existing handlers
-    logging.getLogger().handlers = []
-
-    # Get the workspace directory from the environment variables
-    workspace_dir = os.environ["WORKSPACE_DIR"]
-
-    # Check if the workspace directory exists, if not, create it
-    if not os.path.exists(workspace_dir):
-        os.makedirs(workspace_dir)
-
-    # Create a file handler to log errors to the file
-    file_handler = logging.FileHandler(
-        os.path.join(workspace_dir, "error.txt")
-    )
-    file_handler.setLevel(logging.ERROR)
-    logging.getLogger().addHandler(file_handler)
-
-    # Create a stream handler to log errors to the terminal
-    stream_handler = logging.StreamHandler()
-    stream_handler.setLevel(logging.ERROR)
-    logging.getLogger().addHandler(stream_handler)
-
-    disable_langchain()
+    pass
+    for logger_name in logging.root.manager.loggerDict.keys():
+        print("LOGGER", logger_name)
+        override_logger = logging.getLogger(logger_name)
+        for handler in override_logger.handlers:
+            print(handler)
+            handler.setFormatter(formatter)
+    # """
+    # Disables logging for specific modules and sets up file and stream handlers.
+    # Runs in a separate thread to avoid blocking the main thread.
+    # """
+    # os.environ["WORKSPACE_DIR"] = "agent_workspace"
+    # warnings.filterwarnings("ignore", category=UserWarning)
+
+    # # disable tensorflow warnings
+    # os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+
+    # # Set the logging level for the entire module
+    # logging.basicConfig(level=logging.ERROR)
+
+    # try:
+    #     log = logging.getLogger("pytorch")
+    #     log.propagate = False
+    #     log.setLevel(logging.ERROR)
+    # except Exception as error:
+    #     print(f"Pytorch logging not disabled: {error}")
+
+    # logger_names = [
+    #     "tensorflow",
+    #     "h5py",
+    #     "numexpr",
+    #     "git",
+    #     "wandb.docker.auth",
+    #     "langchain",
+    #     "distutils",
+    #     "urllib3",
+    #     "elasticsearch",
+    #     "packaging",
+    # ]
+
+    # # Use concurrent futures to set the level for each logger concurrently
+    # with concurrent.futures.ThreadPoolExecutor() as executor:
+    #     executor.map(set_logger_level, logger_names)
+
+    # # Remove all existing handlers
+    # logging.getLogger().handlers = []
+
+    # # Get the workspace directory from the environment variables
+    # workspace_dir = os.environ["WORKSPACE_DIR"]
+
+    # # Check if the workspace directory exists, if not, create it
+    # if not os.path.exists(workspace_dir):
+    #     os.makedirs(workspace_dir)
+
+    # # Create a file handler to log errors to the file
+    # file_handler = logging.FileHandler(
+    #     os.path.join(workspace_dir, "error.txt")
+    # )
+    # file_handler.setLevel(logging.ERROR)
+    # logging.getLogger().addHandler(file_handler)
+
+    # # Create a stream handler to log errors to the terminal
+    # stream_handler = logging.StreamHandler()
+    # stream_handler.setLevel(logging.ERROR)
+    # logging.getLogger().addHandler(stream_handler)
+
+    # disable_langchain()
 
 def set_logger_level(logger_name: str) -> None:
@@ -91,7 +100,7 @@ def set_logger_level(logger_name: str) -> None:
         logger_name (str): The name of the logger to modify.
     """
     logger = logging.getLogger(logger_name)
-    logger.setLevel(logging.CRITICAL)
+    logger.setLevel(logging.TRACE)
 
 def start_disable_logging_in_thread():
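Note that the standard library logging module defines no TRACE level, so `logging.TRACE` in the hunk above raises AttributeError unless a level is registered first. A minimal sketch:

    import logging

    TRACE = 5  # numerically below DEBUG (10)
    logging.addLevelName(TRACE, "TRACE")
    logging.TRACE = TRACE  # make logging.TRACE resolvable, as the hunk expects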

@@ -21,12 +21,13 @@ def initialize_logger(log_folder: str = "logs"):
     # Generate a unique identifier for the log file
     uuid_for_log = str(uuid.uuid4())
     log_file_path = os.path.join(
-        log_folder_path, f"{log_folder}_{uuid_for_log}.log"
+        #log_folder_path, f"{log_folder}_{uuid_for_log}.log"
+        log_folder_path, f"{log_folder}.log"
     )
 
     logger.add(
         log_file_path,
-        level="INFO",
+        level="TRACE",
         colorize=True,
         backtrace=True,
         diagnose=True,
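Unlike stdlib logging, loguru ships a built-in TRACE level, so `level="TRACE"` is valid here, and with the fixed filename repeated runs append to one `<log_folder>.log` instead of creating a new UUID-suffixed file per run. A short usage sketch, assuming initialize_logger returns the configured loguru logger as its use in the new auto-upgrade file suggests (the folder name is illustrative):

    from swarms.utils.loguru_logger import initialize_logger

    logger = initialize_logger(log_folder="api_server")  # illustrative name
    logger.trace("trace-level output now lands in api_server.log")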
