adding hunter

telehack
pull/699/head
mike dupont 7 months ago
parent b35f76f029
commit e738b32259

@@ -9,10 +9,10 @@ from pathlib import Path
from typing import Any, Dict, List, Optional
from uuid import UUID, uuid4
import hunter
import uvicorn
from dotenv import load_dotenv
from fastapi import (BackgroundTasks, Depends, FastAPI, Header, HTTPException,
                     Query, Request, status)
from fastapi import BackgroundTasks, FastAPI, HTTPException, Query, status
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from pydantic import BaseModel, Field
@@ -20,6 +20,36 @@ from pydantic import BaseModel, Field
from swarms.structs.agent import Agent
seen = {}
kind = {}
def foo(x):
    # Example event, as printed by hunter:
    # <Event kind='call'
    #  function='<module>'
    #  module='ray.experimental.channel'
    #  filename='/mnt/data1/nix/time/2024/05/31/swarms/api/.venv/lib/python3.10/site-packages/ray/experimental/channel/__init__.py'
    #  lineno=1>
    m = x.module
    k = x.kind
    if k not in kind:
        print("KIND", x)
        kind[k] = 1
    if "swarms" in m:
        # CallPrinter is an action class: instantiate it, then call it with the event.
        hunter.CallPrinter()(x)
    if m not in seen:
        print("MOD", m)
        seen[m] = 1
    else:
        seen[m] = seen[m] + 1
hunter.trace(
    stdlib=False,
    action=foo,
)
#print("starting")
# Load environment variables
load_dotenv()
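For context, a minimal standalone sketch of the custom-action hook this hunk wires up (the function and variable names here are illustrative, not part of the commit):

import hunter

counts = {}

def count_module_calls(event):
    # Every hunter event exposes .kind, .module, .function, .filename, .lineno.
    counts[event.module] = counts.get(event.module, 0) + 1

def work():
    return sum(range(10))

hunter.trace(stdlib=False, action=count_module_calls)  # skip the stdlib
work()
hunter.stop()
print(counts)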

@@ -6,4 +6,5 @@
#/mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/uvicorn \
. /mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/activate
pip install hunter
/mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/python3 ~mdupont/2024/05/swarms/api/uvicorn_runner.py

@@ -4,12 +4,6 @@ import re
import sys
import pdb
import logging
for logger_name in logging.root.manager.loggerDict.keys():
    print(logger_name)
    override_logger = logging.getLogger(logger_name)
    for handler in override_logger.handlers:
        print(handler)
        handler.setFormatter(formatter)
# from uvicorn.main import main
# if __name__ == '__main__':
@@ -27,6 +21,9 @@ for handler in override_logger.handlers:
import sys
import uvicorn
from uvicorn.config import LOGGING_CONFIG
import hunter
hunter.trace(stdlib=False,
             action=hunter.CallPrinter)
def main():
    #root_path = ''
@@ -41,6 +38,13 @@ def main():
    LOGGING_CONFIG["formatters"]["default"]["datefmt"] = date_fmt
    LOGGING_CONFIG["formatters"]["access"]["datefmt"] = date_fmt
    ##
    for logger_name in logging.root.manager.loggerDict.keys():
        print(logger_name)
        override_logger = logging.getLogger(logger_name)
        for handler in override_logger.handlers:
            print(handler)
            handler.setFormatter(formatter)
    uvicorn.run(
        "main:app",
        host="127.0.0.1",

@@ -0,0 +1,41 @@
import os
import subprocess
from swarms.utils.loguru_logger import initialize_logger
from swarms.telemetry.check_update import check_for_update
logger = initialize_logger(log_folder="auto_upgrade_swarms")
def auto_update():
    """auto update swarms"""
    pass
    # try:
    #     # Check if auto-update is disabled
    #     auto_update_enabled = os.getenv(
    #         "SWARMS_AUTOUPDATE_ON", "false"
    #     ).lower()
    #     if auto_update_enabled == "false":
    #         logger.info(
    #             "Auto-update is disabled via SWARMS_AUTOUPDATE_ON"
    #         )
    #         return
    #     outcome = check_for_update()
    #     if outcome is True:
    #         logger.info(
    #             "There is a new version of swarms available! Downloading..."
    #         )
    #         try:
    #             subprocess.run(
    #                 ["pip", "install", "-U", "swarms"], check=True
    #             )
    #         except subprocess.CalledProcessError:
    #             logger.info("Attempting to install with pip3...")
    #             subprocess.run(
    #                 ["pip3", "install", "-U", "swarms"], check=True
    #             )
    #     else:
    #         logger.info("swarms is up to date!")
    # except Exception as e:
    #     logger.error(e)
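One quirk in the commented-out gate above: it only returns early when SWARMS_AUTOUPDATE_ON is exactly "false", so any other value (including typos) lets the update proceed. A stricter opt-in check might look like this sketch (the helper name is hypothetical):

import os

def auto_update_allowed() -> bool:
    # Opt-in: update only when the flag is explicitly "true".
    return os.getenv("SWARMS_AUTOUPDATE_ON", "false").lower() == "true"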

@@ -8,8 +8,8 @@ from swarms.utils.disable_logging import disable_logging
def bootup():
    """Bootup swarms"""
    try:
        logging.disable(logging.CRITICAL)
        os.environ["WANDB_SILENT"] = "true"
        #logging.disable(logging.CRITICAL)
        #os.environ["WANDB_SILENT"] = "true"
        # Auto set workspace directory
        workspace_dir = os.path.join(os.getcwd(), "agent_workspace")
@@ -17,7 +17,7 @@ def bootup():
        os.makedirs(workspace_dir, exist_ok=True)
        os.environ["WORKSPACE_DIR"] = workspace_dir
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        #warnings.filterwarnings("ignore", category=DeprecationWarning)
        # Use ThreadPoolExecutor to run disable_logging and auto_update concurrently
        with ThreadPoolExecutor(max_workers=1) as executor:
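For reference, the logging.disable(logging.CRITICAL) call being commented out above suppresses every record at or below CRITICAL process-wide; a small sketch of the on/off behavior:

import logging

logging.basicConfig(level=logging.INFO)
logging.disable(logging.CRITICAL)  # silence everything up to CRITICAL
logging.getLogger(__name__).error("hidden")
logging.disable(logging.NOTSET)  # restore normal filtering
logging.getLogger(__name__).error("visible again")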

@@ -67,13 +67,32 @@ def log_agent_data(data_dict: dict) -> dict | None:
logger.error("Empty data dictionary provided")
raise ValueError("data_dict cannot be empty")
url = "https://swarms.world/api/get-agents/log-agents"
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
}
# url = "https://swarms.world/api/get-agents/log-agents"
# headers = {
# "Content-Type": "application/json",
# "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
# }
requests.post(url, json=data_dict, headers=headers, timeout=10)
print(data_dict)
#try:
# response = requests.post(
# url, json=data_dict, headers=headers, timeout=10
# )
# response.raise_for_status()
return None
# result = response.json()
# return result
# except requests.exceptions.Timeout:
# logger.warning("Request timed out")
# except requests.exceptions.HTTPError as e:
# logger.error(f"HTTP error occurred: {e}")
# if response.status_code == 401:
# logger.error("Authentication failed - check API key")
# except requests.exceptions.RequestException as e:
# logger.error(f"Error logging agent data: {e}")
#logger.error("Failed to log agent data")
return {}
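With the URL, headers, and POST handling commented out, the function effectively reduces to a local echo; a condensed sketch of the behavior this hunk leaves behind:

def log_agent_data(data_dict: dict) -> dict | None:
    if not data_dict:
        raise ValueError("data_dict cannot be empty")
    # Telemetry disabled: print locally instead of POSTing to swarms.world.
    print(data_dict)
    return None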

@@ -10,14 +10,30 @@ def activate_sentry_async():
    use_telementry = os.getenv("USE_TELEMETRY")
    if use_telementry == "True":
        #sentry_sdk.init(
        #    #dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928",
        #)
        sentry_sdk.init(
            dsn="https://5d72dd59551c02f78391d2ea5872ddd4@o4504578305490944.ingest.us.sentry.io/4506951704444928",
            dsn="https://4fd91d75ad5635da55cdd3069e8fdd97@o4508452173840384.ingest.de.sentry.io/4508452178493520",
            # Set traces_sample_rate to 1.0 to capture 100%
            # of transactions for tracing.
            traces_sample_rate=1.0,
            #traces_sample_rate=1.0,
            profiles_sample_rate=1.0,
            enable_tracing=True,
            debug=False,  # Set debug to False
            debug=True,  # Set debug to True
            _experiments={
                # Set continuous_profiling_auto_start to True
                # to automatically start the profiler when possible.
                "continuous_profiling_auto_start": True,
            },
        )
        #asgi_app = SentryAsgiMiddleware(asgi_app)
def activate_sentry():
    t = threading.Thread(target=activate_sentry_async)
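The hunk display cuts off right after the thread is created; the usual completion of this fire-and-forget pattern looks like the sketch below (the daemon flag is an assumption, not shown in the diff):

import threading

def activate_sentry_async():
    ...  # Sentry init as in the hunk above

def activate_sentry():
    t = threading.Thread(target=activate_sentry_async, daemon=True)
    t.start()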

@@ -6,81 +6,90 @@ from threading import Thread
def disable_langchain():
    """
    Disables the LangChain deprecation warning.
    """
    from langchain_core._api.deprecation import (
        LangChainDeprecationWarning,
    )
    pass
    # """
    # Disables the LangChain deprecation warning.
    # """
    # from langchain_core._api.deprecation import (
    #     LangChainDeprecationWarning,
    # )
    # Ignore LangChainDeprecationWarning
    warnings.filterwarnings(
        "ignore", category=LangChainDeprecationWarning
    )
    # # Ignore LangChainDeprecationWarning
    # warnings.filterwarnings(
    #     "ignore", category=LangChainDeprecationWarning
    # )
def disable_logging():
    """
    Disables logging for specific modules and sets up file and stream handlers.
    Runs in a separate thread to avoid blocking the main thread.
    """
    os.environ["WORKSPACE_DIR"] = "agent_workspace"
    warnings.filterwarnings("ignore", category=UserWarning)
    # disable tensorflow warnings
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    # Set the logging level for the entire module
    logging.basicConfig(level=logging.ERROR)
    try:
        log = logging.getLogger("pytorch")
        log.propagate = False
        log.setLevel(logging.ERROR)
    except Exception as error:
        print(f"Pytorch logging not disabled: {error}")
    logger_names = [
        "tensorflow",
        "h5py",
        "numexpr",
        "git",
        "wandb.docker.auth",
        "langchain",
        "distutils",
        "urllib3",
        "elasticsearch",
        "packaging",
    ]
    # Use concurrent futures to set the level for each logger concurrently
    with concurrent.futures.ThreadPoolExecutor() as executor:
        executor.map(set_logger_level, logger_names)
    # Remove all existing handlers
    logging.getLogger().handlers = []
    # Get the workspace directory from the environment variables
    workspace_dir = os.environ["WORKSPACE_DIR"]
    # Check if the workspace directory exists, if not, create it
    if not os.path.exists(workspace_dir):
        os.makedirs(workspace_dir)
    # Create a file handler to log errors to the file
    file_handler = logging.FileHandler(
        os.path.join(workspace_dir, "error.txt")
    )
    file_handler.setLevel(logging.ERROR)
    logging.getLogger().addHandler(file_handler)
    # Create a stream handler to log errors to the terminal
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.ERROR)
    logging.getLogger().addHandler(stream_handler)
    disable_langchain()
    pass
    for logger_name in logging.root.manager.loggerDict.keys():
        print("LOGGER", logger_name)
        override_logger = logging.getLogger(logger_name)
        for handler in override_logger.handlers:
            print(handler)
            handler.setFormatter(formatter)
    # """
    # Disables logging for specific modules and sets up file and stream handlers.
    # Runs in a separate thread to avoid blocking the main thread.
    # """
    # os.environ["WORKSPACE_DIR"] = "agent_workspace"
    # warnings.filterwarnings("ignore", category=UserWarning)
    # # disable tensorflow warnings
    # os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
    # # Set the logging level for the entire module
    # logging.basicConfig(level=logging.ERROR)
    # try:
    #     log = logging.getLogger("pytorch")
    #     log.propagate = False
    #     log.setLevel(logging.ERROR)
    # except Exception as error:
    #     print(f"Pytorch logging not disabled: {error}")
    # logger_names = [
    #     "tensorflow",
    #     "h5py",
    #     "numexpr",
    #     "git",
    #     "wandb.docker.auth",
    #     "langchain",
    #     "distutils",
    #     "urllib3",
    #     "elasticsearch",
    #     "packaging",
    # ]
    # # Use concurrent futures to set the level for each logger concurrently
    # with concurrent.futures.ThreadPoolExecutor() as executor:
    #     executor.map(set_logger_level, logger_names)
    # # Remove all existing handlers
    # logging.getLogger().handlers = []
    # # Get the workspace directory from the environment variables
    # workspace_dir = os.environ["WORKSPACE_DIR"]
    # # Check if the workspace directory exists, if not, create it
    # if not os.path.exists(workspace_dir):
    #     os.makedirs(workspace_dir)
    # # Create a file handler to log errors to the file
    # file_handler = logging.FileHandler(
    #     os.path.join(workspace_dir, "error.txt")
    # )
    # file_handler.setLevel(logging.ERROR)
    # logging.getLogger().addHandler(file_handler)
    # # Create a stream handler to log errors to the terminal
    # stream_handler = logging.StreamHandler()
    # stream_handler.setLevel(logging.ERROR)
    # logging.getLogger().addHandler(stream_handler)
    # disable_langchain()
def set_logger_level(logger_name: str) -> None:
@@ -91,7 +100,7 @@ def set_logger_level(logger_name: str) -> None:
        logger_name (str): The name of the logger to modify.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.CRITICAL)
    logger.setLevel(logging.TRACE)
def start_disable_logging_in_thread():
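Caveat on the new setLevel line above: the stdlib logging module defines no TRACE level, so logging.TRACE raises AttributeError unless one is registered first (loguru, used elsewhere in this commit, does ship a TRACE level). A sketch of registering one (the value 5 is a common convention, not a stdlib constant):

import logging

logging.TRACE = 5  # assumption: custom level, lower than DEBUG (10)
logging.addLevelName(logging.TRACE, "TRACE")
logging.getLogger("example").setLevel(logging.TRACE)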

@@ -21,12 +21,13 @@ def initialize_logger(log_folder: str = "logs"):
    # Generate a unique identifier for the log file
    uuid_for_log = str(uuid.uuid4())
    log_file_path = os.path.join(
        log_folder_path, f"{log_folder}_{uuid_for_log}.log"
        #log_folder_path, f"{log_folder}_{uuid_for_log}.log"
        log_folder_path, f"{log_folder}.log"
    )
    logger.add(
        log_file_path,
        level="INFO",
        level="TRACE",
        colorize=True,
        backtrace=True,
        diagnose=True,
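Two side effects of this hunk worth noting: dropping the UUID suffix means repeated runs append to the same {log_folder}.log file (loguru file sinks append by default), and lowering the sink level to "TRACE" makes loguru's finest severity visible:

from loguru import logger

logger.add("auto_upgrade_swarms.log", level="TRACE")  # file name illustrative
logger.trace("finest-grained message now reaches the file sink")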
