diff --git a/swarms/__init__.py b/swarms/__init__.py
index d876c04e..9ceb78f2 100644
--- a/swarms/__init__.py
+++ b/swarms/__init__.py
@@ -1,18 +1,6 @@
-import logging
-import os
-import warnings
+from swarms.utils.disable_logging import disable_logging
 
-warnings.filterwarnings("ignore", category=UserWarning)
-
-# disable tensorflow warnings
-os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
-
-try:
-    log = logging.getLogger("pytorch")
-    log.propagate = False
-    log.setLevel(logging.ERROR)
-except Exception as error:
-    print(f"Pytorch logging not disabled: {error}")
+disable_logging()
 
 from swarms.agents import *  # noqa: E402, F403
 from swarms.swarms import *  # noqa: E402, F403
diff --git a/swarms/models/base_multimodal_model.py b/swarms/models/base_multimodal_model.py
index 54eed0ed..9f451be0 100644
--- a/swarms/models/base_multimodal_model.py
+++ b/swarms/models/base_multimodal_model.py
@@ -20,6 +20,7 @@ class BaseMultiModalModel:
         max_workers: Optional[int] = 10,
         top_p: Optional[int] = 1,
         top_k: Optional[int] = 50,
+        beautify: Optional[bool] = False,
         device: Optional[str] = "cuda",
         max_new_tokens: Optional[int] = 500,
         retries: Optional[int] = 3,
@@ -30,6 +31,7 @@ class BaseMultiModalModel:
         self.max_workers = max_workers
         self.top_p = top_p
         self.top_k = top_k
+        self.beautify = beautify
         self.device = device
         self.max_new_tokens = max_new_tokens
         self.retries = retries
@@ -206,4 +208,8 @@ class BaseMultiModalModel:
     def get_chat_history_tokens(self):
         """Get the chat history tokens"""
         return self._num_tokens()
-    
\ No newline at end of file
+
+    def print_beautiful(self, content: str, color: str = "cyan"):
+        """Print Beautifully with termcolor"""
+        content = colored(content, color)
+        print(content)
\ No newline at end of file
diff --git a/swarms/models/gpt4_vision_api.py b/swarms/models/gpt4_vision_api.py
index 8cf9371d..6a8b8eb8 100644
--- a/swarms/models/gpt4_vision_api.py
+++ b/swarms/models/gpt4_vision_api.py
@@ -1,3 +1,4 @@
+import logging
 import asyncio
 import base64
 import concurrent.futures
@@ -54,16 +55,27 @@ class GPT4VisionAPI:
         self,
         openai_api_key: str = openai_api_key,
         model_name: str = "gpt-4-vision-preview",
+        logging_enabled: bool = False,
         max_workers: int = 10,
         max_tokens: str = 300,
         openai_proxy: str = "https://api.openai.com/v1/chat/completions",
+        beautify: bool = False
     ):
         super().__init__()
         self.openai_api_key = openai_api_key
+        self.logging_enabled = logging_enabled
         self.model_name = model_name
         self.max_workers = max_workers
         self.max_tokens = max_tokens
         self.openai_proxy = openai_proxy
+        self.beautify = beautify
+
+        if self.logging_enabled:
+            logging.basicConfig(level=logging.DEBUG)
+        else:
+            # Disable debug logs for requests and urllib3
+            logging.getLogger("requests").setLevel(logging.WARNING)
+            logging.getLogger("urllib3").setLevel(logging.WARNING)
 
     def encode_image(self, img: str):
         """Encode image to base64."""
@@ -103,14 +115,18 @@ class GPT4VisionAPI:
             "max_tokens": self.max_tokens,
         }
         response = requests.post(
-            "https://api.openai.com/v1/chat/completions",
+            self.openai_proxy,
             headers=headers,
             json=payload,
         )
         out = response.json()
         content = out["choices"][0]["message"]["content"]
-        print(content)
+
+        if self.beautify:
+            print(colored(content, "cyan"))
+        else:
+            print(content)
     except Exception as error:
         print(f"Error with the request: {error}")
         raise error
 
@@ -151,11 +167,14 @@ class GPT4VisionAPI:
         out = response.json()
         content = out["choices"][0]["message"]["content"]
-        print(content)
+
+        if self.beautify:
+            print(colored(content, "cyan"))
+        else:
+            print(content)
     except Exception as error:
         print(f"Error with the request: {error}")
         raise error
-
 
     # Function to handle vision tasks
     def run_many(
         self,
diff --git a/swarms/utils/disable_logging.py b/swarms/utils/disable_logging.py
new file mode 100644
index 00000000..5b6ec675
--- /dev/null
+++ b/swarms/utils/disable_logging.py
@@ -0,0 +1,25 @@
+
+import logging
+import os
+import warnings
+
+def disable_logging():
+    warnings.filterwarnings("ignore", category=UserWarning)
+
+    # disable tensorflow warnings
+    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
+
+
+    # Set the logging level for the entire module
+    logging.basicConfig(level=logging.WARNING)
+
+    try:
+        log = logging.getLogger("pytorch")
+        log.propagate = False
+        log.setLevel(logging.ERROR)
+    except Exception as error:
+        print(f"Pytorch logging not disabled: {error}")
+
+    for logger_name in ['tensorflow', 'h5py', 'numexpr', 'git', 'wandb.docker.auth']:
+        logger = logging.getLogger(logger_name)
+        logger.setLevel(logging.WARNING)  # Suppress DEBUG and info logs