From 180c290bae13cc4ea48b1877d38bdbc7b44dbd80 Mon Sep 17 00:00:00 2001 From: Zack Date: Fri, 17 Nov 2023 20:16:49 -0800 Subject: [PATCH] refactor: import --- app.py | 108 ++++++++++++++++++++ swarms/modelui/modules/LoRA.py | 6 +- swarms/modelui/modules/block_requests.py | 2 +- swarms/modelui/modules/callbacks.py | 2 +- swarms/modelui/modules/chat.py | 14 +-- swarms/modelui/modules/evaluate.py | 8 +- swarms/modelui/modules/extensions.py | 6 +- swarms/modelui/modules/grammar.py | 2 +- swarms/modelui/modules/html_generator.py | 4 +- swarms/modelui/modules/llama_attn_hijack.py | 4 +- swarms/modelui/modules/loaders.py | 2 +- swarms/modelui/modules/logits.py | 6 +- swarms/modelui/modules/models.py | 38 +++---- swarms/modelui/modules/models_settings.py | 2 +- swarms/modelui/modules/prompts.py | 2 +- swarms/modelui/modules/shared.py | 2 +- swarms/modelui/modules/text_generation.py | 14 +-- swarms/modelui/modules/training.py | 14 +-- swarms/modelui/modules/ui.py | 2 +- swarms/modelui/modules/ui_chat.py | 8 +- swarms/modelui/modules/ui_default.py | 8 +- swarms/modelui/modules/ui_file_saving.py | 4 +- swarms/modelui/modules/ui_model_menu.py | 12 +-- swarms/modelui/modules/ui_notebook.py | 8 +- swarms/modelui/modules/ui_parameters.py | 4 +- swarms/modelui/modules/ui_session.py | 6 +- swarms/modelui/modules/utils.py | 4 +- swarms/modelui/server.py | 19 ++-- 28 files changed, 209 insertions(+), 102 deletions(-) diff --git a/app.py b/app.py index e6387279..7c6032a5 100644 --- a/app.py +++ b/app.py @@ -1,3 +1,111 @@ + +import os +import warnings + +from swarms.modelui.modules.block_requests import OpenMonkeyPatch, RequestBlocker +from swarms.modelui.modules.logging_colors import logger + +os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False' +os.environ['BITSANDBYTES_NOWELCOME'] = '1' +warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated') +warnings.filterwarnings('ignore', category=UserWarning, message='Using the update method is deprecated') 
+warnings.filterwarnings('ignore', category=UserWarning, message='Field "model_name" has conflict') + +with RequestBlocker(): + import gradio as gr + +import matplotlib + +matplotlib.use('Agg') # This fixes LaTeX rendering on some systems + +import json +import os +import sys +import time +from functools import partial +from pathlib import Path +from threading import Lock + +import yaml + +import swarms.modelui.modules.extensions as extensions_module +from swarms.modelui.modules import ( + chat, + shared, + training, + ui, + ui_chat, + ui_default, + ui_file_saving, + ui_model_menu, + ui_notebook, + ui_parameters, + ui_session, + utils +) +from swarms.modelui.modules.extensions import apply_extensions +from swarms.modelui.modules.LoRA import add_lora_to_model +from swarms.modelui.modules.models import load_model +from swarms.modelui.modules.models_settings import ( + get_fallback_settings, + get_model_metadata, + update_model_parameters +) +from swarms.modelui.modules.utils import gradio + +import warnings + +from swarms.modelui.modules.block_requests import OpenMonkeyPatch, RequestBlocker +from swarms.modelui.modules.logging_colors import logger + +os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False' +os.environ['BITSANDBYTES_NOWELCOME'] = '1' +warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated') +warnings.filterwarnings('ignore', category=UserWarning, message='Using the update method is deprecated') +warnings.filterwarnings('ignore', category=UserWarning, message='Field "model_name" has conflict') + +with RequestBlocker(): + import gradio as gr + +import matplotlib + +matplotlib.use('Agg') # This fixes LaTeX rendering on some systems + +import json +import os +import sys +import time +from functools import partial +from pathlib import Path +from threading import Lock + +import yaml + +import swarms.modelui.modules.extensions as extensions_module +from swarms.modelui.modules import ( + chat, + shared, + training, + ui, + 
ui_chat, + ui_default, + ui_file_saving, + ui_model_menu, + ui_notebook, + ui_parameters, + ui_session, + utils +) +from swarms.modelui.modules.extensions import apply_extensions +from swarms.modelui.modules.LoRA import add_lora_to_model +from swarms.modelui.modules.models import load_model +from swarms.modelui.modules.models_settings import ( + get_fallback_settings, + get_model_metadata, + update_model_parameters +) +from swarms.modelui.modules.utils import gradio + import gradio as gr from swarms.tools.tools_controller import MTQuestionAnswerer, load_valid_tools from swarms.tools.singletool import STQuestionAnswerer diff --git a/swarms/modelui/modules/LoRA.py b/swarms/modelui/modules/LoRA.py index 4b119994..59ce0c02 100644 --- a/swarms/modelui/modules/LoRA.py +++ b/swarms/modelui/modules/LoRA.py @@ -4,9 +4,9 @@ import torch from peft import PeftModel from transformers import is_torch_xpu_available -import modules.shared as shared -from modules.logging_colors import logger -from modules.models import reload_model +import swarms.modelui.modules.shared as shared +from swarms.modelui.modules.logging_colors import logger +from swarms.modelui.modules.models import reload_model def add_lora_to_model(lora_names): diff --git a/swarms/modelui/modules/block_requests.py b/swarms/modelui/modules/block_requests.py index 38f1a17f..3079f878 100644 --- a/swarms/modelui/modules/block_requests.py +++ b/swarms/modelui/modules/block_requests.py @@ -3,7 +3,7 @@ import io import requests -from modules.logging_colors import logger +from swarms.modelui.modules.logging_colors import logger original_open = open original_get = requests.get diff --git a/swarms/modelui/modules/callbacks.py b/swarms/modelui/modules/callbacks.py index bb979a6c..4020b413 100644 --- a/swarms/modelui/modules/callbacks.py +++ b/swarms/modelui/modules/callbacks.py @@ -7,7 +7,7 @@ import torch import transformers from transformers import is_torch_xpu_available -import modules.shared as shared +import 
swarms.modelui.modules.shared as shared class _StopEverythingStoppingCriteria(transformers.StoppingCriteria): diff --git a/swarms/modelui/modules/chat.py b/swarms/modelui/modules/chat.py index 4c518d33..101674d7 100644 --- a/swarms/modelui/modules/chat.py +++ b/swarms/modelui/modules/chat.py @@ -9,18 +9,18 @@ from pathlib import Path import gradio as gr import yaml -from PIL import Image +# from PIL import image -import modules.shared as shared -from modules.extensions import apply_extensions -from modules.html_generator import chat_html_wrapper, make_thumbnail -from modules.logging_colors import logger -from modules.text_generation import ( +import swarms.modelui.modules.shared as shared +from swarms.modelui.modules.extensions import apply_extensions +from swarms.modelui.modules.html_generator import chat_html_wrapper, make_thumbnail +from swarms.modelui.modules.logging_colors import logger +from swarms.modelui.modules.text_generation import ( generate_reply, get_encoded_length, get_max_prompt_length ) -from modules.utils import ( +from swarms.modelui.modules.utils import ( delete_file, get_available_characters, replace_all, diff --git a/swarms/modelui/modules/evaluate.py b/swarms/modelui/modules/evaluate.py index 4b5335ff..f8731a8f 100644 --- a/swarms/modelui/modules/evaluate.py +++ b/swarms/modelui/modules/evaluate.py @@ -6,10 +6,10 @@ import torch from datasets import load_dataset from tqdm import tqdm -from modules import shared -from modules.models import clear_torch_cache, load_model, unload_model -from modules.models_settings import get_model_metadata, update_model_parameters -from modules.text_generation import encode +from swarms.modelui.modules import shared +from swarms.modelui.modules.models import clear_torch_cache, load_model, unload_model +from swarms.modelui.modules.models_settings import get_model_metadata, update_model_parameters +from swarms.modelui.modules.text_generation import encode def load_past_evaluations(): diff --git 
a/swarms/modelui/modules/extensions.py b/swarms/modelui/modules/extensions.py index 6c072504..2ac8d0e5 100644 --- a/swarms/modelui/modules/extensions.py +++ b/swarms/modelui/modules/extensions.py @@ -4,9 +4,9 @@ from inspect import signature import gradio as gr -import extensions -import modules.shared as shared -from modules.logging_colors import logger +import swarms.modelui.extensions +import swarms.modelui.modules.shared as shared +from swarms.modelui.modules.logging_colors import logger state = {} available_extensions = [] diff --git a/swarms/modelui/modules/grammar.py b/swarms/modelui/modules/grammar.py index 5f6ad3a6..bbda7fc2 100644 --- a/swarms/modelui/modules/grammar.py +++ b/swarms/modelui/modules/grammar.py @@ -1,7 +1,7 @@ from torch_grammar import GrammarSampler from transformers.generation.logits_process import LogitsProcessor -from modules import shared +from swarms.modelui.modules import shared sampler = None grammar = None diff --git a/swarms/modelui/modules/html_generator.py b/swarms/modelui/modules/html_generator.py index 26e47848..2d0a63e8 100644 --- a/swarms/modelui/modules/html_generator.py +++ b/swarms/modelui/modules/html_generator.py @@ -7,7 +7,7 @@ from pathlib import Path import markdown from PIL import Image, ImageOps -from modules.utils import get_available_chat_styles +from swarms.modelui.modules.utils import get_available_chat_styles # This is to store the paths to the thumbnails of the profile pictures image_cache = {} @@ -22,7 +22,7 @@ with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r # Custom chat styles chat_styles = {} for k in get_available_chat_styles(): - chat_styles[k] = open(Path(f'css/chat_style-{k}.css'), 'r').read() + chat_styles[k] = open(Path(f'../css/chat_style-{k}.css'), 'r').read() # Handle styles that derive from other styles for k in chat_styles: diff --git a/swarms/modelui/modules/llama_attn_hijack.py b/swarms/modelui/modules/llama_attn_hijack.py index 00436fb2..83829e41 100644 --- 
a/swarms/modelui/modules/llama_attn_hijack.py +++ b/swarms/modelui/modules/llama_attn_hijack.py @@ -5,8 +5,8 @@ from typing import Optional, Tuple import torch import torch.nn as nn -import modules.shared as shared -from modules.logging_colors import logger +import swarms.modelui.modules.shared as shared +from swarms.modelui.modules.logging_colors import logger if shared.args.xformers: try: diff --git a/swarms/modelui/modules/loaders.py b/swarms/modelui/modules/loaders.py index b3763f06..aeb4989a 100644 --- a/swarms/modelui/modules/loaders.py +++ b/swarms/modelui/modules/loaders.py @@ -3,7 +3,7 @@ from collections import OrderedDict import gradio as gr -from modules import shared +from swarms.modelui.modules import shared loaders_and_params = OrderedDict({ 'Transformers': [ diff --git a/swarms/modelui/modules/logits.py b/swarms/modelui/modules/logits.py index e356a986..e3d93d17 100644 --- a/swarms/modelui/modules/logits.py +++ b/swarms/modelui/modules/logits.py @@ -1,9 +1,9 @@ import torch from transformers import is_torch_xpu_available -from modules import sampler_hijack, shared -from modules.logging_colors import logger -from modules.text_generation import generate_reply +from swarms.modelui.modules import sampler_hijack, shared +from swarms.modelui.modules.logging_colors import logger +from swarms.modelui.modules.text_generation import generate_reply global_scores = None diff --git a/swarms/modelui/modules/models.py b/swarms/modelui/modules/models.py index 19c0d903..b01a1214 100644 --- a/swarms/modelui/modules/models.py +++ b/swarms/modelui/modules/models.py @@ -19,10 +19,10 @@ from transformers import ( GPTQConfig ) -import modules.shared as shared -from modules import RoPE, llama_attn_hijack, sampler_hijack -from modules.logging_colors import logger -from modules.models_settings import get_model_metadata +import swarms.modelui.modules.shared as shared +from swarms.modelui.modules import RoPE, llama_attn_hijack, sampler_hijack +from 
swarms.modelui.modules.logging_colors import logger +from swarms.modelui.modules.models_settings import get_model_metadata transformers.logging.set_verbosity_error() @@ -34,7 +34,7 @@ if shared.args.deepspeed: is_deepspeed_zero3_enabled ) - from modules.deepspeed_parameters import generate_ds_config + from swarms.modelui.modules.deepspeed_parameters import generate_ds_config # Distributed setup local_rank = shared.args.local_rank if shared.args.local_rank is not None else int(os.getenv("LOCAL_RANK", "0")) @@ -210,7 +210,7 @@ def huggingface_loader(model_name): model, dtype=torch.int8, max_memory=params['max_memory'], - no_split_module_classes=model._no_split_modules + no_split_module_classes=model._no_split_modules ) if shared.args.disk: @@ -237,7 +237,7 @@ def huggingface_loader(model_name): def llamacpp_loader(model_name): - from modules.llamacpp_model import LlamaCppModel + from swarms.modelui.modules.llamacpp_model import LlamaCppModel path = Path(f'{shared.args.model_dir}/{model_name}') if path.is_file(): @@ -251,7 +251,7 @@ def llamacpp_loader(model_name): def llamacpp_HF_loader(model_name): - from modules.llamacpp_hf import LlamacppHF + from swarms.modelui.modules.llamacpp_hf import LlamacppHF for fname in [model_name, "oobabooga_llama-tokenizer", "llama-tokenizer"]: path = Path(f'{shared.args.model_dir}/{fname}') @@ -276,7 +276,7 @@ def llamacpp_HF_loader(model_name): def ctransformers_loader(model_name): - from modules.ctransformers_model import CtransformersModel + from swarms.modelui.modules.ctransformers_model import CtransformersModel path = Path(f'{shared.args.model_dir}/{model_name}') ctrans = CtransformersModel() @@ -325,47 +325,47 @@ def GPTQ_loader(model_name): # Monkey patch if shared.args.monkey_patch: logger.warning("Applying the monkey patch for using LoRAs with GPTQ models. 
It may cause undefined behavior outside its intended scope.") - from modules.monkey_patch_gptq_lora import load_model_llama + from swarms.modelui.modules.monkey_patch_gptq_lora import load_model_llama model, _ = load_model_llama(model_name) # No monkey patch else: - import modules.GPTQ_loader + import swarms.modelui.modules.GPTQ_loader - model = modules.GPTQ_loader.load_quantized(model_name) + model = swarms.modelui.modules.GPTQ_loader.load_quantized(model_name) return model def AutoGPTQ_loader(model_name): - import modules.AutoGPTQ_loader + import swarms.modelui.modules.AutoGPTQ_loader - return modules.AutoGPTQ_loader.load_quantized(model_name) + return swarms.modelui.modules.AutoGPTQ_loader.load_quantized(model_name) def ExLlama_loader(model_name): - from modules.exllama import ExllamaModel + from swarms.modelui.modules.exllama import ExllamaModel model, tokenizer = ExllamaModel.from_pretrained(model_name) return model, tokenizer def ExLlama_HF_loader(model_name): - from modules.exllama_hf import ExllamaHF + from swarms.modelui.modules.exllama_hf import ExllamaHF return ExllamaHF.from_pretrained(model_name) def ExLlamav2_loader(model_name): - from modules.exllamav2 import Exllamav2Model + from swarms.modelui.modules.exllamav2 import Exllamav2Model model, tokenizer = Exllamav2Model.from_pretrained(model_name) return model, tokenizer def ExLlamav2_HF_loader(model_name): - from modules.exllamav2_hf import Exllamav2HF + from swarms.modelui.modules.exllamav2_hf import Exllamav2HF return Exllamav2HF.from_pretrained(model_name) @@ -375,7 +375,7 @@ def RWKV_loader(model_name): This loader is not currently maintained as RWKV can now be loaded through the transformers library. 
''' - from modules.RWKV import RWKVModel, RWKVTokenizer + from swarms.modelui.modules.RWKV import RWKVModel, RWKVTokenizer model = RWKVModel.from_pretrained( Path(f'{shared.args.model_dir}/{model_name}'), diff --git a/swarms/modelui/modules/models_settings.py b/swarms/modelui/modules/models_settings.py index ebe4fddc..da0dde61 100644 --- a/swarms/modelui/modules/models_settings.py +++ b/swarms/modelui/modules/models_settings.py @@ -4,7 +4,7 @@ from pathlib import Path import yaml -from modules import loaders, metadata_gguf, shared, ui +from swarms.modelui.modules import loaders, metadata_gguf, shared, ui def get_fallback_settings(): diff --git a/swarms/modelui/modules/prompts.py b/swarms/modelui/modules/prompts.py index 565c2450..0f8cddf1 100644 --- a/swarms/modelui/modules/prompts.py +++ b/swarms/modelui/modules/prompts.py @@ -1,6 +1,6 @@ from pathlib import Path -from modules.text_generation import get_encoded_length +from swarms.modelui.modules.text_generation import get_encoded_length def load_prompt(fname): diff --git a/swarms/modelui/modules/shared.py b/swarms/modelui/modules/shared.py index d40a1e77..09a23e0e 100644 --- a/swarms/modelui/modules/shared.py +++ b/swarms/modelui/modules/shared.py @@ -6,7 +6,7 @@ from pathlib import Path import yaml -from modules.logging_colors import logger +from swarms.modelui.modules.logging_colors import logger # Model variables model = None diff --git a/swarms/modelui/modules/text_generation.py b/swarms/modelui/modules/text_generation.py index 6034ef31..1c4a9fa9 100644 --- a/swarms/modelui/modules/text_generation.py +++ b/swarms/modelui/modules/text_generation.py @@ -11,17 +11,17 @@ import torch import transformers from transformers import LogitsProcessorList, is_torch_xpu_available -import modules.shared as shared -from modules.callbacks import ( +import swarms.modelui.modules.shared as shared +from swarms.modelui.modules.callbacks import ( Iteratorize, Stream, _StopEverythingStoppingCriteria ) -from modules.extensions 
import apply_extensions -from modules.grammar import GrammarLogitsProcessor -from modules.html_generator import generate_4chan_html, generate_basic_html -from modules.logging_colors import logger -from modules.models import clear_torch_cache, local_rank +from swarms.modelui.modules.extensions import apply_extensions +from swarms.modelui.modules.grammar import GrammarLogitsProcessor +from swarms.modelui.modules.html_generator import generate_4chan_html, generate_basic_html +from swarms.modelui.modules.logging_colors import logger +from swarms.modelui.modules.models import clear_torch_cache, local_rank def generate_reply(*args, **kwargs): diff --git a/swarms/modelui/modules/training.py b/swarms/modelui/modules/training.py index c01f27db..ceec9db3 100644 --- a/swarms/modelui/modules/training.py +++ b/swarms/modelui/modules/training.py @@ -31,15 +31,15 @@ from transformers.models.auto.modeling_auto import ( MODEL_FOR_CAUSAL_LM_MAPPING_NAMES ) -from modules import shared, ui, utils -from modules.evaluate import ( +from swarms.modelui.modules import shared, ui, utils +from swarms.modelui.modules.evaluate import ( calculate_perplexity, generate_markdown_table, save_past_evaluations ) -from modules.logging_colors import logger -from modules.models import reload_model -from modules.utils import natural_keys +from swarms.modelui.modules.logging_colors import logger +from swarms.modelui.modules.models import reload_model +from swarms.modelui.modules.utils import natural_keys MODEL_CLASSES = {v[1]: v[0] for v in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.items()} PARAMETERS = ["lora_name", "always_override", "q_proj_en", "v_proj_en", "k_proj_en", "o_proj_en", "gate_proj_en", "down_proj_en", "up_proj_en", "save_steps", "micro_batch_size", "batch_size", "epochs", "learning_rate", "lr_scheduler_type", "lora_rank", "lora_alpha", "lora_dropout", "cutoff_len", "dataset", "eval_dataset", "format", "eval_steps", "raw_text_file", "overlap_len", "newline_favor_len", "higher_rank_limit", 
"warmup_steps", "optimizer", "hard_cut_string", "train_only_after", "stop_at_loss", "add_eos_token", "min_chars", "report_to"] @@ -69,7 +69,7 @@ def create_ui(): always_override = gr.Checkbox(label='Override Existing Files', value=False, info='If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).', elem_classes=['no-background']) with gr.Accordion(label='Target Modules', open=False): - gr.Markdown("Selects which modules to target in training. Targeting more modules is closer to a full fine-tune at the cost of increased VRAM requirements and adapter size.\nNOTE: Only works for model_id='llama', other types will retain default training behavior and not use these settings.") + gr.Markdown("Selects which swarms.modelui.modules to target in training. Targeting more swarms.modelui.modules is closer to a full fine-tune at the cost of increased VRAM requirements and adapter size.\nNOTE: Only works for model_id='llama', other types will retain default training behavior and not use these settings.") with gr.Row(): with gr.Column(): q_proj_en = gr.Checkbox(label='Enable q_proj', value=True) @@ -339,7 +339,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en: shared.tokenizer.pad_token_id = 0 shared.tokenizer.padding_side = "left" - # Populate target_modules list with chosen X_proj modules. Llama-based models only atm, non-llama will revert to default behavior. + # Populate target_ swarms.modelui.modules list with chosen X_proj swarms.modelui.modules. Llama-based models only atm, non-llama will revert to default behavior. 
def list_target_modules(model_id): if model_id != "llama": return model_to_lora_modules[model_id] diff --git a/swarms/modelui/modules/ui.py b/swarms/modelui/modules/ui.py index de649668..919664a6 100644 --- a/swarms/modelui/modules/ui.py +++ b/swarms/modelui/modules/ui.py @@ -6,7 +6,7 @@ import torch import yaml from transformers import is_torch_xpu_available -from modules import shared +from swarms.modelui.modules import shared with open(Path(__file__).resolve().parent / '../css/NotoSans/stylesheet.css', 'r') as f: css = f.read() diff --git a/swarms/modelui/modules/ui_chat.py b/swarms/modelui/modules/ui_chat.py index b3cff3d6..ca48c8b6 100644 --- a/swarms/modelui/modules/ui_chat.py +++ b/swarms/modelui/modules/ui_chat.py @@ -5,10 +5,10 @@ from pathlib import Path import gradio as gr from PIL import Image -from modules import chat, shared, ui, utils -from modules.html_generator import chat_html_wrapper -from modules.text_generation import stop_everything_event -from modules.utils import gradio +from swarms.modelui.modules import chat, shared, ui, utils +from swarms.modelui.modules.html_generator import chat_html_wrapper +from swarms.modelui.modules.text_generation import stop_everything_event +from swarms.modelui.modules.utils import gradio inputs = ('Chat input', 'interface_state') reload_arr = ('history', 'name1', 'name2', 'mode', 'chat_style') diff --git a/swarms/modelui/modules/ui_default.py b/swarms/modelui/modules/ui_default.py index 7db6f0d9..2305df52 100644 --- a/swarms/modelui/modules/ui_default.py +++ b/swarms/modelui/modules/ui_default.py @@ -1,13 +1,13 @@ import gradio as gr -from modules import logits, shared, ui, utils -from modules.prompts import count_tokens, load_prompt -from modules.text_generation import ( +from swarms.modelui.modules import logits, shared, ui, utils +from swarms.modelui.modules.prompts import count_tokens, load_prompt +from swarms.modelui.modules.text_generation import ( generate_reply_wrapper, get_token_ids, 
stop_everything_event ) -from modules.utils import gradio +from swarms.modelui.modules.utils import gradio inputs = ('textbox-default', 'interface_state') outputs = ('output_textbox', 'html-default') diff --git a/swarms/modelui/modules/ui_file_saving.py b/swarms/modelui/modules/ui_file_saving.py index 1625c830..fccf9c97 100644 --- a/swarms/modelui/modules/ui_file_saving.py +++ b/swarms/modelui/modules/ui_file_saving.py @@ -1,7 +1,7 @@ import gradio as gr -from modules import chat, presets, shared, ui, utils -from modules.utils import gradio +from swarms.modelui.modules import chat, presets, shared, ui, utils +from swarms.modelui.modules.utils import gradio def create_ui(): diff --git a/swarms/modelui/modules/ui_model_menu.py b/swarms/modelui/modules/ui_model_menu.py index 67396b78..f78bc603 100644 --- a/swarms/modelui/modules/ui_model_menu.py +++ b/swarms/modelui/modules/ui_model_menu.py @@ -10,17 +10,17 @@ import psutil import torch from transformers import is_torch_xpu_available -from modules import loaders, shared, ui, utils -from modules.logging_colors import logger -from modules.LoRA import add_lora_to_model -from modules.models import load_model, unload_model -from modules.models_settings import ( +from swarms.modelui.modules import loaders, shared, ui, utils +from swarms.modelui.modules.logging_colors import logger +from swarms.modelui.modules.LoRA import add_lora_to_model +from swarms.modelui.modules.models import load_model, unload_model +from swarms.modelui.modules.models_settings import ( apply_model_settings_to_state, get_model_metadata, save_model_settings, update_model_parameters ) -from modules.utils import gradio +from swarms.modelui.modules.utils import gradio def create_ui(): diff --git a/swarms/modelui/modules/ui_notebook.py b/swarms/modelui/modules/ui_notebook.py index 6bd5c919..9574775b 100644 --- a/swarms/modelui/modules/ui_notebook.py +++ b/swarms/modelui/modules/ui_notebook.py @@ -1,13 +1,13 @@ import gradio as gr -from modules import 
logits, shared, ui, utils -from modules.prompts import count_tokens, load_prompt -from modules.text_generation import ( +from swarms.modelui.modules import logits, shared, ui, utils +from swarms.modelui.modules.prompts import count_tokens, load_prompt +from swarms.modelui.modules.text_generation import ( generate_reply_wrapper, get_token_ids, stop_everything_event ) -from modules.utils import gradio +from swarms.modelui.modules.utils import gradio inputs = ('textbox-notebook', 'interface_state') outputs = ('textbox-notebook', 'html-notebook') diff --git a/swarms/modelui/modules/ui_parameters.py b/swarms/modelui/modules/ui_parameters.py index fa245c4d..67cc3389 100644 --- a/swarms/modelui/modules/ui_parameters.py +++ b/swarms/modelui/modules/ui_parameters.py @@ -2,8 +2,8 @@ from pathlib import Path import gradio as gr -from modules import loaders, presets, shared, ui, ui_chat, utils -from modules.utils import gradio +from swarms.modelui.modules import loaders, presets, shared, ui, ui_chat, utils +from swarms.modelui.modules.utils import gradio def create_ui(default_preset): diff --git a/swarms/modelui/modules/ui_session.py b/swarms/modelui/modules/ui_session.py index 640b341c..974ca617 100644 --- a/swarms/modelui/modules/ui_session.py +++ b/swarms/modelui/modules/ui_session.py @@ -1,8 +1,8 @@ import gradio as gr -from modules import shared, ui, utils -from modules.github import clone_or_pull_repository -from modules.utils import gradio +from swarms.modelui.modules import shared, ui, utils +from swarms.modelui.modules.github import clone_or_pull_repository +from swarms.modelui.modules.utils import gradio def create_ui(): diff --git a/swarms/modelui/modules/utils.py b/swarms/modelui/modules/utils.py index 69953da7..34288b9f 100644 --- a/swarms/modelui/modules/utils.py +++ b/swarms/modelui/modules/utils.py @@ -3,8 +3,8 @@ import re from datetime import datetime from pathlib import Path -from modules import github, shared -from modules.logging_colors import logger +from 
swarms.modelui.modules import github, shared +from swarms.modelui.modules.logging_colors import logger # Helper function to get multiple values from shared.gradio diff --git a/swarms/modelui/server.py b/swarms/modelui/server.py index 5cf33150..6f5cc3e5 100644 --- a/swarms/modelui/server.py +++ b/swarms/modelui/server.py @@ -1,9 +1,8 @@ import os import warnings -import modules.one_click_installer_check -from modules.block_requests import OpenMonkeyPatch, RequestBlocker -from modules.logging_colors import logger +from swarms.modelui.modules.block_requests import OpenMonkeyPatch, RequestBlocker +from swarms.modelui.modules.logging_colors import logger os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False' os.environ['BITSANDBYTES_NOWELCOME'] = '1' @@ -28,8 +27,8 @@ from threading import Lock import yaml -import modules.extensions as extensions_module -from modules import ( +import swarms.modelui.modules.extensions as extensions_module +from swarms.modelui.modules import ( chat, shared, training, @@ -43,15 +42,15 @@ from modules import ( ui_session, utils ) -from modules.extensions import apply_extensions -from modules.LoRA import add_lora_to_model -from modules.models import load_model -from modules.models_settings import ( +from swarms.modelui.modules.extensions import apply_extensions +from swarms.modelui.modules.LoRA import add_lora_to_model +from swarms.modelui.modules.models import load_model +from swarms.modelui.modules.models_settings import ( get_fallback_settings, get_model_metadata, update_model_parameters ) -from modules.utils import gradio +from swarms.modelui.modules.utils import gradio def create_interface():