refactor: import

pull/160/head
Zack 1 year ago
parent b26d595cf6
commit 180c290bae

app.py (108 changed lines)

@@ -1,3 +1,111 @@
+import os
+import warnings
+
+from swarms.modelui.modules.block_requests import OpenMonkeyPatch, RequestBlocker
+from swarms.modelui.modules.logging_colors import logger
+
+os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
+os.environ['BITSANDBYTES_NOWELCOME'] = '1'
+
+warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')
+warnings.filterwarnings('ignore', category=UserWarning, message='Using the update method is deprecated')
+warnings.filterwarnings('ignore', category=UserWarning, message='Field "model_name" has conflict')
+
+with RequestBlocker():
+    import gradio as gr
+
+import matplotlib
+
+matplotlib.use('Agg')  # This fixes LaTeX rendering on some systems
+
+import json
+import os
+import sys
+import time
+from functools import partial
+from pathlib import Path
+from threading import Lock
+
+import yaml
+
+import swarms.modelui.modules.extensions as extensions_module
+from swarms.modelui.modules import (
+    chat,
+    shared,
+    training,
+    ui,
+    ui_chat,
+    ui_default,
+    ui_file_saving,
+    ui_model_menu,
+    ui_notebook,
+    ui_parameters,
+    ui_session,
+    utils
+)
+from swarms.modelui.modules.extensions import apply_extensions
+from swarms.modelui.modules.LoRA import add_lora_to_model
+from swarms.modelui.modules.models import load_model
+from swarms.modelui.modules.models_settings import (
+    get_fallback_settings,
+    get_model_metadata,
+    update_model_parameters
+)
+from swarms.modelui.modules.utils import gradio
+import warnings
+
+from swarms.modelui.modules.block_requests import OpenMonkeyPatch, RequestBlocker
+from swarms.modelui.modules.logging_colors import logger
+
+os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
+os.environ['BITSANDBYTES_NOWELCOME'] = '1'
+
+warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')
+warnings.filterwarnings('ignore', category=UserWarning, message='Using the update method is deprecated')
+warnings.filterwarnings('ignore', category=UserWarning, message='Field "model_name" has conflict')
+
+with RequestBlocker():
+    import gradio as gr
+
+import matplotlib
+
+matplotlib.use('Agg')  # This fixes LaTeX rendering on some systems
+
+import json
+import os
+import sys
+import time
+from functools import partial
+from pathlib import Path
+from threading import Lock
+
+import yaml
+
+import swarms.modelui.modules.extensions as extensions_module
+from swarms.modelui.modules import (
+    chat,
+    shared,
+    training,
+    ui,
+    ui_chat,
+    ui_default,
+    ui_file_saving,
+    ui_model_menu,
+    ui_notebook,
+    ui_parameters,
+    ui_session,
+    utils
+)
+from swarms.modelui.modules.extensions import apply_extensions
+from swarms.modelui.modules.LoRA import add_lora_to_model
+from swarms.modelui.modules.models import load_model
+from swarms.modelui.modules.models_settings import (
+    get_fallback_settings,
+    get_model_metadata,
+    update_model_parameters
+)
+from swarms.modelui.modules.utils import gradio
+
 import gradio as gr
 from swarms.tools.tools_controller import MTQuestionAnswerer, load_valid_tools
 from swarms.tools.singletool import STQuestionAnswerer

@@ -4,9 +4,9 @@ import torch
 from peft import PeftModel
 from transformers import is_torch_xpu_available

-import modules.shared as shared
-from modules.logging_colors import logger
-from modules.models import reload_model
+import swarms.modelui.modules.shared as shared
+from swarms.modelui.modules.logging_colors import logger
+from swarms.modelui.modules.models import reload_model

 def add_lora_to_model(lora_names):

@@ -3,7 +3,7 @@ import io
 import requests

-from modules.logging_colors import logger
+from swarms.modelui.modules.logging_colors import logger

 original_open = open
 original_get = requests.get

@@ -7,7 +7,7 @@ import torch
 import transformers
 from transformers import is_torch_xpu_available

-import modules.shared as shared
+import swarms.modelui.modules.shared as shared

 class _StopEverythingStoppingCriteria(transformers.StoppingCriteria):

@@ -9,18 +9,18 @@ from pathlib import Path
 import gradio as gr
 import yaml
-from PIL import Image
+# from PIL import image

-import modules.shared as shared
-from modules.extensions import apply_extensions
-from modules.html_generator import chat_html_wrapper, make_thumbnail
-from modules.logging_colors import logger
-from modules.text_generation import (
+import swarms.modelui.modules.shared as shared
+from swarms.modelui.modules.extensions import apply_extensions
+from swarms.modelui.modules.html_generator import chat_html_wrapper, make_thumbnail
+from swarms.modelui.modules.logging_colors import logger
+from swarms.modelui.modules.text_generation import (
     generate_reply,
     get_encoded_length,
     get_max_prompt_length
 )
-from modules.utils import (
+from swarms.modelui.modules.utils import (
     delete_file,
     get_available_characters,
     replace_all,

@@ -6,10 +6,10 @@ import torch
 from datasets import load_dataset
 from tqdm import tqdm

-from modules import shared
-from modules.models import clear_torch_cache, load_model, unload_model
-from modules.models_settings import get_model_metadata, update_model_parameters
-from modules.text_generation import encode
+from swarms.modelui.modules import shared
+from swarms.modelui.modules.models import clear_torch_cache, load_model, unload_model
+from swarms.modelui.modules.models_settings import get_model_metadata, update_model_parameters
+from swarms.modelui.modules.text_generation import encode

 def load_past_evaluations():

@@ -4,9 +4,9 @@ from inspect import signature
 import gradio as gr

-import extensions
-import modules.shared as shared
-from modules.logging_colors import logger
+import swarms.modelui.extensions
+import swarms.modelui.modules.shared as shared
+from swarms.modelui.modules.logging_colors import logger

 state = {}
 available_extensions = []

@@ -1,7 +1,7 @@
 from torch_grammar import GrammarSampler
 from transformers.generation.logits_process import LogitsProcessor

-from modules import shared
+from swarms.modelui.modules import shared

 sampler = None
 grammar = None

@@ -7,7 +7,7 @@ from pathlib import Path
 import markdown
 from PIL import Image, ImageOps

-from modules.utils import get_available_chat_styles
+from swarms.modelui.modules.utils import get_available_chat_styles

 # This is to store the paths to the thumbnails of the profile pictures
 image_cache = {}
@@ -22,7 +22,7 @@ with open(Path(__file__).resolve().parent / '../css/html_instruct_style.css', 'r') as f:
 # Custom chat styles
 chat_styles = {}
 for k in get_available_chat_styles():
-    chat_styles[k] = open(Path(f'css/chat_style-{k}.css'), 'r').read()
+    chat_styles[k] = open(Path(f'../css/chat_style-{k}.css'), 'r').read()

 # Handle styles that derive from other styles
 for k in chat_styles:

@@ -5,8 +5,8 @@ from typing import Optional, Tuple
 import torch
 import torch.nn as nn

-import modules.shared as shared
-from modules.logging_colors import logger
+import swarms.modelui.modules.shared as shared
+from swarms.modelui.modules.logging_colors import logger

 if shared.args.xformers:
     try:

@@ -3,7 +3,7 @@ from collections import OrderedDict
 import gradio as gr

-from modules import shared
+from swarms.modelui.modules import shared

 loaders_and_params = OrderedDict({
     'Transformers': [

@@ -1,9 +1,9 @@
 import torch
 from transformers import is_torch_xpu_available

-from modules import sampler_hijack, shared
-from modules.logging_colors import logger
-from modules.text_generation import generate_reply
+from swarms.modelui.modules import sampler_hijack, shared
+from swarms.modelui.modules.logging_colors import logger
+from swarms.modelui.modules.text_generation import generate_reply

 global_scores = None

@@ -19,10 +19,10 @@ from transformers import (
     GPTQConfig
 )

-import modules.shared as shared
-from modules import RoPE, llama_attn_hijack, sampler_hijack
-from modules.logging_colors import logger
-from modules.models_settings import get_model_metadata
+import swarms.modelui.modules.shared as shared
+from swarms.modelui.modules import RoPE, llama_attn_hijack, sampler_hijack
+from swarms.modelui.modules.logging_colors import logger
+from swarms.modelui.modules.models_settings import get_model_metadata

 transformers.logging.set_verbosity_error()
@@ -34,7 +34,7 @@ if shared.args.deepspeed:
         is_deepspeed_zero3_enabled
     )

-    from modules.deepspeed_parameters import generate_ds_config
+    from swarms.modelui.modules.deepspeed_parameters import generate_ds_config

     # Distributed setup
     local_rank = shared.args.local_rank if shared.args.local_rank is not None else int(os.getenv("LOCAL_RANK", "0"))
@@ -210,7 +210,7 @@ def huggingface_loader(model_name):
             model,
             dtype=torch.int8,
             max_memory=params['max_memory'],
-            no_split_module_classes=model._no_split_modules
+            no_split_module_classes=model.swarms.modelui._no_split_modules
         )

     if shared.args.disk:
@@ -237,7 +237,7 @@ def huggingface_loader(model_name):

 def llamacpp_loader(model_name):
-    from modules.llamacpp_model import LlamaCppModel
+    from swarms.modelui.modules.llamacpp_model import LlamaCppModel

     path = Path(f'{shared.args.model_dir}/{model_name}')
     if path.is_file():
@@ -251,7 +251,7 @@ def llamacpp_loader(model_name):

 def llamacpp_HF_loader(model_name):
-    from modules.llamacpp_hf import LlamacppHF
+    from swarms.modelui.modules.llamacpp_hf import LlamacppHF

     for fname in [model_name, "oobabooga_llama-tokenizer", "llama-tokenizer"]:
         path = Path(f'{shared.args.model_dir}/{fname}')
@@ -276,7 +276,7 @@ def llamacpp_HF_loader(model_name):

 def ctransformers_loader(model_name):
-    from modules.ctransformers_model import CtransformersModel
+    from swarms.modelui.modules.ctransformers_model import CtransformersModel

     path = Path(f'{shared.args.model_dir}/{model_name}')
     ctrans = CtransformersModel()
@@ -325,47 +325,47 @@ def GPTQ_loader(model_name):

     # Monkey patch
     if shared.args.monkey_patch:
         logger.warning("Applying the monkey patch for using LoRAs with GPTQ models. It may cause undefined behavior outside its intended scope.")
-        from modules.monkey_patch_gptq_lora import load_model_llama
+        from swarms.modelui.modules.monkey_patch_gptq_lora import load_model_llama

         model, _ = load_model_llama(model_name)

     # No monkey patch
     else:
-        import modules.GPTQ_loader
+        import swarms.modelui.modules.GPTQ_loader

-        model = modules.GPTQ_loader.load_quantized(model_name)
+        model = swarms.modelui.modules.GPTQ_loader.load_quantized(model_name)

     return model


 def AutoGPTQ_loader(model_name):
-    import modules.AutoGPTQ_loader
+    import swarms.modelui.modules.AutoGPTQ_loader

-    return modules.AutoGPTQ_loader.load_quantized(model_name)
+    return swarms.modelui.modules.AutoGPTQ_loader.load_quantized(model_name)


 def ExLlama_loader(model_name):
-    from modules.exllama import ExllamaModel
+    from swarms.modelui.modules.exllama import ExllamaModel

     model, tokenizer = ExllamaModel.from_pretrained(model_name)
     return model, tokenizer


 def ExLlama_HF_loader(model_name):
-    from modules.exllama_hf import ExllamaHF
+    from swarms.modelui.modules.exllama_hf import ExllamaHF

     return ExllamaHF.from_pretrained(model_name)


 def ExLlamav2_loader(model_name):
-    from modules.exllamav2 import Exllamav2Model
+    from swarms.modelui.modules.exllamav2 import Exllamav2Model

     model, tokenizer = Exllamav2Model.from_pretrained(model_name)
     return model, tokenizer


 def ExLlamav2_HF_loader(model_name):
-    from modules.exllamav2_hf import Exllamav2HF
+    from swarms.modelui.modules.exllamav2_hf import Exllamav2HF

     return Exllamav2HF.from_pretrained(model_name)
@@ -375,7 +375,7 @@ def RWKV_loader(model_name):
     This loader is not currently maintained as RWKV can now be loaded
     through the transformers library.
     '''
-    from modules.RWKV import RWKVModel, RWKVTokenizer
+    from swarms.modelui.modules.RWKV import RWKVModel, RWKVTokenizer

     model = RWKVModel.from_pretrained(
         Path(f'{shared.args.model_dir}/{model_name}'),

@@ -4,7 +4,7 @@ from pathlib import Path
 import yaml

-from modules import loaders, metadata_gguf, shared, ui
+from swarms.modelui.modules import loaders, metadata_gguf, shared, ui

 def get_fallback_settings():

@@ -1,6 +1,6 @@
 from pathlib import Path

-from modules.text_generation import get_encoded_length
+from swarms.modelui.modules.text_generation import get_encoded_length

 def load_prompt(fname):

@@ -6,7 +6,7 @@ from pathlib import Path
 import yaml

-from modules.logging_colors import logger
+from swarms.modelui.modules.logging_colors import logger

 # Model variables
 model = None

@@ -11,17 +11,17 @@ import torch
 import transformers
 from transformers import LogitsProcessorList, is_torch_xpu_available

-import modules.shared as shared
-from modules.callbacks import (
+import swarms.modelui.modules.shared as shared
+from swarms.modelui.modules.callbacks import (
     Iteratorize,
     Stream,
     _StopEverythingStoppingCriteria
 )
-from modules.extensions import apply_extensions
-from modules.grammar import GrammarLogitsProcessor
-from modules.html_generator import generate_4chan_html, generate_basic_html
-from modules.logging_colors import logger
-from modules.models import clear_torch_cache, local_rank
+from swarms.modelui.modules.extensions import apply_extensions
+from swarms.modelui.modules.grammar import GrammarLogitsProcessor
+from swarms.modelui.modules.html_generator import generate_4chan_html, generate_basic_html
+from swarms.modelui.modules.logging_colors import logger
+from swarms.modelui.modules.models import clear_torch_cache, local_rank

 def generate_reply(*args, **kwargs):

@@ -31,15 +31,15 @@ from transformers.models.auto.modeling_auto import (
     MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
 )

-from modules import shared, ui, utils
-from modules.evaluate import (
+from swarms.modelui.modules import shared, ui, utils
+from swarms.modelui.modules.evaluate import (
     calculate_perplexity,
     generate_markdown_table,
     save_past_evaluations
 )
-from modules.logging_colors import logger
-from modules.models import reload_model
-from modules.utils import natural_keys
+from swarms.modelui.modules.logging_colors import logger
+from swarms.modelui.modules.models import reload_model
+from swarms.modelui.modules.utils import natural_keys

 MODEL_CLASSES = {v[1]: v[0] for v in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.items()}
 PARAMETERS = ["lora_name", "always_override", "q_proj_en", "v_proj_en", "k_proj_en", "o_proj_en", "gate_proj_en", "down_proj_en", "up_proj_en", "save_steps", "micro_batch_size", "batch_size", "epochs", "learning_rate", "lr_scheduler_type", "lora_rank", "lora_alpha", "lora_dropout", "cutoff_len", "dataset", "eval_dataset", "format", "eval_steps", "raw_text_file", "overlap_len", "newline_favor_len", "higher_rank_limit", "warmup_steps", "optimizer", "hard_cut_string", "train_only_after", "stop_at_loss", "add_eos_token", "min_chars", "report_to"]
@@ -69,7 +69,7 @@ def create_ui():
                 always_override = gr.Checkbox(label='Override Existing Files', value=False, info='If the name is the same, checking will replace the existing file, and unchecking will load and continue from it (the rank must be the same).', elem_classes=['no-background'])

             with gr.Accordion(label='Target Modules', open=False):
-                gr.Markdown("Selects which modules to target in training. Targeting more modules is closer to a full fine-tune at the cost of increased VRAM requirements and adapter size.\nNOTE: Only works for model_id='llama', other types will retain default training behavior and not use these settings.")
+                gr.Markdown("Selects which swarms.modelui.modules to target in training. Targeting more swarms.modelui.modules is closer to a full fine-tune at the cost of increased VRAM requirements and adapter size.\nNOTE: Only works for model_id='llama', other types will retain default training behavior and not use these settings.")
                 with gr.Row():
                     with gr.Column():
                         q_proj_en = gr.Checkbox(label='Enable q_proj', value=True)
@@ -339,7 +339,7 @@ def do_train(lora_name: str, always_override: bool, q_proj_en: bool, v_proj_en:
     shared.tokenizer.pad_token_id = 0
     shared.tokenizer.padding_side = "left"

-    # Populate target_modules list with chosen X_proj modules. Llama-based models only atm, non-llama will revert to default behavior.
+    # Populate target_ swarms.modelui.modules list with chosen X_proj swarms.modelui.modules. Llama-based models only atm, non-llama will revert to default behavior.
     def list_target_modules(model_id):
         if model_id != "llama":
             return model_to_lora_modules[model_id]

@@ -6,7 +6,7 @@ import torch
 import yaml
 from transformers import is_torch_xpu_available

-from modules import shared
+from swarms.modelui.modules import shared

 with open(Path(__file__).resolve().parent / '../css/NotoSans/stylesheet.css', 'r') as f:
     css = f.read()

@@ -5,10 +5,10 @@ from pathlib import Path
 import gradio as gr
 from PIL import Image

-from modules import chat, shared, ui, utils
-from modules.html_generator import chat_html_wrapper
-from modules.text_generation import stop_everything_event
-from modules.utils import gradio
+from swarms.modelui.modules import chat, shared, ui, utils
+from swarms.modelui.modules.html_generator import chat_html_wrapper
+from swarms.modelui.modules.text_generation import stop_everything_event
+from swarms.modelui.modules.utils import gradio

 inputs = ('Chat input', 'interface_state')
 reload_arr = ('history', 'name1', 'name2', 'mode', 'chat_style')

@@ -1,13 +1,13 @@
 import gradio as gr

-from modules import logits, shared, ui, utils
-from modules.prompts import count_tokens, load_prompt
-from modules.text_generation import (
+from swarms.modelui.modules import logits, shared, ui, utils
+from swarms.modelui.modules.prompts import count_tokens, load_prompt
+from swarms.modelui.modules.text_generation import (
     generate_reply_wrapper,
     get_token_ids,
     stop_everything_event
 )
-from modules.utils import gradio
+from swarms.modelui.modules.utils import gradio

 inputs = ('textbox-default', 'interface_state')
 outputs = ('output_textbox', 'html-default')

@@ -1,7 +1,7 @@
 import gradio as gr

-from modules import chat, presets, shared, ui, utils
-from modules.utils import gradio
+from swarms.modelui.modules import chat, presets, shared, ui, utils
+from swarms.modelui.modules.utils import gradio

 def create_ui():

@@ -10,17 +10,17 @@ import psutil
 import torch
 from transformers import is_torch_xpu_available

-from modules import loaders, shared, ui, utils
-from modules.logging_colors import logger
-from modules.LoRA import add_lora_to_model
-from modules.models import load_model, unload_model
-from modules.models_settings import (
+from swarms.modelui.modules import loaders, shared, ui, utils
+from swarms.modelui.modules.logging_colors import logger
+from swarms.modelui.modules.LoRA import add_lora_to_model
+from swarms.modelui.modules.models import load_model, unload_model
+from swarms.modelui.modules.models_settings import (
     apply_model_settings_to_state,
     get_model_metadata,
     save_model_settings,
     update_model_parameters
 )
-from modules.utils import gradio
+from swarms.modelui.modules.utils import gradio

 def create_ui():

@@ -1,13 +1,13 @@
 import gradio as gr

-from modules import logits, shared, ui, utils
-from modules.prompts import count_tokens, load_prompt
-from modules.text_generation import (
+from swarms.modelui.modules import logits, shared, ui, utils
+from swarms.modelui.modules.prompts import count_tokens, load_prompt
+from swarms.modelui.modules.text_generation import (
     generate_reply_wrapper,
     get_token_ids,
     stop_everything_event
 )
-from modules.utils import gradio
+from swarms.modelui.modules.utils import gradio

 inputs = ('textbox-notebook', 'interface_state')
 outputs = ('textbox-notebook', 'html-notebook')

@@ -2,8 +2,8 @@ from pathlib import Path
 import gradio as gr

-from modules import loaders, presets, shared, ui, ui_chat, utils
-from modules.utils import gradio
+from swarms.modelui.modules import loaders, presets, shared, ui, ui_chat, utils
+from swarms.modelui.modules.utils import gradio

 def create_ui(default_preset):

@@ -1,8 +1,8 @@
 import gradio as gr

-from modules import shared, ui, utils
-from modules.github import clone_or_pull_repository
-from modules.utils import gradio
+from swarms.modelui.modules import shared, ui, utils
+from swarms.modelui.modules.github import clone_or_pull_repository
+from swarms.modelui.modules.utils import gradio

 def create_ui():

@@ -3,8 +3,8 @@ import re
 from datetime import datetime
 from pathlib import Path

-from modules import github, shared
-from modules.logging_colors import logger
+from swarms.modelui.modules import github, shared
+from swarms.modelui.modules.logging_colors import logger

 # Helper function to get multiple values from shared.gradio

@@ -1,9 +1,8 @@
 import os
 import warnings

-import modules.one_click_installer_check
-from modules.block_requests import OpenMonkeyPatch, RequestBlocker
-from modules.logging_colors import logger
+from swarms.modelui.modules.block_requests import OpenMonkeyPatch, RequestBlocker
+from swarms.modelui.modules.logging_colors import logger

 os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
 os.environ['BITSANDBYTES_NOWELCOME'] = '1'
@@ -28,8 +27,8 @@ from threading import Lock

 import yaml

-import modules.extensions as extensions_module
-from modules import (
+import swarms.modelui.modules.extensions as extensions_module
+from swarms.modelui.modules import (
     chat,
     shared,
     training,
@@ -43,15 +42,15 @@ from modules import (
     ui_session,
     utils
 )
-from modules.extensions import apply_extensions
-from modules.LoRA import add_lora_to_model
-from modules.models import load_model
-from modules.models_settings import (
+from swarms.modelui.modules.extensions import apply_extensions
+from swarms.modelui.modules.LoRA import add_lora_to_model
+from swarms.modelui.modules.models import load_model
+from swarms.modelui.modules.models_settings import (
     get_fallback_settings,
     get_model_metadata,
     update_model_parameters
 )
-from modules.utils import gradio
+from swarms.modelui.modules.utils import gradio

 def create_interface():
