"""Module bootstrap: environment flags, warning filters, and imports.

Fix: the original file contained this entire section twice (imports, env-var
sets, warning filters, and the RequestBlocker'd gradio import were all
duplicated verbatim); the duplicate run has been removed. Order-sensitive
side effects are preserved: the environment variables are exported before
gradio/bitsandbytes are imported, and the matplotlib backend is selected
immediately after matplotlib is imported.
"""
import os
import warnings

# Must be set before gradio / bitsandbytes are imported, since both read
# these variables at import time.
os.environ['GRADIO_ANALYTICS_ENABLED'] = 'False'
os.environ['BITSANDBYTES_NOWELCOME'] = '1'

warnings.filterwarnings('ignore', category=UserWarning, message='TypedStorage is deprecated')
warnings.filterwarnings('ignore', category=UserWarning, message='Using the update method is deprecated')
warnings.filterwarnings('ignore', category=UserWarning, message='Field "model_name" has conflict')

from swarms.modelui.modules.block_requests import OpenMonkeyPatch, RequestBlocker
from swarms.modelui.modules.logging_colors import logger

# Import gradio with outbound HTTP blocked so it cannot phone home at import time.
with RequestBlocker():
    import gradio as gr

import matplotlib

matplotlib.use('Agg')  # This fixes LaTeX rendering on some systems

# Standard library
import json
import sys
import time
from functools import partial
from multiprocessing import Process
from pathlib import Path
from threading import Lock, Thread

# Third-party
import requests
import yaml
from langchain.schema import AgentFinish

# Project modules
import swarms.modelui.modules.extensions as extensions_module
from swarms.modelui.modules import (
    chat,
    shared,
    training,
    ui,
    ui_chat,
    ui_default,
    ui_file_saving,
    ui_model_menu,
    ui_notebook,
    ui_parameters,
    ui_session,
    utils,
)
from swarms.modelui.modules.extensions import apply_extensions
from swarms.modelui.modules.LoRA import add_lora_to_model
from swarms.modelui.modules.models import load_model
from swarms.modelui.modules.models_settings import (
    get_fallback_settings,
    get_model_metadata,
    update_model_parameters,
)
from swarms.modelui.modules.utils import gradio
from swarms.modelui.server import create_interface
from swarms.tools.singletool import STQuestionAnswerer
from swarms.tools.tools_controller import MTQuestionAnswerer, load_valid_tools
from tool_server import run_tool_server

# True once the local tool-server process has been launched (see start_tool_server).
tool_server_flag = False
def start_tool_server():
    """Launch the tool server in a background process and flag it as running.

    A separate ``Process`` (rather than a thread) gives the server its own
    interpreter, so it does not share the GIL with the gradio UI.
    """
    global tool_server_flag
    server_process = Process(target=run_tool_server)
    server_process.start()
    tool_server_flag = True
# LLM backends selectable in the UI.
available_models = ["ChatGPT", "GPT-3.5"]
DEFAULTMODEL = "ChatGPT"  # "GPT-3.5"

# Tool name -> endpoint URL. Most tools are served by the local tool server
# on port 8079; "klarna" points at an external plugin host. Commented-out
# entries are tools that are currently disabled.
tools_mappings = {
    "klarna": "https://www.klarna.com/",
    "weather": "http://127.0.0.1:8079/tools/weather/",
    # "database": "http://127.0.0.1:8079/tools/database/",
    # "db_diag": "http://127.0.0.1:8079/tools/db_diag/",
    "chemical-prop": "http://127.0.0.1:8079/tools/chemical-prop/",
    "douban-film": "http://127.0.0.1:8079/tools/douban-film/",
    "wikipedia": "http://127.0.0.1:8079/tools/wikipedia/",
    # "wikidata": "http://127.0.0.1:8079/tools/kg/wikidata/",
    "wolframalpha": "http://127.0.0.1:8079/tools/wolframalpha/",
    "bing_search": "http://127.0.0.1:8079/tools/bing_search/",
    "office-ppt": "http://127.0.0.1:8079/tools/office-ppt/",
    "stock": "http://127.0.0.1:8079/tools/stock/",
    "bing_map": "http://127.0.0.1:8079/tools/map.bing_map/",
    # "baidu_map": "http://127.0.0.1:8079/tools/map/baidu_map/",
    "zillow": "http://127.0.0.1:8079/tools/zillow/",
    "airbnb": "http://127.0.0.1:8079/tools/airbnb/",
    "job_search": "http://127.0.0.1:8079/tools/job_search/",
    # "baidu-translation": "http://127.0.0.1:8079/tools/translation/baidu-translation/",
    # "nllb-translation": "http://127.0.0.1:8079/tools/translation/nllb-translation/",
    "tutorial": "http://127.0.0.1:8079/tools/tutorial/",
    "file_operation": "http://127.0.0.1:8079/tools/file_operation/",
    "meta_analysis": "http://127.0.0.1:8079/tools/meta_analysis/",
    "code_interpreter": "http://127.0.0.1:8079/tools/code_interpreter/",
    "arxiv": "http://127.0.0.1:8079/tools/arxiv/",
    "google_places": "http://127.0.0.1:8079/tools/google_places/",
    "google_serper": "http://127.0.0.1:8079/tools/google_serper/",
    "google_scholar": "http://127.0.0.1:8079/tools/google_scholar/",
    "python": "http://127.0.0.1:8079/tools/python/",
    "sceneXplain": "http://127.0.0.1:8079/tools/sceneXplain/",
    "shell": "http://127.0.0.1:8079/tools/shell/",
    "image_generation": "http://127.0.0.1:8079/tools/image_generation/",
    "hugging_tools": "http://127.0.0.1:8079/tools/hugging_tools/",
    "gradio_tools": "http://127.0.0.1:8079/tools/gradio_tools/",
    "travel": "http://127.0.0.1:8079/tools/travel",
    "walmart": "http://127.0.0.1:8079/tools/walmart",
}

# NOTE(review): initialized as a list but later reassigned to the dict
# returned by load_valid_tools() and used with .keys() — if load_tools()
# fails on first call, subsequent .keys() access would break; confirm.
valid_tools_info = []
# Sorted tool names, refreshed by load_tools().
all_tools_list = []

# Close any gradio apps left over from a previous run in this interpreter.
gr.close_all()

# Chat transcript limits: MAX_BOXES counts both sides of each turn.
MAX_TURNS = 30
MAX_BOXES = MAX_TURNS * 2

# Streaming chat state shared across handlers (mutated by answer_by_tools /
# clear_history).
return_msg = []
chat_history = ""
# Seconds to wait for the tool server process to come up (see set_environ).
MAX_SLEEP_TIME = 40
def load_tools():
    """Refresh the reachable-tool registry and return updated UI choices.

    Populates the module-level ``valid_tools_info`` mapping via
    ``load_valid_tools`` and rebuilds the sorted ``all_tools_list``.

    Returns:
        A ``gr.update`` setting the tool checklist's choices.
    """
    global valid_tools_info
    global all_tools_list
    try:
        valid_tools_info = load_valid_tools(tools_mappings)
    except BaseException as e:
        # Deliberate best-effort: report and fall through with whatever
        # registry we already had, so the UI keeps working.
        print(repr(e))
    # Fix: if the very first load fails, valid_tools_info is still the
    # initial [] (a list with no .keys()); guard so we don't crash here.
    all_tools_list = sorted(valid_tools_info) if valid_tools_info else []
    return gr.update(choices=all_tools_list)
def set_environ(OPENAI_API_KEY: str,
                WOLFRAMALPH_APP_ID: str = "",
                WEATHER_API_KEYS: str = "",
                BING_SUBSCRIPT_KEY: str = "",
                ALPHA_VANTAGE_KEY: str = "",
                BING_MAP_KEY: str = "",
                BAIDU_TRANSLATE_KEY: str = "",
                RAPIDAPI_KEY: str = "",
                SERPER_API_KEY: str = "",
                GPLACES_API_KEY: str = "",
                SCENEX_API_KEY: str = "",
                STEAMSHIP_API_KEY: str = "",
                HUGGINGFACE_API_KEY: str = "",
                AMADEUS_ID: str = "",
                AMADEUS_KEY: str = "",):
    """Export the supplied API keys into the process environment and make
    sure the tool server is up.

    Every argument is written to ``os.environ`` under its own name (empty
    strings included). On the first call this also launches the tool server
    and sleeps ``MAX_SLEEP_TIME`` seconds to give it time to start.

    Returns:
        A ``gr.update`` setting the key-status textbox to "OK!".
    """
    credentials = {
        "OPENAI_API_KEY": OPENAI_API_KEY,
        "WOLFRAMALPH_APP_ID": WOLFRAMALPH_APP_ID,
        "WEATHER_API_KEYS": WEATHER_API_KEYS,
        "BING_SUBSCRIPT_KEY": BING_SUBSCRIPT_KEY,
        "ALPHA_VANTAGE_KEY": ALPHA_VANTAGE_KEY,
        "BING_MAP_KEY": BING_MAP_KEY,
        "BAIDU_TRANSLATE_KEY": BAIDU_TRANSLATE_KEY,
        "RAPIDAPI_KEY": RAPIDAPI_KEY,
        "SERPER_API_KEY": SERPER_API_KEY,
        "GPLACES_API_KEY": GPLACES_API_KEY,
        "SCENEX_API_KEY": SCENEX_API_KEY,
        "STEAMSHIP_API_KEY": STEAMSHIP_API_KEY,
        "HUGGINGFACE_API_KEY": HUGGINGFACE_API_KEY,
        "AMADEUS_ID": AMADEUS_ID,
        "AMADEUS_KEY": AMADEUS_KEY,
    }
    os.environ.update(credentials)
    if not tool_server_flag:
        start_tool_server()
        time.sleep(MAX_SLEEP_TIME)
    return gr.update(value="OK!")
def show_avatar_imgs(tools_chosen):
    """Build the avatar-image strip for the chosen tools.

    An empty selection means "all tools". Fixes two defects in the original:
    ``!= None`` is now ``is not None``, and avatars are paired with their tool
    names *before* filtering — previously a tool whose avatar was None shifted
    every later image onto the wrong tool in the zip.

    Returns:
        [gr.update for the avatar HTML (visible), gr.update(visible=True)]
    """
    if len(tools_chosen) == 0:
        tools_chosen = list(valid_tools_info.keys())
    img_template = ' {} '
    # Pair avatar with its tool first, then drop tools that have no avatar.
    avatar_pairs = [(valid_tools_info[tool]['avatar'], tool)
                    for tool in tools_chosen
                    if valid_tools_info[tool]['avatar'] is not None]
    imgs = ' '.join(img_template.format(img, img, tool) for img, tool in avatar_pairs)
    return [gr.update(value='' + imgs + '', visible=True), gr.update(visible=True)]
def answer_by_tools(question, tools_chosen, model_chosen):
    """Answer *question* using the chosen tools, streaming chat updates.

    Generator handler for the gradio chat: each ``yield`` is a list of three
    ``gr.update`` objects refreshing the chatbot (and, on the final yield,
    toggling two other components' visibility). Mutates the module-level
    ``return_msg`` transcript and ``chat_history`` string as it goes; a
    trailing ``'...'`` placeholder message is kept in ``return_msg`` while
    work is in progress and popped before each real append.
    """
    global return_msg
    # Echo the question immediately, with a '...' placeholder for the answer.
    return_msg += [(question, None), (None, '...')]
    yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()]
    OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY', '')
    if len(tools_chosen) == 0:  # if there is no tools chosen, we use all todo (TODO: What if the pool is too large.)
        tools_chosen = list(valid_tools_info.keys())
    if len(tools_chosen) == 1:
        # Single tool selected: use the single-tool answerer.
        answerer = STQuestionAnswerer(OPENAI_API_KEY.strip(), stream_output=True, llm=model_chosen)
        agent_executor = answerer.load_tools(tools_chosen[0], valid_tools_info[tools_chosen[0]],
                                             prompt_type="react-with-tool-description", return_intermediate_steps=True)
    else:
        # Multiple tools: multi-tool answerer over just the selected subset.
        answerer = MTQuestionAnswerer(OPENAI_API_KEY.strip(),
                                      load_valid_tools({k: tools_mappings[k] for k in tools_chosen}),
                                      stream_output=True, llm=model_chosen)
        agent_executor = answerer.build_runner()
    global chat_history
    # Feed the agent the whole running transcript, not just this question.
    chat_history += "Question: " + question + "\n"
    question = chat_history
    for inter in agent_executor(question):
        if isinstance(inter, AgentFinish): continue
        result_str = []
        # Drop the trailing '...' placeholder before appending real content.
        return_msg.pop()
        if isinstance(inter, dict):
            # Dict items carry the agent's final answer under 'output'.
            result_str.append("Answer: {}".format(inter['output']))
            chat_history += "Answer:" + inter['output'] + "\n"
            result_str.append("...")
        else:
            # Otherwise it's an intermediate (action, observation) pair.
            try:
                not_observation = inter[0].log
            except:
                # Some steps have no .log attribute; fall back to the raw item.
                print(inter[0])
                not_observation = inter[0]
            if not not_observation.startswith('Thought:'):
                not_observation = "Thought: " + not_observation
            chat_history += not_observation
            # Normalize spacing after the ReAct keywords for display.
            not_observation = not_observation.replace('Thought:', 'Thought: ')
            not_observation = not_observation.replace('Action:', 'Action: ')
            not_observation = not_observation.replace('Action Input:', 'Action Input: ')
            result_str.append("{}".format(not_observation))
            result_str.append("Action output:\n{}".format(inter[1]))
            chat_history += "\nAction output:" + inter[1] + "\n"
            result_str.append("...")
        return_msg += [(None, result) for result in result_str]
        yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()]
    # Remove the final '...' placeholder and promote the last answer.
    return_msg.pop()
    if return_msg[-1][1].startswith("Answer: "):
        return_msg[-1] = (return_msg[-1][0], return_msg[-1][1].replace("Answer: ",
                                                                      "Final Answer: "))
    yield [gr.update(visible=True, value=return_msg), gr.update(visible=True), gr.update(visible=False)]
def retrieve(tools_search):
    """Filter the tool checklist with the local retrieval endpoint.

    An empty query restores the full tool list; otherwise the retrieval
    server ranks tools for the query and we show its selection.

    Returns:
        A ``gr.update`` setting the checklist's choices.
    """
    if tools_search == "":
        return gr.update(choices=all_tools_list)
    url = "http://127.0.0.1:8079/retrieve"
    param = {
        "query": tools_search
    }
    # Fix: a bounded timeout so a wedged retrieval server cannot hang the
    # UI thread forever (the original call had no timeout).
    response = requests.post(url, json=param, timeout=30)
    result = response.json()
    retrieved_tools = result["tools"]
    return gr.update(choices=retrieved_tools)
def clear_retrieve():
    """Reset the tool search box and restore the unfiltered tool list."""
    cleared_search_box = gr.update(value="")
    full_tool_choices = gr.update(choices=all_tools_list)
    return [cleared_search_box, full_tool_choices]
def clear_history():
    """Wipe the shared conversation state and blank the chatbot display.

    Generator handler: yields a single ``gr.update`` refreshing the chatbot
    with the now-empty transcript.
    """
    global return_msg, chat_history
    return_msg = []
    chat_history = ""
    yield gr.update(visible=True, value=return_msg)
with gr.Blocks() as demo:
with gr.Row():
with gr.Column(scale=14):
gr.Markdown("