import html
import os
import threading

import gradio as gr
from gradio import Interface
from langchain.llms import OpenAIChat
from swarms.agents import OmniModalAgent
# Initialize the OmniModalAgent backed by a GPT-4 chat model.
# NOTE(review): langchain's OpenAIChat is deprecated in newer langchain
# releases (replaced by ChatOpenAI) — confirm the pinned langchain version.
# Presumably requires OPENAI_API_KEY in the environment; verify at deploy time.
llm = OpenAIChat(model_name="gpt-4")
agent = OmniModalAgent(llm)
# Global variable to store chat history; each entry is a dict with at least
# a "type" key ("user", "text", or "image") and a "content" key.
chat_history = []
def update_chat(user_input):
    """Append the user's message and the agent's reply to the global chat
    history, then return the re-rendered HTML transcript.

    Args:
        user_input: Raw text the user typed into the textbox.

    Returns:
        str: HTML for the full conversation, produced by ``render_chat``.
    """
    global chat_history
    chat_history.append({"type": "user", "content": user_input})
    # Get agent response
    agent_response = agent.run(user_input)
    # Bug fix: render_chat expects dict messages carrying a "type" key, but
    # agent.run may return a plain string — normalize before appending so the
    # next render does not crash on message["type"].
    if isinstance(agent_response, dict):
        chat_history.append(agent_response)
    else:
        chat_history.append({"type": "text", "content": str(agent_response)})
    return render_chat(chat_history)
def render_chat(chat_history):
    """Render the chat history as a single HTML string.

    The original source had unterminated string literals (the HTML markup was
    stripped during some transformation), making the file a SyntaxError; this
    reconstructs a coherent renderer for the same three message types.

    Args:
        chat_history: List of message dicts. Each must have a "type" key
            ("user", "text", or "image") and a "content" key; "timestamp"
            is optional and defaults to "N/A".

    Returns:
        str: HTML markup for the whole conversation.
    """
    parts = ['<div class="chat-container">']
    for message in chat_history:
        timestamp = html.escape(str(message.get('timestamp', 'N/A')))
        if message['type'] == 'user':
            # Escape content: user text is untrusted input interpolated
            # into HTML.
            parts.append(
                f'<div class="message user-message">'
                f'<p>{html.escape(str(message["content"]))}</p>'
                f'<span class="timestamp">{timestamp}</span>'
                f'</div>'
            )
        elif message['type'] == 'text':
            parts.append(
                f'<div class="message agent-message">'
                f'<p>{html.escape(str(message["content"]))}</p>'
                f'<span class="timestamp">{timestamp}</span>'
                f'</div>'
            )
        elif message['type'] == 'image':
            # Images are served from a fixed root directory; "content" is a
            # relative path — TODO confirm the real root against the agent's
            # image output location.
            img_path = os.path.join("root_directory", message['content'])
            parts.append(
                f'<div class="message agent-message">'
                f'<img src="{html.escape(img_path)}" alt="agent image"/>'
                f'<span class="timestamp">{timestamp}</span>'
                f'</div>'
            )
    parts.append('</div>')
    return ''.join(parts)
# Define the Gradio interface.
# Fix: gr.inputs.* / gr.outputs.* were removed in Gradio 3.x — use the
# top-level component classes. allow_flagging expects the string "never"
# (not False) in modern Gradio.
iface = Interface(
    fn=update_chat,
    inputs=gr.Textbox(lines=2, placeholder="Type your message here..."),
    outputs=gr.HTML(label="Chat History"),
    live=True,
    title="Conversational AI Interface",
    description="Chat with our AI agent!",
    allow_flagging="never",
)
iface.launch()