parent 868a9c9c38
commit c93de1f84e
@@ -0,0 +1,24 @@
+import datetime
+
+class Message:
+    """
+    Represents a message with timestamp and optional metadata.
+
+    Usage
+    --------------
+    mes = Message(
+        sender = "Kye",
+        content = "message"
+    )
+
+    print(mes)
+    """
+
+    def __init__(self, sender, content, metadata=None):
+        self.timestamp = datetime.datetime.now()
+        self.sender = sender
+        self.content = content
+        self.metadata = metadata or {}
+
+    def __repr__(self):
+        return f"{self.timestamp} - {self.sender}: {self.content}"
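For reference, a minimal usage sketch of the Message class added above, assuming the new module is importable (the import path and the metadata key are hypothetical, not part of this commit):

    # from swarms.agents.message import Message  # hypothetical import path

    msg = Message(
        sender="Kye",
        content="Hello, swarm!",
        metadata={"channel": "general"},  # optional; illustrative key, defaults to {}
    )

    print(msg)           # -> "<timestamp> - Kye: Hello, swarm!"
    print(msg.metadata)  # -> {'channel': 'general'}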
@@ -1,33 +1,69 @@
-from agent_protocol import Agent, Step, Task
-from swarms.agents.multi_modal_workers.multi_modal_agent import MultiModalVisualAgent
-
-
-class MultiModalVisualAgent:
-    def __init__(
-        self,
-        agent: MultiModalVisualAgent
-    ):
-        self.agent = agent
-
-    async def run(self, text: str) -> str:
-        #run the multi-modal visual agent with the give task
-        return self.agent.run_text(text)
-
-    async def __call__(self, text: str) -> str:
-        return self.agent.run(text)
-
-    async def plan(self, step: Step) -> Step:
-        task = Agent
-        pass
-
-    async def task_handler(self, task: Task):
-        await self.agent.run()
-
-    async def step_handler(self, step: Step):
-        if step.name == "plan":
-            await self.plan(step)
-        else:
-            await self.agent.run(step)
-
-        return step
-
+from swarms.agents.muti_modal_workers.multi_modal_agent import MultiModalVisualAgent
+
+class MultiModalAgent:
+    """
+    A user-friendly abstraction over the MultiModalVisualAgent that provides a simple interface
+    to process both text and images.
+
+    Initializes the MultiModalAgent.
+
+    Parameters:
+        load_dict (dict, optional): Dictionary of class names and devices to load. Defaults to a basic configuration.
+        temperature (float, optional): Temperature for the OpenAI model. Defaults to 0.
+        default_language (str, optional): Default language for the agent. Defaults to "English".
+
+    Usage
+
+    """
+    def __init__(
+        self,
+        load_dict=None,
+        temperature=0,
+        language: str = "english"
+    ):
+        self.load_dict = load_dict
+        self.temperature = temperature
+        self.language = language
+
+        if load_dict is None:
+            load_dict = {
+                "ImageCaptioning": "default_device"
+            }
+
+        self.agent = MultiModalVisualAgent(
+            load_dict,
+            temperature
+        )
+        self.language = language
+
+    def run_text(self, text, language=None):
+        """Run text through the model"""
+
+        if language is None:
+            language = self.language
+
+        try:
+            self.agent.init_agent(language)
+            return self.agent.run_text(text)
+        except Exception as e:
+            return f"Error processing text: {str(e)}"
+
+    def run_img(self, image_path: str, language=None):
+        """Run an image through the model"""
+        if language is None:
+            language = self.language
+
+        try:
+            return self.agent.run_image(
+                image_path,
+                language
+            )
+        except Exception as error:
+            return f"Error processing image: {str(error)}"
+
+    def clear(self):
+        try:
+            self.agent.clear_memory()
+        except Exception as e:
+            return f"Error cleaning memory: {str(e)}"
+
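A minimal usage sketch for the rewritten MultiModalAgent wrapper, assuming the new module is importable (import path hypothetical) and using only the constructor arguments and methods shown in the diff; the device string and image path are placeholders:

    # from swarms.agents.multi_modal_agent import MultiModalAgent  # hypothetical import path

    agent = MultiModalAgent(
        load_dict={"ImageCaptioning": "default_device"},  # same default the constructor falls back to
        temperature=0,
        language="english",
    )

    print(agent.run_text("Describe what this agent can do."))  # returns an error string on failure
    print(agent.run_img("examples/sample.jpg"))                # placeholder image path
    agent.clear()                                              # clears the wrapped agent's memory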