pull/58/head
Kye 1 year ago
parent 0eea6aa88e
commit e9b4c953d4

@@ -0,0 +1,27 @@
class AbstractAgent:
    def __init__(
        self,
        llm,
        temperature
    ) -> None:
        pass

    # single query
    def run(self, task: str):
        pass

    # # conversational back and forth
    # def chat(self, message: str):
    #     message_history = []
    #     message_history.append(message)
    #     reply = self.run(message)
    #     message_history.append(reply)
    #     return message_history

    # def step(self, message):
    #     pass

    # def reset(self):
    #     pass
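For illustration only (not part of this commit), a minimal concrete subclass of this interface might look like the sketch below; the `llm` callable and its prompt-in/reply-out contract are assumptions.

# Hypothetical sketch: a concrete agent built on AbstractAgent.
# Assumes `llm` is any callable mapping a prompt string to a reply string.
class EchoAgent(AbstractAgent):
    def __init__(self, llm, temperature=0.5) -> None:
        self.llm = llm
        self.temperature = temperature

    def run(self, task: str):
        # single query: delegate straight to the wrapped model
        return self.llm(task)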

@@ -103,6 +103,7 @@ class OmniModalAgent:
        self.chat_planner = load_chat_planner(llm)
        self.response_generator = load_response_generator(llm)
        # self.task_executor = TaskExecutor
        self.history = []

    def run(

@@ -3,7 +3,7 @@ from abc import ABC, abstractmethod

class AbstractModel(ABC):
    # abstract base class for language models
    @abstractmethod
-    def generate(self, prompt):
+    def run(self, prompt):
        # generate text using the language model
        pass
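As an illustration of the renamed interface (again, not part of the diff), a subclass satisfying AbstractModel could be as small as:

# Hypothetical sketch only: a trivial AbstractModel implementation.
class UppercaseModel(AbstractModel):
    def run(self, prompt):
        # "generates" text by echoing the prompt in uppercase
        return prompt.upper()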

@@ -132,7 +132,9 @@ import interpreter

@tool
def compile(task: str):
    """
-    Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally. You can chat with Open Interpreter through a ChatGPT-like interface in your terminal by running $ interpreter after installing.
+    Open Interpreter lets LLMs run code (Python, Javascript, Shell, and more) locally.
+    You can chat with Open Interpreter through a ChatGPT-like interface in your terminal
+    by running $ interpreter after installing.

    This provides a natural-language interface to your computer's general-purpose capabilities:
@@ -142,7 +144,6 @@ def compile(task: str):
    ...etc.

    Note: You'll be asked to approve code before it's run.
    """
    task = interpreter.chat(task, return_messages=True)
    interpreter.chat()
    interpreter.reset(task)
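A usage sketch for this tool, assuming the @tool decorator here is LangChain's (whose tools are executed via .run); the task string is invented for illustration:

# Hypothetical usage of the compile tool defined above.
# Assumes LangChain's @tool wrapper; the task string is made up.
compile.run("Plot the first 100 Fibonacci numbers")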
@@ -156,7 +157,6 @@ def compile(task: str):
# mm model workers
import torch
from PIL import Image
from transformers import (

@@ -39,7 +39,6 @@ class SpeechToText:
        subprocess.run(["pip", "install", "pydub"])

    def download_youtube_video(self):
        audio_file = f'video.{self.audio_format}'

@@ -142,7 +142,7 @@ class Worker:
            query_website_tool,
            HumanInputRun(),
            compile,
-            # VQAinference
+            # VQAinference,
        ]
        if external_tools is not None:
            self.tools.extend(external_tools)
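For context, a caller could extend the built-in tool list at construction time; Worker's full signature isn't shown in this hunk, so only the `external_tools` keyword is taken from the body above:

# Hypothetical sketch: supplying extra tools to a Worker.
# `my_custom_tool` is an invented placeholder.
worker = Worker(external_tools=[my_custom_tool])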
@@ -311,6 +311,3 @@ class Worker:
        """
        for token in response.split():
            yield token
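The generator above streams whitespace-separated tokens; a caller might consume it as sketched below (the enclosing method's name isn't shown in this hunk, so `stream` is a placeholder):

# Hypothetical consumption of the token generator; `stream` is a
# placeholder name, and the task string is invented.
for token in worker.stream("Summarize the README"):
    print(token, end=" ", flush=True)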
