code quality

Former-commit-id: 42ce6cf18c
discord-bot-framework
Kye 1 year ago
parent 069b2aed45
commit 9c0c6c06cc

@@ -17,7 +17,7 @@ interpreter.api_key = os.getenv("OPENAI_API_KEY")
def split_text(text, chunk_size=1500):
#########################################################################
return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
return [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)]
# discord initial
@@ -32,7 +32,6 @@ model = whisper.load_model("base")
def transcribe(audio):
# load audio and pad/trim it to fit 30 seconds
audio = whisper.load_audio(audio)
audio = whisper.pad_or_trim(audio)
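
The rest of transcribe() falls outside this hunk; a minimal sketch of the remaining steps, assuming the standard openai-whisper decoding API (the helper name is illustrative, and model is the whisper.load_model("base") instance created above):

# illustrative sketch only; the real transcribe() body continues outside this hunk
def transcribe_sketch(audio_path):
    audio = whisper.load_audio(audio_path)
    audio = whisper.pad_or_trim(audio)  # pad/trim to the 30-second window
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    result = whisper.decode(model, mel, whisper.DecodingOptions(fp16=False))
    return result.text
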
@@ -58,13 +57,16 @@ async def on_message(message):
response = []
for chunk in interpreter.chat(message.content, display=False, stream=False):
# await message.channel.send(chunk)
if 'message' in chunk:
response.append(chunk['message'])
if "message" in chunk:
response.append(chunk["message"])
last_response = response[-1]
max_message_length = 2000 # Discord's max message length is 2000 characters
# Splitting the message into chunks of 2000 characters
response_chunks = [last_response[i:i + max_message_length] for i in range(0, len(last_response), max_message_length)]
response_chunks = [
last_response[i : i + max_message_length]
for i in range(0, len(last_response), max_message_length)
]
# Sending each chunk as a separate message
for chunk in response_chunks:
await message.channel.send(chunk)
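
Taken together, the handler keeps only the "message" chunks from interpreter.chat() and splits the final reply so it fits Discord's 2000-character cap; a compact, self-contained sketch of that logic (the helper name is illustrative, not part of the commit):

# illustrative helper, not part of the commit
def build_reply_chunks(prompt: str, limit: int = 2000) -> list:
    parts = []
    for chunk in interpreter.chat(prompt, display=False, stream=False):
        if "message" in chunk:  # keep only assistant message chunks
            parts.append(chunk["message"])
    reply = parts[-1] if parts else ""
    # split so every piece fits Discord's message-length limit
    return [reply[i : i + limit] for i in range(0, len(reply), limit)]
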
@@ -74,9 +76,9 @@ async def on_message(message):
async def join(ctx):
if ctx.author.voice:
channel = ctx.message.author.voice.channel
print('joining..')
print("joining..")
await channel.connect()
print('joined.')
print("joined.")
else:
print("not in a voice channel!")
@@ -92,32 +94,32 @@ async def leave(ctx):
@client.command()
async def listen(ctx):
if ctx.voice_client:
print('trying to listen..')
print("trying to listen..")
ctx.voice_client.start_recording(discord.sinks.WaveSink(), callback, ctx)
print('listening..')
print("listening..")
else:
print("not in a voice channel!")
async def callback(sink: discord.sinks, ctx):
print('in callback..')
print("in callback..")
for user_id, audio in sink.audio_data.items():
if user_id == ctx.author.id:
print('saving audio..')
print("saving audio..")
audio: discord.sinks.core.AudioData = audio
print(user_id)
filename = "audio.wav"
with open(filename, "wb") as f:
f.write(audio.file.getvalue())
print('audio saved.')
print("audio saved.")
transcription = transcribe(filename)
print(transcription)
response = []
for chunk in interpreter.chat(transcription, display=False, stream=True):
# await message.channel.send(chunk)
if 'message' in chunk:
response.append(chunk['message'])
await ctx.message.channel.send(' '.join(response))
if "message" in chunk:
response.append(chunk["message"])
await ctx.message.channel.send(" ".join(response))
@client.command()
@@ -129,4 +131,5 @@ async def stop(ctx):
async def on_ready():
print(f"We have logged in as {client.user}")
client.run(bot_token)

@@ -34,15 +34,13 @@ def get_audio_length(audio_bytes):
def speak(text):
speaking = True
audio = generate(
text=text,
voice="Daniel"
)
audio = generate(text=text, voice="Daniel")
play(audio, notebook=True)
audio_length = get_audio_length(audio)
time.sleep(audio_length)
# @title Text-only JARVIS
# @markdown Run this cell for a ChatGPT-like interface.
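
get_audio_length() itself is outside this hunk; a plausible sketch, assuming the ElevenLabs audio bytes can be decoded with pydub (both the helper name and the pydub dependency are assumptions, not what the file necessarily does):

# assumed implementation, for illustration only
import io
from pydub import AudioSegment

def get_audio_length_sketch(audio_bytes: bytes) -> float:
    segment = AudioSegment.from_file(io.BytesIO(audio_bytes))
    return len(segment) / 1000.0  # pydub reports duration in milliseconds
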
@@ -55,13 +53,11 @@ with gr.Blocks() as demo:
return "", history + [[user_message, None]]
def bot(history):
user_message = history[-1][0]
history[-1][1] = ""
active_block_type = ""
for chunk in interpreter.chat(user_message, stream=True, display=False):
# Message
if "message" in chunk:
if active_block_type != "message":
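
The bot() generator is truncated in this hunk; a simplified sketch of the usual Gradio streaming pattern it follows, accumulating "message" chunks into the last history entry and yielding after each one (handling of code chunks via active_block_type is omitted here):

# simplified sketch; the real bot() also tracks code blocks via active_block_type
def bot_sketch(history):
    user_message = history[-1][0]
    history[-1][1] = ""
    for chunk in interpreter.chat(user_message, stream=True, display=False):
        if "message" in chunk:
            history[-1][1] += chunk["message"]
            yield history  # stream the partial reply to the Chatbot component
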
@@ -96,6 +92,6 @@ with gr.Blocks() as demo:
bot, chatbot, chatbot
)
if __name__ == '__main__':
if __name__ == "__main__":
demo.queue()
demo.launch(debug=True)

@@ -5,6 +5,7 @@ from swarms.agents.stream_response import stream
from swarms.agents.base import AbstractAgent
from swarms.agents.registry import Registry
from swarms.agents.idea_to_image_agent import Idea2Image
"""Agent Infrastructure, models, memory, utils, tools"""
"""Agent Infrastructure, models, memory, utils, tools"""

@@ -108,7 +108,7 @@ class MetaPrompterAgent:
def get_new_instructions(self, meta_output):
"""Get New Instructions from the meta_output"""
delimiter = "Instructions: "
new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter):]
new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :]
return new_instructions
def run(self, task: str):
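
The slice keeps everything after the first occurrence of the delimiter; a small illustration (the meta_output string is made up):

# illustration only
meta_output = "Critique: too verbose. Instructions: Keep answers under three sentences."
delimiter = "Instructions: "
new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :]
print(new_instructions)  # -> "Keep answers under three sentences."
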

@@ -207,12 +207,12 @@ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
kernel[steps:-steps, :steps] = left
kernel[steps:-steps, -steps:] = right
pt_gt_img = easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]]
pt_gt_img = easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]]
gaussian_gt_img = (
kernel * gt_img_array + (1 - kernel) * pt_gt_img
) # gt img with blur img
gaussian_gt_img = gaussian_gt_img.astype(np.int64)
easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]] = gaussian_gt_img
easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]] = gaussian_gt_img
gaussian_img = Image.fromarray(easy_img)
return gaussian_img

@@ -58,7 +58,7 @@ class BaseChunker(ABC):
half_token_count = token_count // 2
if current_separator:
separators = self.separators[self.separators.index(current_separator):]
separators = self.separators[self.separators.index(current_separator) :]
else:
separators = self.separators
@@ -84,7 +84,7 @@ class BaseChunker(ABC):
subchanks[: balance_index + 1]
)
second_subchunk = separator.value + separator.value.join(
subchanks[balance_index + 1:]
subchanks[balance_index + 1 :]
)
else:
first_subchunk = (
@@ -92,7 +92,7 @@ class BaseChunker(ABC):
+ separator.value
)
second_subchunk = separator.value.join(
subchanks[balance_index + 1:]
subchanks[balance_index + 1 :]
)
first_subchunk_rec = self._chunk_recursively(
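
The separator slice above restarts the split search from the current separator and every finer one after it in the configured order; a quick illustration (these separator strings are made up, not BaseChunker's actual defaults):

# illustration only; the separator values are assumptions
separators = ["\n\n", "\n", ". ", " "]
current_separator = "\n"
print(separators[separators.index(current_separator) :])  # ['\n', '. ', ' ']
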

@@ -347,7 +347,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(token[j: j + self.embedding_ctx_length])
tokens.append(token[j : j + self.embedding_ctx_length])
indices.append(i)
batched_embeddings: List[List[float]] = []
@@ -366,7 +366,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in _iter:
response = embed_with_retry(
self,
input=tokens[i: i + _chunk_size],
input=tokens[i : i + _chunk_size],
**self._invocation_params,
)
batched_embeddings.extend(r["embedding"] for r in response["data"])
@@ -428,7 +428,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(token[j: j + self.embedding_ctx_length])
tokens.append(token[j : j + self.embedding_ctx_length])
indices.append(i)
batched_embeddings: List[List[float]] = []
@@ -436,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in range(0, len(tokens), _chunk_size):
response = await async_embed_with_retry(
self,
input=tokens[i: i + _chunk_size],
input=tokens[i : i + _chunk_size],
**self._invocation_params,
)
batched_embeddings.extend(r["embedding"] for r in response["data"])
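
These loops implement two levels of batching: each text's tokens are cut into windows of embedding_ctx_length, and the windows are then sent to the embeddings endpoint _chunk_size at a time, with indices mapping every window back to its source text. A small numeric illustration (all values made up, no API calls):

# illustration of the batching arithmetic only
ctx_len, chunk_size = 4, 2
per_text_tokens = {0: list(range(10)), 1: list(range(5))}

tokens, indices = [], []
for i, token in per_text_tokens.items():
    for j in range(0, len(token), ctx_len):
        tokens.append(token[j : j + ctx_len])  # ctx-length window
        indices.append(i)                      # remember the source text

batches = [tokens[i : i + chunk_size] for i in range(0, len(tokens), chunk_size)]
print(len(tokens), len(batches))  # 5 windows across 3 request batches
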

@@ -28,14 +28,22 @@ class BingChat:
self.cookies = json.loads(open(cookies_path, encoding="utf-8").read())
self.bot = asyncio.run(Chatbot.create(cookies=self.cookies))
def __call__(self, prompt: str, style: ConversationStyle = ConversationStyle.creative) -> str:
def __call__(
self, prompt: str, style: ConversationStyle = ConversationStyle.creative
) -> str:
"""
Get a text response using the EdgeGPT model based on the provided prompt.
"""
response = asyncio.run(self.bot.ask(prompt=prompt, conversation_style=style, simplify_response=True))
return response['text']
response = asyncio.run(
self.bot.ask(
prompt=prompt, conversation_style=style, simplify_response=True
)
)
return response["text"]
def create_img(self, prompt: str, output_dir: str = "./output", auth_cookie: str = None) -> str:
def create_img(
self, prompt: str, output_dir: str = "./output", auth_cookie: str = None
) -> str:
"""
Generate an image based on the provided prompt and save it in the given output directory.
Returns the path of the generated image.
@@ -47,7 +55,7 @@ class BingChat:
images = image_generator.get_images(prompt)
image_generator.save_images(images, output_dir=output_dir)
return Path(output_dir) / images[0]['path']
return Path(output_dir) / images[0]["path"]
@staticmethod
def set_cookie_dir_path(path: str):
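
A minimal usage sketch of the class, assuming a cookies.json exported from a logged-in Bing session (the paths, prompt, and cookie value are illustrative placeholders):

# illustrative usage; file paths and cookie values are placeholders
chat = BingChat(cookies_path="./cookies.json")
print(chat("Summarize this repository in one sentence."))
image_path = chat.create_img(
    "a swarm of robots", output_dir="./output", auth_cookie="<_U cookie>"
)
print(image_path)
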

@@ -458,7 +458,7 @@ class BaseOpenAI(BaseLLM):
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
prompts[i: i + self.batch_size]
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
@@ -469,7 +469,7 @@ class BaseOpenAI(BaseLLM):
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
sub_choices = choices[i * self.n: (i + 1) * self.n]
sub_choices = choices[i * self.n : (i + 1) * self.n]
generations.append(
[
Generation(
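
The two slices batch prompts by batch_size for the API call and then hand each prompt its own n completions back; a small numeric illustration (values made up):

# illustration of the batching and regrouping arithmetic only
prompts = ["p0", "p1", "p2", "p3"]
batch_size, n = 2, 3
sub_prompts = [
    prompts[i : i + batch_size] for i in range(0, len(prompts), batch_size)
]
choices = list(range(len(prompts) * n))  # 12 fake choices, 3 per prompt
per_prompt = [choices[i * n : (i + 1) * n] for i, _ in enumerate(prompts)]
print(sub_prompts)    # [['p0', 'p1'], ['p2', 'p3']]
print(per_prompt[1])  # [3, 4, 5]
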

@@ -462,7 +462,7 @@ class BaseOpenAI(BaseLLM):
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
prompts[i: i + self.batch_size]
prompts[i : i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
@@ -473,7 +473,7 @@ class BaseOpenAI(BaseLLM):
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
sub_choices = choices[i * self.n: (i + 1) * self.n]
sub_choices = choices[i * self.n : (i + 1) * self.n]
generations.append(
[
Generation(

@@ -35,8 +35,6 @@ class Vilt:
Args:
text: str
"""
# Download the image

@@ -125,7 +125,7 @@ class WebpageQATool(BaseTool):
results = []
# TODO: Handle this with a MapReduceChain
for i in range(0, len(web_docs), 4):
input_docs = web_docs[i: i + 4]
input_docs = web_docs[i : i + 4]
window_result = self.qa_chain(
{"input_documents": input_docs, "question": question},
return_only_outputs=True,

@@ -306,7 +306,7 @@ class WriteCommand:
@staticmethod
def from_str(command: str) -> "WriteCommand":
filepath = command.split(WriteCommand.separator)[0]
return WriteCommand(filepath, command[len(filepath) + 1:])
return WriteCommand(filepath, command[len(filepath) + 1 :])
class CodeWriter:
@@ -433,7 +433,7 @@ class ReadCommand:
if self.start == self.end:
code = code[self.start - 1]
else:
code = "".join(code[self.start - 1: self.end])
code = "".join(code[self.start - 1 : self.end])
return code
@staticmethod
@@ -590,9 +590,9 @@ class PatchCommand:
lines[self.start.line] = (
lines[self.start.line][: self.start.col]
+ self.content
+ lines[self.end.line][self.end.col:]
+ lines[self.end.line][self.end.col :]
)
lines = lines[: self.start.line + 1] + lines[self.end.line + 1:]
lines = lines[: self.start.line + 1] + lines[self.end.line + 1 :]
after = self.write_lines(lines)
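
from_str() splits the raw command at WriteCommand.separator and treats the remainder as file content; an illustration that assumes a single-character ";" separator (the real separator is defined elsewhere in the class):

# illustration only; ";" stands in for WriteCommand.separator
command = "src/app.py;print('hello')"
filepath = command.split(";")[0]
content = command[len(filepath) + 1 :]  # skip the path and the separator
print(filepath)  # src/app.py
print(content)   # print('hello')
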

@@ -851,5 +851,3 @@ def tool(
return _partial
else:
raise ValueError("Too many arguments for tool decorator")

@@ -365,7 +365,7 @@ class FileHandler:
try:
if url.startswith(os.environ.get("SERVER", "http://localhost:8000")):
local_filepath = url[
len(os.environ.get("SERVER", "http://localhost:8000")) + 1:
len(os.environ.get("SERVER", "http://localhost:8000")) + 1 :
]
local_filename = Path("file") / local_filepath.split("/")[-1]
src = self.path / local_filepath
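
The slice drops the server prefix plus the separating slash so only the server-relative path remains; a short illustration with a made-up URL:

# illustration only; the URL and SERVER value are placeholders
import os
from pathlib import Path

server = os.environ.get("SERVER", "http://localhost:8000")
url = server + "/file/report.pdf"
local_filepath = url[len(server) + 1 :]  # "file/report.pdf"
local_filename = Path("file") / local_filepath.split("/")[-1]
print(local_filepath, local_filename)    # file/report.pdf  file/report.pdf (on POSIX)
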

@@ -1,68 +0,0 @@
import pytest
from unittest.mock import Mock
from swarms.swarms.orchestrate import Orchestrator
@pytest.fixture
def mock_agent():
return Mock()
@pytest.fixture
def mock_task():
return {"task_id": 1, "task_data": "data"}
@pytest.fixture
def mock_vector_db():
return Mock()
@pytest.fixture
def orchestrator(mock_agent, mock_vector_db):
agent_list = [mock_agent for _ in range(5)]
task_queue = []
return Orchestrator(mock_agent, agent_list, task_queue, mock_vector_db)
def test_assign_task(orchestrator, mock_agent, mock_task, mock_vector_db):
orchestrator.task_queue.append(mock_task)
orchestrator.assign_task(0, mock_task)
mock_agent.process_task.assert_called_once()
mock_vector_db.add_documents.assert_called_once()
def test_retrieve_results(orchestrator, mock_vector_db):
mock_vector_db.query.return_value = "expected_result"
assert orchestrator.retrieve_results(0) == "expected_result"
def test_update_vector_db(orchestrator, mock_vector_db):
data = {"vector": [0.1, 0.2, 0.3], "task_id": 1}
orchestrator.update_vector_db(data)
mock_vector_db.add_documents.assert_called_once_with(
[data["vector"]], [str(data["task_id"])]
)
def test_get_vector_db(orchestrator, mock_vector_db):
assert orchestrator.get_vector_db() == mock_vector_db
def test_append_to_db(orchestrator, mock_vector_db):
collection = "test_collection"
result = "test_result"
orchestrator.append_to_db(collection, result)
mock_vector_db.append_document.assert_called_once_with(
collection, result, id=str(id(result))
)
def test_run(orchestrator, mock_agent, mock_vector_db):
objective = "test_objective"
collection = "test_collection"
orchestrator.run(objective, collection)
mock_agent.process_task.assert_called()
mock_vector_db.append_document.assert_called()

@@ -1,71 +1,68 @@
import numpy as np
from swarms.swarms.orchestrate import Orchestrator, Worker
import chromadb
import pytest
from unittest.mock import Mock
from swarms.swarms.orchestrate import Orchestrator
def test_orchestrator_initialization():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
assert isinstance(orchestrator, Orchestrator)
assert orchestrator.agents.qsize() == 5
assert orchestrator.task_queue.qsize() == 0
@pytest.fixture
def mock_agent():
return Mock()
def test_orchestrator_assign_task():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
orchestrator.assign_task(1, {"content": "task1"})
assert orchestrator.task_queue.qsize() == 1
@pytest.fixture
def mock_task():
return {"task_id": 1, "task_data": "data"}
def test_orchestrator_embed():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
result = orchestrator.embed("Hello, world!", "api_key", "model_name")
assert isinstance(result, np.ndarray)
@pytest.fixture
def mock_vector_db():
return Mock()
def test_orchestrator_retrieve_results():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
result = orchestrator.retrieve_results(1)
assert isinstance(result, list)
@pytest.fixture
def orchestrator(mock_agent, mock_vector_db):
agent_list = [mock_agent for _ in range(5)]
task_queue = []
return Orchestrator(mock_agent, agent_list, task_queue, mock_vector_db)
def test_orchestrator_update_vector_db():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
data = {"vector": np.array([1, 2, 3]), "task_id": 1}
orchestrator.update_vector_db(data)
assert orchestrator.collection.count() == 1
def test_assign_task(orchestrator, mock_agent, mock_task, mock_vector_db):
orchestrator.task_queue.append(mock_task)
orchestrator.assign_task(0, mock_task)
mock_agent.process_task.assert_called_once()
mock_vector_db.add_documents.assert_called_once()
def test_orchestrator_get_vector_db():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
result = orchestrator.get_vector_db()
assert isinstance(result, chromadb.Collection)
def test_retrieve_results(orchestrator, mock_vector_db):
mock_vector_db.query.return_value = "expected_result"
assert orchestrator.retrieve_results(0) == "expected_result"
def test_orchestrator_append_to_db():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
orchestrator.append_to_db("Hello, world!")
assert orchestrator.collection.count() == 1
def test_update_vector_db(orchestrator, mock_vector_db):
data = {"vector": [0.1, 0.2, 0.3], "task_id": 1}
orchestrator.update_vector_db(data)
mock_vector_db.add_documents.assert_called_once_with(
[data["vector"]], [str(data["task_id"])]
)
def test_orchestrator_run():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
result = orchestrator.run("Write a short story.")
assert isinstance(result, list)
def test_get_vector_db(orchestrator, mock_vector_db):
assert orchestrator.get_vector_db() == mock_vector_db
def test_orchestrator_chat():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
orchestrator.chat(1, 2, "Hello, Agent 2!")
assert orchestrator.collection.count() == 1
def test_append_to_db(orchestrator, mock_vector_db):
collection = "test_collection"
result = "test_result"
orchestrator.append_to_db(collection, result)
mock_vector_db.append_document.assert_called_once_with(
collection, result, id=str(id(result))
)
def test_orchestrator_add_agents():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
orchestrator.add_agents(5)
assert orchestrator.agents.qsize() == 10
def test_run(orchestrator, mock_agent, mock_vector_db):
objective = "test_objective"
collection = "test_collection"
orchestrator.run(objective, collection)
def test_orchestrator_remove_agents():
orchestrator = Orchestrator(agent=Worker, agent_list=[Worker] * 5, task_queue=[])
orchestrator.remove_agents(3)
assert orchestrator.agents.qsize() == 2
mock_agent.process_task.assert_called()
mock_vector_db.append_document.assert_called()
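
The new fixture-based tests double as a usage sketch for the Orchestrator API; an equivalent standalone driver using the same mocked agent and vector DB would look roughly like this (it mirrors the fixtures above rather than real components):

# mirrors the mock-based tests above; no real agents or vector DB involved
from unittest.mock import Mock
from swarms.swarms.orchestrate import Orchestrator

agent, vector_db = Mock(), Mock()
orchestrator = Orchestrator(agent, [agent for _ in range(5)], [], vector_db)

orchestrator.task_queue.append({"task_id": 1, "task_data": "data"})
orchestrator.assign_task(0, {"task_id": 1, "task_data": "data"})
orchestrator.run("test_objective", "test_collection")
print(orchestrator.get_vector_db() is vector_db)  # True
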
