discord-bot-framework
Kye committed 1 year ago
commit 069b2aed45, parent 7bb4cb67bb
Former-commit-id: 6d7fca8402

@@ -14,9 +14,11 @@ interpreter.api_key = os.getenv("OPENAI_API_KEY")
# interpreter.api_base = os.getenv("API_BASE")
# interpreter.auto_run = True
def split_text(text, chunk_size=1500):
#########################################################################
-return [text[i:i+chunk_size] for i in range(0, len(text), chunk_size)]
+return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
# Discord initialization
intents = discord.Intents.all()
@@ -28,6 +30,7 @@ send_image = False
model = whisper.load_model("base")
def transcribe(audio):
# load audio and pad/trim it to fit 30 seconds
@@ -45,6 +48,7 @@ def transcribe(audio):
result = whisper.decode(model, mel, options)
return result.text
@client.event
async def on_message(message):
await client.process_commands(message)
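For context, `split_text` above exists because Discord rejects messages over 2,000 characters, so long output has to be sent in slices. A minimal usage sketch, assuming a `channel` object from the `on_message` handler above (the `send_long_message` helper is hypothetical):

# Hypothetical helper: send a long reply as a series of Discord messages.
# The 1500-character default leaves headroom under Discord's 2000-char limit.
async def send_long_message(channel, text):
    for chunk in split_text(text, chunk_size=1500):
        await channel.send(chunk)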

@@ -13,7 +13,6 @@ dotenv.load_dotenv(".env")
interpreter.auto_run = True
set_api_key(os.getenv("ELEVEN_LABS_API_KEY"))  # pass the key's value, not its name

@@ -2,5 +2,3 @@ from swarms.models import Fuyu
fuyu = Fuyu()
fuyu("Hello, my name is", "images/github-banner-swarms.png")

@@ -1,22 +1,20 @@
-from swarms import workers
-from swarms.workers.worker import Worker
-from swarms.chunkers import chunkers
-from swarms import models
-from swarms import structs
-from swarms import swarms
-from swarms.swarms.orchestrate import Orchestrator
-from swarms import agents
-from swarms.logo import logo
-import os
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
# disable tensorflow warnings
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from swarms.logo import logo
print(logo)
+from swarms import agents
+from swarms.swarms.orchestrate import Orchestrator
+from swarms import swarms
+from swarms import structs
+from swarms import models
+from swarms.chunkers import chunkers
+from swarms.workers.worker import Worker
+from swarms import workers
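One note on the reordered `__init__.py`: `TF_CPP_MIN_LOG_LEVEL` only silences TensorFlow's C++ logging if it is set before TensorFlow is first imported, which is why the environment setup now precedes the `swarms` imports that may pull TensorFlow in. The pattern in isolation (a sketch, assuming TensorFlow is installed):

import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # must be set before TensorFlow loads

import tensorflow as tf  # INFO and WARNING logs from the C++ backend are now suppressed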

@@ -1,12 +1,12 @@
"""Agent Infrastructure, models, memory, utils, tools"""
"""Agent Infrastructure, models, memory, utils, tools"""
from swarms.agents.omni_modal_agent import OmniModalAgent
from swarms.agents.hf_agents import HFAgent
# utils
from swarms.agents.message import Message
from swarms.agents.stream_response import stream
from swarms.agents.base import AbstractAgent
from swarms.agents.registry import Registry
from swarms.agents.idea_to_image_agent import Idea2Image
"""Agent Infrastructure, models, memory, utils, tools"""
"""Agent Infrastructure, models, memory, utils, tools"""
# utils

@@ -108,7 +108,7 @@ class MetaPrompterAgent:
def get_new_instructions(self, meta_output):
"""Get New Instructions from the meta_output"""
delimiter = "Instructions: "
-new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :]
+new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter):]
return new_instructions
def run(self, task: str):
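`get_new_instructions` keeps everything after the first occurrence of the delimiter. A worked example (the input string is illustrative); note that if the delimiter is absent, `str.find` returns -1 and the slice silently starts at index `len(delimiter) - 1` instead of raising:

meta_output = "Critique: too verbose.\nInstructions: Be concise and cite sources."
delimiter = "Instructions: "
new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter):]
# new_instructions == "Be concise and cite sources."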

@@ -207,12 +207,12 @@ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
kernel[steps:-steps, :steps] = left
kernel[steps:-steps, -steps:] = right
-pt_gt_img = easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]]
+pt_gt_img = easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]]
gaussian_gt_img = (
kernel * gt_img_array + (1 - kernel) * pt_gt_img
) # gt img with blur img
gaussian_gt_img = gaussian_gt_img.astype(np.int64)
-easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]] = gaussian_gt_img
+easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]] = gaussian_gt_img
gaussian_img = Image.fromarray(easy_img)
return gaussian_img

@@ -58,7 +58,7 @@ class BaseChunker(ABC):
half_token_count = token_count // 2
if current_separator:
-separators = self.separators[self.separators.index(current_separator) :]
+separators = self.separators[self.separators.index(current_separator):]
else:
separators = self.separators
@@ -84,7 +84,7 @@ class BaseChunker(ABC):
subchanks[: balance_index + 1]
)
second_subchunk = separator.value + separator.value.join(
-subchanks[balance_index + 1 :]
+subchanks[balance_index + 1:]
)
else:
first_subchunk = (
@@ -92,7 +92,7 @@ class BaseChunker(ABC):
+ separator.value
)
second_subchunk = separator.value.join(
-subchanks[balance_index + 1 :]
+subchanks[balance_index + 1:]
)
first_subchunk_rec = self._chunk_recursively(
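For orientation, the two branches above build the same two halves and differ only in which side the separator is re-attached to. A standalone sketch of the split (names are illustrative, not the class's API; `balance_index` stands in for the token-balanced midpoint):

def split_at_balance(subchunks, separator, balance_index):
    # First half keeps elements up to and including the balance point.
    first = separator.join(subchunks[: balance_index + 1])
    # Second half takes the rest; the caller decides which side the
    # separator itself is glued onto, as in the two branches above.
    second = separator.join(subchunks[balance_index + 1:])
    return first, second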

@@ -347,7 +347,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
-tokens.append(token[j : j + self.embedding_ctx_length])
+tokens.append(token[j: j + self.embedding_ctx_length])
indices.append(i)
batched_embeddings: List[List[float]] = []
@@ -366,7 +366,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in _iter:
response = embed_with_retry(
self,
-input=tokens[i : i + _chunk_size],
+input=tokens[i: i + _chunk_size],
**self._invocation_params,
)
batched_embeddings.extend(r["embedding"] for r in response["data"])
@@ -428,7 +428,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
disallowed_special=self.disallowed_special,
)
for j in range(0, len(token), self.embedding_ctx_length):
-tokens.append(token[j : j + self.embedding_ctx_length])
+tokens.append(token[j: j + self.embedding_ctx_length])
indices.append(i)
batched_embeddings: List[List[float]] = []
@@ -436,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in range(0, len(tokens), _chunk_size):
response = await async_embed_with_retry(
self,
-input=tokens[i : i + _chunk_size],
+input=tokens[i: i + _chunk_size],
**self._invocation_params,
)
batched_embeddings.extend(r["embedding"] for r in response["data"])
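Both the sync and async embedding paths use the same fixed-window batching idiom twice: first slicing each token list into `embedding_ctx_length`-sized windows, then submitting `_chunk_size` windows per API call. The idiom in isolation (values are illustrative stand-ins for the real limits):

ctx_length, chunk_size = 8, 3  # illustrative stand-ins
tokens = list(range(20))
windows = [tokens[j: j + ctx_length] for j in range(0, len(tokens), ctx_length)]
batches = [windows[i: i + chunk_size] for i in range(0, len(windows), chunk_size)]
# len(windows) == 3, len(batches) == 1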

@@ -4,7 +4,6 @@ from swarms.models.mistral import Mistral
from swarms.models.openai_models import OpenAI, AzureOpenAI, OpenAIChat
# MultiModal Models
from swarms.models.idefics import Idefics
from swarms.models.kosmos_two import Kosmos

@@ -6,6 +6,7 @@ from EdgeGPT.EdgeUtils import ImageQuery, Query, Cookie
from EdgeGPT.ImageGen import ImageGen
from pathlib import Path
class BingChat:
"""
EdgeGPT (Bing Chat) model by Microsoft

@@ -458,7 +458,7 @@ class BaseOpenAI(BaseLLM):
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
-prompts[i : i + self.batch_size]
+prompts[i: i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
@@ -469,7 +469,7 @@ class BaseOpenAI(BaseLLM):
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
-sub_choices = choices[i * self.n : (i + 1) * self.n]
+sub_choices = choices[i * self.n: (i + 1) * self.n]
generations.append(
[
Generation(
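`create_llm_result` above assumes the API returns `n` choices per prompt in request order, so prompt `i` owns the contiguous slice `choices[i * n: (i + 1) * n]`. A worked example with illustrative values:

n = 2
choices = ["p0-a", "p0-b", "p1-a", "p1-b", "p2-a", "p2-b"]
per_prompt = [choices[i * n: (i + 1) * n] for i in range(3)]
# per_prompt == [["p0-a", "p0-b"], ["p1-a", "p1-b"], ["p2-a", "p2-b"]]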

@@ -30,6 +30,7 @@ class Fuyu:
"""
def __init__(
self,
pretrained_path: str = "adept/fuyu-8b",

@@ -462,7 +462,7 @@ class BaseOpenAI(BaseLLM):
)
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [
-prompts[i : i + self.batch_size]
+prompts[i: i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
@@ -473,7 +473,7 @@ class BaseOpenAI(BaseLLM):
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
-sub_choices = choices[i * self.n : (i + 1) * self.n]
+sub_choices = choices[i * self.n: (i + 1) * self.n]
generations.append(
[
Generation(

@@ -125,7 +125,7 @@ class WebpageQATool(BaseTool):
results = []
# TODO: Handle this with a MapReduceChain
for i in range(0, len(web_docs), 4):
-input_docs = web_docs[i : i + 4]
+input_docs = web_docs[i: i + 4]
window_result = self.qa_chain(
{"input_documents": input_docs, "question": question},
return_only_outputs=True,

@@ -306,7 +306,7 @@ class WriteCommand:
@staticmethod
def from_str(command: str) -> "WriteCommand":
filepath = command.split(WriteCommand.separator)[0]
-return WriteCommand(filepath, command[len(filepath) + 1 :])
+return WriteCommand(filepath, command[len(filepath) + 1:])
class CodeWriter:
@@ -433,7 +433,7 @@ class ReadCommand:
if self.start == self.end:
code = code[self.start - 1]
else:
code = "".join(code[self.start - 1 : self.end])
code = "".join(code[self.start - 1: self.end])
return code
@staticmethod
@@ -590,9 +590,9 @@ class PatchCommand:
lines[self.start.line] = (
lines[self.start.line][: self.start.col]
+ self.content
-+ lines[self.end.line][self.end.col :]
++ lines[self.end.line][self.end.col:]
)
-lines = lines[: self.start.line + 1] + lines[self.end.line + 1 :]
+lines = lines[: self.start.line + 1] + lines[self.end.line + 1:]
after = self.write_lines(lines)
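The `PatchCommand` splice keeps the start line up to `start.col`, appends the replacement, keeps the end line from `end.col` on, and then drops the lines the patch fully covers. The same splice on plain lists (a sketch with 0-indexed positions; names are illustrative):

def splice(lines, start, end, content):
    start_line, start_col = start
    end_line, end_col = end
    # Merge the partial start and end lines around the new content.
    lines[start_line] = lines[start_line][:start_col] + content + lines[end_line][end_col:]
    # Drop the lines the patch fully covered.
    return lines[: start_line + 1] + lines[end_line + 1:]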

@@ -577,6 +577,7 @@ class Tool(BaseTool):
**kwargs,
)
class EdgeGPTTool:
def __init__(self, model):
self.model = model
@@ -584,6 +585,7 @@ class EdgeGPTTool:
def run(self, prompt):
return self.model.ask(prompt)
class StructuredTool(BaseTool):
"""Tool that can operate on any number of inputs."""
@@ -850,10 +852,4 @@ def tool(
else:
raise ValueError("Too many arguments for tool decorator")
-class EdgeGPTTool(BaseTool):
-    def __init__(self, model, name="EdgeGPTTool", description="Tool that uses EdgeGPTModel to generate responses"):
-        super().__init__(name=name, description=description)
-        self.model = model
-    def _run(self, prompt):
-        return self.model.__call__(prompt)

@@ -365,7 +365,7 @@ class FileHandler:
try:
if url.startswith(os.environ.get("SERVER", "http://localhost:8000")):
local_filepath = url[
-len(os.environ.get("SERVER", "http://localhost:8000")) + 1 :
+len(os.environ.get("SERVER", "http://localhost:8000")) + 1:
]
local_filename = Path("file") / local_filepath.split("/")[-1]
src = self.path / local_filepath
