commit 069b2aed45
parent 7bb4cb67bb
Author: Kye (1 year ago)

    discord-bot-framework

    Former-commit-id: 6d7fca8402

@@ -14,9 +14,11 @@ interpreter.api_key = os.getenv("OPENAI_API_KEY")
 # interpreter.api_base = os.getenv("API_BASE")
 # interpreter.auto_run = True
 def split_text(text, chunk_size=1500):
 #########################################################################
-    return [text[i:i+chunk_size] for i in range(0, len(text), chunk_size)]
+    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
 # discord initial
 intents = discord.Intents.all()
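
For context, split_text above exists because Discord rejects messages longer than 2,000 characters; a minimal usage sketch (sample text assumed, not part of the commit):

long_reply = "word " * 1000  # ~5,000 characters of model output
for part in split_text(long_reply):
    print(len(part))  # each chunk is at most 1,500 characters
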
@@ -28,6 +30,7 @@ send_image = False
 model = whisper.load_model("base")
 def transcribe(audio):
     # load audio and pad/trim it to fit 30 seconds
@@ -45,12 +48,13 @@ def transcribe(audio):
     result = whisper.decode(model, mel, options)
     return result.text
 @client.event
 async def on_message(message):
     await client.process_commands(message)
     bot_mention = f"<@{bot_id}>"
     # if ("<@1158923910855798804>" in message.content) or (message.author == client.user or message.content[0] == '$'):
     #     return
     response = []
     for chunk in interpreter.chat(message.content, display=False, stream=False):
         # await message.channel.send(chunk)
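
The commented-out send marks where output would go; a hypothetical continuation (names assumed, not in the commit) that joins the collected chunks and relays them through split_text:

async def send_response(channel, response):
    # join the streamed chunks, then send one Discord message per piece
    full_text = "".join(str(chunk) for chunk in response)
    for part in split_text(full_text):
        await channel.send(part)  # discord.py TextChannel.send
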

@@ -13,7 +13,6 @@ dotenv.load_dotenv(".env")
 interpreter.auto_run = True
 set_api_key("ELEVEN_LABS_API_KEY")

@@ -1,6 +1,6 @@
 from swarms.models.bing_chat import EdgeGPTModel
 from swarms.workers.worker import Worker
 from swarms.tools.tool import EdgeGPTTool
 # Initialize the EdgeGPTModel
 edgegpt = EdgeGPTModel(cookies_path="./cookies.txt")

@@ -2,5 +2,3 @@ from swarms.models import Fuyu
 fuyu = Fuyu()
 fuyu("Hello, my name is", "images/github-banner-swarms.png")

@@ -133,7 +133,7 @@ def generate_character_description(character_name):
         player_descriptor_system_message,
         HumanMessage(
             content=f"""{game_description}
             Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities.
             Speak directly to {character_name}.
             Do not add anything else."""
         ),
@@ -250,12 +250,12 @@ topic_specifier_prompt = [
     SystemMessage(content="You can make a task more specific."),
     HumanMessage(
         content=f"""{game_description}
         You are the debate moderator.
         Please make the debate topic more specific.
         Frame the debate topic as a problem to be solved.
         Be creative and imaginative.
         Please reply with the specified topic in {word_limit} words or less.
         Speak directly to the presidential candidates: {*character_names,}.
         Do not add anything else."""
     ),

@@ -9,12 +9,12 @@ swarm = HierarchicalSwarm(openai_api_key=api_key)
 # Define an objective
 objective = """
 Please make a web GUI for using HTTP API server.
 The name of it is HierarchicalSwarm.
 You can check the server code at ./main.py.
 The server is served on localhost:8000.
 Users should be able to write text input as 'query' and url array as 'files', and check the response.
 Users input form should be delivered in JSON format.
 I want it to have neumorphism-style. Serve it on port 4500.
 """

@@ -8,10 +8,10 @@ swarm = HierarchicalSwarm(openai_api_key=api_key)
 # Define an objective
 objective = """
 Please develop and serve a simple community web service.
 People can signup, login, post, comment.
 Post and comment should be visible at once.
 I want it to have neumorphism-style.
 The ports you can use are 4500 and 6500.
 """

@@ -9,9 +9,9 @@ swarm = HierarchicalSwarm(openai_api_key=api_key)
 # Define an objective
 objective = """
 Please develop and serve a simple web TODO app.
 The user can list all TODO items and add or delete each TODO item.
 I want it to have neumorphism-style.
 The ports you can use are 4500 and 6500.
 """

@@ -2,12 +2,12 @@ from swarms import WorkerUltraUltraNode
 # Define an objective
 objective = """
 Please make a web GUI for using HTTP API server.
 The name of it is Swarms.
 You can check the server code at ./main.py.
 The server is served on localhost:8000.
 Users should be able to write text input as 'query' and url array as 'files', and check the response.
 Users input form should be delivered in JSON format.
 I want it to have neumorphism-style. Serve it on port 4500.
 """

@@ -1,22 +1,20 @@
-from swarms import workers
-from swarms.workers.worker import Worker
-from swarms.chunkers import chunkers
-from swarms import models
-from swarms import structs
-from swarms import swarms
-from swarms.swarms.orchestrate import Orchestrator
-from swarms import agents
-from swarms.logo import logo
-import os
 import warnings
 warnings.filterwarnings("ignore", category=UserWarning)
 # disable tensorflow warnings
+import os
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
+from swarms.logo import logo
 print(logo)
+from swarms import agents
+from swarms.swarms.orchestrate import Orchestrator
+from swarms import swarms
+from swarms import structs
+from swarms import models
+from swarms.chunkers import chunkers
+from swarms.workers.worker import Worker
+from swarms import workers

@@ -1,12 +1,12 @@
-"""Agent Infrastructure, models, memory, utils, tools"""
-"""Agent Infrastructure, models, memory, utils, tools"""
 from swarms.agents.omni_modal_agent import OmniModalAgent
 from swarms.agents.hf_agents import HFAgent
-# utils
 from swarms.agents.message import Message
 from swarms.agents.stream_response import stream
 from swarms.agents.base import AbstractAgent
 from swarms.agents.registry import Registry
 from swarms.agents.idea_to_image_agent import Idea2Image
+"""Agent Infrastructure, models, memory, utils, tools"""
+"""Agent Infrastructure, models, memory, utils, tools"""
+# utils

@@ -53,7 +53,7 @@ class Idea2Image:
     def llm_prompt(self):
         LLM_PROMPT = f"""
        Refine the USER prompt to create a more precise image tailored to the user's needs using
        an image generator like DALLE-3.
        ###### FOLLOW THE GUIDE BELOW TO REFINE THE PROMPT ######
@@ -61,7 +61,7 @@ class Idea2Image:
        - Frame your photographic prompts like camera position, lighting, film type, year, usage context. This implicitly suggests image qualities.
        - For illustrations, you can borrow photographic terms like "close up" and prompt for media, style, artist, animation style, etc.
        - Prompt hack: name a film/TV show genre + year to "steal the look" for costumes, lighting, etc without knowing technical details.

@@ -108,7 +108,7 @@ class MetaPrompterAgent:
     def get_new_instructions(self, meta_output):
         """Get New Instructions from the meta_output"""
         delimiter = "Instructions: "
-        new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter) :]
+        new_instructions = meta_output[meta_output.find(delimiter) + len(delimiter):]
         return new_instructions
     def run(self, task: str):
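
A toy run of the slicing this hunk reformats, with sample meta-output assumed:

meta_output = "Critique: too vague. Instructions: be specific and cite sources."
delimiter = "Instructions: "
print(meta_output[meta_output.find(delimiter) + len(delimiter):])
# -> "be specific and cite sources."
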

@@ -207,12 +207,12 @@ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
     kernel[steps:-steps, :steps] = left
     kernel[steps:-steps, -steps:] = right
-    pt_gt_img = easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]]
+    pt_gt_img = easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]]
     gaussian_gt_img = (
         kernel * gt_img_array + (1 - kernel) * pt_gt_img
     )  # gt img with blur img
     gaussian_gt_img = gaussian_gt_img.astype(np.int64)
-    easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]] = gaussian_gt_img
+    easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]] = gaussian_gt_img
     gaussian_img = Image.fromarray(easy_img)
     return gaussian_img
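
The expression kernel * gt_img_array + (1 - kernel) * pt_gt_img is a per-pixel linear blend; a self-contained sketch with toy arrays (all values assumed):

import numpy as np

gt = np.full((4, 4), 255.0)                   # stand-in for gt_img_array
pt = np.zeros((4, 4))                         # stand-in for pt_gt_img
kernel = np.linspace(0, 1, 16).reshape(4, 4)  # 1.0 keeps gt, 0.0 keeps pt
blended = (kernel * gt + (1 - kernel) * pt).astype(np.int64)
print(blended)                                # values feather from 0 to 255
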

@@ -58,7 +58,7 @@ class BaseChunker(ABC):
         half_token_count = token_count // 2
         if current_separator:
-            separators = self.separators[self.separators.index(current_separator) :]
+            separators = self.separators[self.separators.index(current_separator):]
         else:
             separators = self.separators
@@ -84,7 +84,7 @@ class BaseChunker(ABC):
                 subchanks[: balance_index + 1]
             )
             second_subchunk = separator.value + separator.value.join(
-                subchanks[balance_index + 1 :]
+                subchanks[balance_index + 1:]
             )
         else:
             first_subchunk = (
@@ -92,7 +92,7 @@ class BaseChunker(ABC):
                 + separator.value
             )
             second_subchunk = separator.value.join(
-                subchanks[balance_index + 1 :]
+                subchanks[balance_index + 1:]
             )
             first_subchunk_rec = self._chunk_recursively(
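
Both branches split a chunk at a balance index on the current separator; an illustrative stand-alone version (names and sample data assumed):

subchunks = "one. two. three. four.".split(". ")
balance_index = len(subchunks) // 2 - 1
first_subchunk = ". ".join(subchunks[: balance_index + 1]) + ". "
second_subchunk = ". ".join(subchunks[balance_index + 1:])
print(first_subchunk, "|", second_subchunk)  # one. two.  | three. four.
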

@@ -347,7 +347,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(token[j : j + self.embedding_ctx_length])
+                tokens.append(token[j: j + self.embedding_ctx_length])
                 indices.append(i)
         batched_embeddings: List[List[float]] = []
@@ -366,7 +366,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         for i in _iter:
             response = embed_with_retry(
                 self,
-                input=tokens[i : i + _chunk_size],
+                input=tokens[i: i + _chunk_size],
                 **self._invocation_params,
             )
             batched_embeddings.extend(r["embedding"] for r in response["data"])
@@ -428,7 +428,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(token[j : j + self.embedding_ctx_length])
+                tokens.append(token[j: j + self.embedding_ctx_length])
                 indices.append(i)
         batched_embeddings: List[List[float]] = []
@@ -436,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         for i in range(0, len(tokens), _chunk_size):
             response = await async_embed_with_retry(
                 self,
-                input=tokens[i : i + _chunk_size],
+                input=tokens[i: i + _chunk_size],
                 **self._invocation_params,
             )
             batched_embeddings.extend(r["embedding"] for r in response["data"])
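
All four loops slice a flat token list into context-window-sized batches; the pattern in isolation (sizes assumed for illustration):

tokens = list(range(10))
embedding_ctx_length = 4
batches = [tokens[j: j + embedding_ctx_length]
           for j in range(0, len(tokens), embedding_ctx_length)]
print(batches)  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
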

@@ -4,8 +4,7 @@ from swarms.models.mistral import Mistral
 from swarms.models.openai_models import OpenAI, AzureOpenAI, OpenAIChat
 # MultiModal Models
 from swarms.models.idefics import Idefics
 from swarms.models.kosmos_two import Kosmos
 from swarms.models.vilt import Vilt

@@ -6,6 +6,7 @@ from EdgeGPT.EdgeUtils import ImageQuery, Query, Cookie
 from EdgeGPT.ImageGen import ImageGen
 from pathlib import Path
 class BingChat:
     """
     EdgeGPT model by OpenAI
@@ -14,7 +15,7 @@ class BingChat:
     ----------
     cookies_path : str
         Path to the cookies.json necessary for authenticating with EdgeGPT
     Examples
     --------
     >>> edgegpt = BingChat(cookies_path="./path/to/cookies.json")
@@ -22,11 +23,11 @@ class BingChat:
     >>> image_path = edgegpt.create_img("Sunset over mountains")
     """
     def __init__(self, cookies_path: str):
         self.cookies = json.loads(open(cookies_path, encoding="utf-8").read())
         self.bot = asyncio.run(Chatbot.create(cookies=self.cookies))
     def __call__(self, prompt: str, style: ConversationStyle = ConversationStyle.creative) -> str:
         """
         Get a text response using the EdgeGPT model based on the provided prompt.
@@ -41,11 +42,11 @@ class BingChat:
         """
         if not auth_cookie:
             raise ValueError("Auth cookie is required for image generation.")
         image_generator = ImageGen(auth_cookie, quiet=True)
         images = image_generator.get_images(prompt)
         image_generator.save_images(images, output_dir=output_dir)
         return Path(output_dir) / images[0]['path']
     @staticmethod
@@ -53,4 +54,4 @@ class BingChat:
         """
         Set the directory path for managing cookies.
         """
         Cookie.dir_path = Path(path)

@@ -458,7 +458,7 @@ class BaseOpenAI(BaseLLM):
             )
             params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
         sub_prompts = [
-            prompts[i : i + self.batch_size]
+            prompts[i: i + self.batch_size]
             for i in range(0, len(prompts), self.batch_size)
         ]
         return sub_prompts
@@ -469,7 +469,7 @@ class BaseOpenAI(BaseLLM):
         """Create the LLMResult from the choices and prompts."""
         generations = []
         for i, _ in enumerate(prompts):
-            sub_choices = choices[i * self.n : (i + 1) * self.n]
+            sub_choices = choices[i * self.n: (i + 1) * self.n]
             generations.append(
                 [
                     Generation(
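
The sub_choices slice assigns each prompt its n completions from the flat choices list; a toy illustration (sample data assumed):

n = 2
choices = ["p0-a", "p0-b", "p1-a", "p1-b"]
per_prompt = [choices[i * n: (i + 1) * n] for i in range(len(choices) // n)]
print(per_prompt)  # [['p0-a', 'p0-b'], ['p1-a', 'p1-b']]
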

@@ -21,15 +21,16 @@ class Fuyu:
         Device to use for the model
     max_new_tokens : int
         Maximum number of tokens to generate
     Examples
     --------
     >>> fuyu = Fuyu()
     >>> fuyu("Hello, my name is", "path/to/image.png")
     """
     def __init__(
         self,
         pretrained_path: str = "adept/fuyu-8b",

@@ -20,7 +20,7 @@ class Kosmos:
         """
         Args:
     # Initialize Kosmos

@@ -196,14 +196,14 @@ class BaseOpenAI(BaseLLM):
     disallowed_special: Union[Literal["all"], Collection[str]] = "all"
     """Set of special tokens that are not allowed。"""
     tiktoken_model_name: Optional[str] = None
     """The model name to pass to tiktoken when using this class.
     Tiktoken is used to count the number of tokens in documents to constrain
     them to be under a certain limit. By default, when set to None, this will
     be the same as the embedding model name. However, there are some cases
     where you may want to use this Embedding class with a model name not
     supported by tiktoken. This can include when using Azure embeddings or
     when using one of the many model providers that expose an OpenAI-like
     API but with different models. In those cases, in order to avoid erroring
     when tiktoken is called, you can specify a model name to use here."""
     def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]:  # type: ignore
@@ -462,7 +462,7 @@ class BaseOpenAI(BaseLLM):
             )
             params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
         sub_prompts = [
-            prompts[i : i + self.batch_size]
+            prompts[i: i + self.batch_size]
             for i in range(0, len(prompts), self.batch_size)
         ]
         return sub_prompts
@@ -473,7 +473,7 @@ class BaseOpenAI(BaseLLM):
         """Create the LLMResult from the choices and prompts."""
         generations = []
         for i, _ in enumerate(prompts):
-            sub_choices = choices[i * self.n : (i + 1) * self.n]
+            sub_choices = choices[i * self.n: (i + 1) * self.n]
             generations.append(
                 [
                     Generation(

@@ -6,10 +6,10 @@ from PIL import Image
 class Vilt:
     """
     Vision-and-Language Transformer (ViLT) model fine-tuned on VQAv2.
     It was introduced in the paper ViLT: Vision-and-Language Transformer Without
     Convolution or Region Supervision by Kim et al. and first released in this repository.
     Disclaimer: The team releasing ViLT did not write a model card for this model
     so this model card has been written by the Hugging Face team.
     https://huggingface.co/dandelin/vilt-b32-finetuned-vqa
@@ -37,7 +37,7 @@ class Vilt:
         Args:
             text: str
         """
         # Download the image
         image = Image.open(requests.get(image_url, stream=True).raw)

@@ -1,10 +1,10 @@
 def documentation(task: str):
     documentation = f"""Create multi-page long and explicit professional pytorch-like documentation for the <MODULE> code below follow the outline for the <MODULE> library,
     provide many examples and teach the user about the code, provide examples for every function, make the documentation 10,000 words,
     provide many usage examples and note this is markdown docs, create the documentation for the code to document,
     put the arguments and methods in a table in markdown to make it visually seamless
     Now make the professional documentation for this code, provide the architecture and how the class works and why it works that way,
     it's purpose, provide args, their types, 3 ways of usage examples, in examples show all the code like imports main example etc
     BE VERY EXPLICIT AND THOROUGH, MAKE IT DEEP AND USEFUL
@@ -45,7 +45,7 @@ def documentation(task: str):
     class torch.nn.MultiheadAttention(embed_dim, num_heads, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None, batch_first=False, device=None, dtype=None):
     ```
     Creates a multi-head attention module for joint information representation from the different subspaces.
     Parameters:
     - embed_dim (int): Total dimension of the model.
     - num_heads (int): Number of parallel attention heads. The embed_dim will be split across num_heads.
@@ -70,7 +70,7 @@ def documentation(task: str):
     - value (Tensor): Value embeddings of shape (S, E_v) for unbatched input, (S, N, E_v) when batch_first=False, or (N, S, E_v) when batch_first=True.
     - key_padding_mask (Optional[Tensor]): If specified, a mask indicating elements to be ignored in key for attention computation.
     - need_weights (bool): If specified, returns attention weights in addition to attention outputs. Default: True.
     - attn_mask (Optional[Tensor]): If specified, a mask preventing attention to certain positions.
     - average_attn_weights (bool): If true, returns averaged attention weights per head. Otherwise, returns attention weights separately per head. Note that this flag only has an effect when need_weights=True. Default: True.
     - is_causal (bool): If specified, applies a causal mask as the attention mask. Default: False.
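
Since the prompt documents torch.nn.MultiheadAttention at length, a short verifiable usage example (dimensions chosen arbitrarily):

import torch
import torch.nn as nn

mha = nn.MultiheadAttention(embed_dim=64, num_heads=8, batch_first=True)
x = torch.randn(2, 10, 64)                      # (batch, seq, embed_dim)
out, weights = mha(x, x, x, need_weights=True)  # self-attention
print(out.shape, weights.shape)  # torch.Size([2, 10, 64]) torch.Size([2, 10, 10])
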

@@ -125,7 +125,7 @@ class WebpageQATool(BaseTool):
         results = []
         # TODO: Handle this with a MapReduceChain
         for i in range(0, len(web_docs), 4):
-            input_docs = web_docs[i : i + 4]
+            input_docs = web_docs[i: i + 4]
             window_result = self.qa_chain(
                 {"input_documents": input_docs, "question": question},
                 return_only_outputs=True,

@@ -306,7 +306,7 @@ class WriteCommand:
     @staticmethod
     def from_str(command: str) -> "WriteCommand":
         filepath = command.split(WriteCommand.separator)[0]
-        return WriteCommand(filepath, command[len(filepath) + 1 :])
+        return WriteCommand(filepath, command[len(filepath) + 1:])
 class CodeWriter:
@@ -433,7 +433,7 @@ class ReadCommand:
         if self.start == self.end:
             code = code[self.start - 1]
         else:
-            code = "".join(code[self.start - 1 : self.end])
+            code = "".join(code[self.start - 1: self.end])
         return code
     @staticmethod
@@ -590,9 +590,9 @@ class PatchCommand:
         lines[self.start.line] = (
             lines[self.start.line][: self.start.col]
             + self.content
-            + lines[self.end.line][self.end.col :]
+            + lines[self.end.line][self.end.col:]
         )
-        lines = lines[: self.start.line + 1] + lines[self.end.line + 1 :]
+        lines = lines[: self.start.line + 1] + lines[self.end.line + 1:]
         after = self.write_lines(lines)
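
PatchCommand's splice replaces everything between (start.line, start.col) and (end.line, end.col); a toy demonstration with assumed coordinates:

lines = ["alpha\n", "beta\n", "gamma\n"]
start_line, start_col, end_line, end_col = 0, 2, 2, 3
lines[start_line] = lines[start_line][:start_col] + "XYZ" + lines[end_line][end_col:]
lines = lines[: start_line + 1] + lines[end_line + 1:]
print("".join(lines))  # alXYZma
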

@@ -576,7 +576,8 @@ class Tool(BaseTool):
             args_schema=args_schema,
             **kwargs,
         )
 class EdgeGPTTool:
     def __init__(self, model):
         self.model = model
@@ -584,6 +585,7 @@ class EdgeGPTTool:
     def run(self, prompt):
         return self.model.ask(prompt)
 class StructuredTool(BaseTool):
     """Tool that can operate on any number of inputs."""
@@ -850,10 +852,4 @@ def tool(
     else:
         raise ValueError("Too many arguments for tool decorator")
-class EdgeGPTTool(BaseTool):
-    def __init__(self, model, name="EdgeGPTTool", description="Tool that uses EdgeGPTModel to generate responses"):
-        super().__init__(name=name, description=description)
-        self.model = model
-    def _run(self, prompt):
-        return self.model.__call__(prompt)

@@ -365,7 +365,7 @@ class FileHandler:
         try:
             if url.startswith(os.environ.get("SERVER", "http://localhost:8000")):
                 local_filepath = url[
-                    len(os.environ.get("SERVER", "http://localhost:8000")) + 1 :
+                    len(os.environ.get("SERVER", "http://localhost:8000")) + 1:
                 ]
                 local_filename = Path("file") / local_filepath.split("/")[-1]
                 src = self.path / local_filepath
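
The slice strips the server prefix plus its trailing slash from the URL; the same logic in isolation (example URL assumed):

server = "http://localhost:8000"
url = "http://localhost:8000/file/example.txt"
local_filepath = url[len(server) + 1:]
print(local_filepath)  # file/example.txt
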
