commit 4b95a5c8fb (branch: discord-bot-framework)
parent 823d87f4e9
author: Kye
Former-commit-id: 0ab9725fe7
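
No commit message survives above the diff; judging from the hunks it is a pure formatting pass: comment spacing normalized to "# ...", colons padded in slices whose bounds are expressions, imports merged and sorted, and short docstrings added to Workflow. No behavior changes.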

@@ -2,23 +2,23 @@ from swarms.workers import Worker
 from swarms.agents.meta_prompter import MetaPrompterAgent
 from langchain.llms import OpenAI
-#init llm
+# init llm
 llm = OpenAI()
-#init the meta prompter agent that optimized prompts
+# init the meta prompter agent that optimized prompts
 meta_optimizer = MetaPrompterAgent(llm=llm)
-#init the worker agent
+# init the worker agent
 worker = Worker(llm)
-#broad task to complete
+# broad task to complete
 task = "Create a feedforward in pytorch"
-#optimize the prompt
-optimized_prompt = meta_optimizer.run(task)
+# optimize the prompt
+optimized_prompt = meta_optimizer.run(task)
-#run the optimized prompt with detailed instructions
+# run the optimized prompt with detailed instructions
 result = worker.run(optimized_prompt)
-#print
-print(result)
+# print
+print(result)

@@ -977,7 +977,7 @@ class ConversableAgent(Agent):
                 )
             elif lang in ["python", "Python"]:
                 if code.startswith("# filename: "):
-                    filename = code[11: code.find("\n")].strip()
+                    filename = code[11 : code.find("\n")].strip()
                 else:
                     filename = None
                 exitcode, logs, image = self.run_code(
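
This is the first of many hunks applying one mechanical rule: when a slice bound is anything more complex than a bare name or literal, the colon gets a space on each side, PEP 8's "treat the colon like a binary operator" guidance as automated by Black (an assumption; the commit does not name the formatter). A throwaway illustration:

code = "# filename: demo.py\nprint('hi')"

# Before: tight colon, flagged when a bound is an expression.
header = code[11: code.find("\n")]

# After: symmetric spacing, because code.find("\n") is a compound expression.
header = code[11 : code.find("\n")]

# Bare names and literals keep the tight form: x[1:4], x[i:j].
assert header.strip() == "demo.py"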

@@ -23,6 +23,6 @@ class Message:
     def __repr__(self):
         """
-        __repr__ means...
+        __repr__ means...
         """
         return f"{self.timestamp} - {self.sender}: {self.content}"
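
For context, __repr__ here renders a message as "timestamp - sender: content". A hypothetical, self-contained equivalent (the real Message class may differ in fields and construction):

from dataclasses import dataclass, field
from datetime import datetime

@dataclass
class Message:
    sender: str
    content: str
    timestamp: datetime = field(default_factory=datetime.now)

    def __repr__(self):
        # Same format string as the hunk above.
        return f"{self.timestamp} - {self.sender}: {self.content}"

print(Message("worker", "task done"))
# e.g. 2024-01-01 12:00:00.000000 - worker: task done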

@@ -38,7 +38,7 @@ def crop(image, target, region):
     if "masks" in target:
         # FIXME should we update the area here if there are no boxes?
-        target["masks"] = target["masks"][:, i: i + h, j: j + w]
+        target["masks"] = target["masks"][:, i : i + h, j : j + w]
         fields.append("masks")

     # remove elements for which the boxes or masks that have zero area

@@ -159,7 +159,7 @@ class Backbone(BackboneBase):
         ), "Only resnet50 and resnet101 are available."
         assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
         num_channels_all = [256, 512, 1024, 2048]
-        num_channels = num_channels_all[4 - len(return_interm_indices):]
+        num_channels = num_channels_all[4 - len(return_interm_indices) :]
         super().__init__(backbone, train_backbone, num_channels, return_interm_indices)
@@ -224,7 +224,7 @@ def build_backbone(args):
             use_checkpoint=use_checkpoint,
         )
-        bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]
+        bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
     else:
         raise NotImplementedError("Unknown backbone {}".format(args.backbone))

@@ -649,7 +649,7 @@ class SwinTransformer(nn.Module):
                 qk_scale=qk_scale,
                 drop=drop_rate,
                 attn_drop=attn_drop_rate,
-                drop_path=dpr[sum(depths[:i_layer]): sum(depths[: i_layer + 1])],
+                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
                 norm_layer=norm_layer,
                 # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                 downsample=downsamplelist[i_layer],

@@ -221,9 +221,9 @@ def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer
             position_ids[row, col] = 0
         else:
             attention_mask[
-                row, previous_col + 1: col + 1, previous_col + 1: col + 1
+                row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
             ] = True
-            position_ids[row, previous_col + 1: col + 1] = torch.arange(
+            position_ids[row, previous_col + 1 : col + 1] = torch.arange(
                 0, col - previous_col, device=input_ids.device
             )
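
This hunk (and the near-identical one below) fills square blocks of a token-to-token attention mask between consecutive special tokens, and restarts position ids inside each span. A standalone toy sketch of the pattern (made-up sizes and special-token positions, not the GroundingDINO function itself):

import torch

# Toy version: tokens 0, 3 and 6 are "special"; each text span between two
# specials may only attend within itself.
special_cols = [0, 3, 6]
num_token = 7
attention_mask = torch.eye(num_token, dtype=torch.bool).unsqueeze(0)  # (1, N, N)
position_ids = torch.zeros((1, num_token), dtype=torch.long)

row = 0
previous_col = special_cols[0]
for col in special_cols[1:]:
    # Square block: tokens previous_col+1 .. col attend to each other.
    attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
    # Positions restart from 0 inside each span.
    position_ids[row, previous_col + 1 : col + 1] = torch.arange(0, col - previous_col)
    previous_col = col

print(attention_mask[0].int())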
@@ -273,13 +273,13 @@ def generate_masks_with_special_tokens_and_transfer_map(
             position_ids[row, col] = 0
         else:
             attention_mask[
-                row, previous_col + 1: col + 1, previous_col + 1: col + 1
+                row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
             ] = True
-            position_ids[row, previous_col + 1: col + 1] = torch.arange(
+            position_ids[row, previous_col + 1 : col + 1] = torch.arange(
                 0, col - previous_col, device=input_ids.device
             )
             c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
-            c2t_maski[previous_col + 1: col] = True
+            c2t_maski[previous_col + 1 : col] = True
             cate_to_token_mask_list[row].append(c2t_maski)
             previous_col = col

@@ -76,7 +76,7 @@ def gen_encoder_output_proposals(
     proposals = []
     _cur = 0
     for lvl, (H_, W_) in enumerate(spatial_shapes):
-        mask_flatten_ = memory_padding_mask[:, _cur: (_cur + H_ * W_)].view(
+        mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(
             N_, H_, W_, 1
         )
         valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)

@@ -619,7 +619,7 @@ def get_phrases_from_posmap(
 ):
     assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
     if posmap.dim() == 1:
-        posmap[0: left_idx + 1] = False
+        posmap[0 : left_idx + 1] = False
         posmap[right_idx:] = False
         non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
         token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]

@@ -41,7 +41,7 @@ def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
                 positive_map[j, beg_pos] = 1
                 break
             else:
-                positive_map[j, beg_pos: end_pos + 1].fill_(1)
+                positive_map[j, beg_pos : end_pos + 1].fill_(1)

     return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)

@@ -139,7 +139,7 @@ class MaskDecoder(nn.Module):
         # Run the transformer
         hs, src = self.transformer(src, pos_src, tokens)
         iou_token_out = hs[:, 0, :]
-        mask_tokens_out = hs[:, 1: (1 + self.num_mask_tokens), :]
+        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]

         # Upscale mask embeddings and predict masks using the mask tokens
         src = src.transpose(1, 2).view(b, c, h, w)

@@ -101,7 +101,7 @@ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
     ), "Batched iteration must have inputs of all the same size."
     n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
     for b in range(n_batches):
-        yield [arg[b * batch_size: (b + 1) * batch_size] for arg in args]
+        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]


 def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
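
batch_iterator is small enough to restate whole; the changed line is the slice that carves aligned batches out of every argument. A runnable copy with a toy run:

from typing import Any, Generator, List

def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
    # Same logic as the hunk above: ceil-divide into batches, slice each arg.
    assert len(args) > 0 and all(
        len(a) == len(args[0]) for a in args
    ), "Batched iteration must have inputs of all the same size."
    n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
    for b in range(n_batches):
        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]

points = [1, 2, 3, 4, 5]
labels = ["a", "b", "c", "d", "e"]
for batch in batch_iterator(2, points, labels):
    print(batch)  # [[1, 2], ['a', 'b']], then [[3, 4], ['c', 'd']], then [[5], ['e']]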
@@ -142,7 +142,7 @@ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
     idx = 0
     parity = False
     for count in rle["counts"]:
-        mask[idx: idx + count] = parity
+        mask[idx : idx + count] = parity
         idx += count
         parity ^= True
     mask = mask.reshape(w, h)
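
The decode loop paints alternating runs into a flat boolean buffer. A self-contained sketch with a toy RLE (numpy only; the reshape(w, h) mirrors the source's column-major layout):

import numpy as np

# Toy RLE: runs alternate background/foreground, starting with background.
rle = {"size": [2, 4], "counts": [3, 2, 3]}  # h=2, w=4, 8 pixels total

h, w = rle["size"]
mask = np.empty(h * w, dtype=bool)
idx = 0
parity = False  # first run is background
for count in rle["counts"]:
    mask[idx : idx + count] = parity
    idx += count
    parity ^= True
mask = mask.reshape(w, h)  # column-major layout, as in the source
print(mask.transpose())    # back to (h, w)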

@@ -207,12 +207,12 @@ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
     kernel[steps:-steps, :steps] = left
     kernel[steps:-steps, -steps:] = right

-    pt_gt_img = easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]]
+    pt_gt_img = easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]]
     gaussian_gt_img = (
         kernel * gt_img_array + (1 - kernel) * pt_gt_img
     )  # gt img with blur img
     gaussian_gt_img = gaussian_gt_img.astype(np.int64)
-    easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]] = gaussian_gt_img
+    easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]] = gaussian_gt_img
     gaussian_img = Image.fromarray(easy_img)
     return gaussian_img
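
Both touched slices address the same region: the original image is pasted back over the outpainted canvas as a per-pixel convex blend, with the kernel feathering the seam. A minimal sketch of just that blend, with stand-in arrays (all names and sizes made up):

import numpy as np

# kernel is 1.0 in the middle and ramps toward 0.0 at the border (feathering).
old_h, old_w = 4, 4
kernel = np.ones((old_h, old_w))
kernel[:1, :] = kernel[-1:, :] = kernel[:, :1] = kernel[:, -1:] = 0.5

gt_img_array = np.full((old_h, old_w), 200.0)  # original pixels
pt_gt_img = np.full((old_h, old_w), 100.0)     # generated pixels in the same region

# Convex combination: kernel weights the ground truth, (1 - kernel) the generation.
blended = kernel * gt_img_array + (1 - kernel) * pt_gt_img
print(blended)  # 200 inside, 150 on the feathered border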

@@ -317,7 +317,7 @@ def find_json(s):
     s = s.replace("'", '"')
     start = s.find("{")
     end = s.rfind("}")
-    res = s[start: end + 1]
+    res = s[start : end + 1]
     res = res.replace("\n", "")
     return res
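
find_json is small enough to quote whole; a usage sketch (note the up-front quote replacement also rewrites apostrophes inside values, a pre-existing sharp edge this commit does not touch):

def find_json(s):
    # Same body as above: take the outermost {...} span and strip newlines.
    s = s.replace("'", '"')
    start = s.find("{")
    end = s.rfind("}")
    res = s[start : end + 1]
    res = res.replace("\n", "")
    return res

reply = "Sure! Here you go:\n{'action': 'search',\n 'query': 'swarms'}"
print(find_json(reply))  # {"action": "search", "query": "swarms"}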

@@ -347,7 +347,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(token[j: j + self.embedding_ctx_length])
+                tokens.append(token[j : j + self.embedding_ctx_length])
                 indices.append(i)

         batched_embeddings: List[List[float]] = []
@@ -366,7 +366,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         for i in _iter:
             response = embed_with_retry(
                 self,
-                input=tokens[i: i + _chunk_size],
+                input=tokens[i : i + _chunk_size],
                 **self._invocation_params,
             )
             batched_embeddings.extend(r["embedding"] for r in response["data"])
@@ -428,7 +428,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(token[j: j + self.embedding_ctx_length])
+                tokens.append(token[j : j + self.embedding_ctx_length])
                 indices.append(i)

         batched_embeddings: List[List[float]] = []
@@ -436,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         for i in range(0, len(tokens), _chunk_size):
             response = await async_embed_with_retry(
                 self,
-                input=tokens[i: i + _chunk_size],
+                input=tokens[i : i + _chunk_size],
                 **self._invocation_params,
             )
             batched_embeddings.extend(r["embedding"] for r in response["data"])
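
Both the sync and async embedding paths use the same two-level chunking: long texts are split into context-length token windows, then windows are sent to the API _chunk_size at a time, with indices mapping each window back to its source text. A toy sketch of just that bookkeeping (no API calls, made-up sizes):

# Toy stand-ins: "tokens" per text, a context window of 4, request batches of 2.
texts_tokens = [[1, 2, 3, 4, 5, 6], [7, 8]]
embedding_ctx_length = 4
_chunk_size = 2

tokens, indices = [], []
for i, token in enumerate(texts_tokens):
    # Split each text's tokens into context-sized windows.
    for j in range(0, len(token), embedding_ctx_length):
        tokens.append(token[j : j + embedding_ctx_length])
        indices.append(i)  # remember which text this window came from

# Windows are then embedded _chunk_size at a time.
for i in range(0, len(tokens), _chunk_size):
    print(tokens[i : i + _chunk_size], indices[i : i + _chunk_size])
# [[1, 2, 3, 4], [5, 6]] [0, 0]
# [[7, 8]] [1]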

@@ -458,7 +458,7 @@ class BaseOpenAI(BaseLLM):
             )
             params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
         sub_prompts = [
-            prompts[i: i + self.batch_size]
+            prompts[i : i + self.batch_size]
             for i in range(0, len(prompts), self.batch_size)
         ]
         return sub_prompts
@@ -469,7 +469,7 @@ class BaseOpenAI(BaseLLM):
         """Create the LLMResult from the choices and prompts."""
         generations = []
         for i, _ in enumerate(prompts):
-            sub_choices = choices[i * self.n: (i + 1) * self.n]
+            sub_choices = choices[i * self.n : (i + 1) * self.n]
             generations.append(
                 [
                     Generation(
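
The sub_choices slice regroups the flat choices list into per-prompt groups of n generations. In isolation, with toy values:

# Toy stand-ins: 2 prompts, n=3 generations per prompt, flat choices list.
n = 3
prompts = ["p0", "p1"]
choices = ["c0", "c1", "c2", "c3", "c4", "c5"]

for i, _ in enumerate(prompts):
    sub_choices = choices[i * n : (i + 1) * n]
    print(sub_choices)
# ['c0', 'c1', 'c2']
# ['c3', 'c4', 'c5']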

@@ -50,21 +50,21 @@ class Link(Chain):
     prompt: BasePromptTemplate
     """Prompt object to use."""

     llm: BaseLanguageModel
     """Language model to call."""

     output_key: str = "text"  #: :meta private:

     output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
     """Output parser to use.
     Defaults to one that takes the most likely string but does not change it
     otherwise."""

     return_final_only: bool = True
     """Whether to return only the final parsed result. Defaults to True.
     If false, will return a bunch of extra information about the generation."""

     llm_kwargs: dict = Field(default_factory=dict)

     class Config:
@@ -349,4 +349,4 @@ class Link(Chain):
     def from_string(cls, llm: BaseLanguageModel, template: str) -> Link:
         """Create Link from LLM and template."""
         prompt_template = PromptTemplate.from_template(template)
-        return cls(llm=llm, prompt=prompt_template)
\ No newline at end of file
+        return cls(llm=llm, prompt=prompt_template)

@@ -11,7 +11,7 @@ class Workflow:
     They string together multiple tasks of varying types, and can use Short-Term Memory
     or pass specific arguments downstream.

     Usage

     llm = LLM()
     workflow = Workflow(llm)
@@ -47,11 +47,13 @@ class Workflow:
             return response

     def __init__(self, agent, parallel: bool = False):
+        """__init__"""
         self.agent = agent
         self.tasks: List[Workflow.Task] = []
         self.parallel = parallel

     def add(self, task: str) -> Task:
+        """Add a task"""
         task = self.Task(task)
         if self.last_task():
@@ -62,12 +64,15 @@ class Workflow:
         return task

     def first_task(self) -> Optional[Task]:
+        """Add first task"""
         return self.tasks[0] if self.tasks else None

     def last_task(self) -> Optional[Task]:
+        """Last task"""
         return self.tasks[-1] if self.tasks else None

     def run(self, *args) -> Task:
+        """Run tasks"""
         [task.reset() for task in self.tasks]
         if self.parallel:
@@ -79,6 +84,7 @@ class Workflow:
         return self.last_task()

     def context(self, task: Task) -> Dict[str, Any]:
+        """Context in tasks"""
         return {
             "parent_output": task.parents[0].output
             if task.parents and task.parents[0].output
@@ -88,6 +94,7 @@ class Workflow:
         }

     def __run_from_task(self, task: Optional[Task]) -> None:
+        """Run from task"""
         if task is None:
             return
         else:
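
Read together, the new docstrings sketch the intended API: tasks chain sequentially unless parallel=True, and run() returns the last task. A hedged usage sketch; EchoAgent is a stand-in for the real swarms agent/LLM, and Task.output is assumed to hold the result after run():

class EchoAgent:  # stand-in; the real Workflow wraps a swarms agent/LLM
    def run(self, prompt: str) -> str:
        return f"ran: {prompt}"

workflow = Workflow(EchoAgent())
workflow.add("Summarize the meeting notes")  # becomes the first task
workflow.add("Draft a follow-up email")      # chained after the previous task
last = workflow.run()
print(last.output if last else None)         # output of the final task (assumed attribute)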

@@ -1,29 +1,30 @@
-import interpreter
-from transformers import (
-    BlipForQuestionAnswering,
-    BlipProcessor,
-)
-from PIL import Image
-import torch
-from swarms.utils.logger import logger
-from pydantic import Field
-from langchain.tools.file_management.write import WriteFileTool
-from langchain.tools.file_management.read import ReadFileTool
-from langchain.tools import BaseTool
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.chains.qa_with_sources.loading import BaseCombineDocumentsChain
 import asyncio
 import os

 # Tools
 from contextlib import contextmanager
 from typing import Optional

+import interpreter
 import pandas as pd
+import torch
 from langchain.agents import tool
 from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
-from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
+from langchain.chains.qa_with_sources.loading import (
+    BaseCombineDocumentsChain,
+    load_qa_with_sources_chain,
+)
 from langchain.docstore.document import Document
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.tools import BaseTool
+from langchain.tools.file_management.read import ReadFileTool
+from langchain.tools.file_management.write import WriteFileTool
+from PIL import Image
+from pydantic import Field
+from transformers import (
+    BlipForQuestionAnswering,
+    BlipProcessor,
+)
+from swarms.utils.logger import logger

 ROOT_DIR = "./data/"
@@ -128,7 +129,7 @@ class WebpageQATool(BaseTool):
         results = []
         # TODO: Handle this with a MapReduceChain
         for i in range(0, len(web_docs), 4):
-            input_docs = web_docs[i: i + 4]
+            input_docs = web_docs[i : i + 4]
             window_result = self.qa_chain(
                 {"input_documents": input_docs, "question": question},
                 return_only_outputs=True,

@@ -306,7 +306,7 @@ class WriteCommand:
     @staticmethod
     def from_str(command: str) -> "WriteCommand":
         filepath = command.split(WriteCommand.separator)[0]
-        return WriteCommand(filepath, command[len(filepath) + 1:])
+        return WriteCommand(filepath, command[len(filepath) + 1 :])


 class CodeWriter:
@@ -433,7 +433,7 @@ class ReadCommand:
         if self.start == self.end:
             code = code[self.start - 1]
         else:
-            code = "".join(code[self.start - 1: self.end])
+            code = "".join(code[self.start - 1 : self.end])
         return code

     @staticmethod
@@ -590,9 +590,9 @@ class PatchCommand:
         lines[self.start.line] = (
             lines[self.start.line][: self.start.col]
             + self.content
-            + lines[self.end.line][self.end.col:]
+            + lines[self.end.line][self.end.col :]
         )
-        lines = lines[: self.start.line + 1] + lines[self.end.line + 1:]
+        lines = lines[: self.start.line + 1] + lines[self.end.line + 1 :]
         after = self.write_lines(lines)
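
The patch splice replaces everything between a (line, col) start and a (line, col) end: glue the head of the start line to the tail of the end line, then drop the lines in between. In isolation, with hypothetical coordinates:

# Replace everything from (start_line, start_col) to (end_line, end_col).
lines = ["alpha", "beta", "gamma"]
start_line, start_col = 0, 3  # inside "alpha"
end_line, end_col = 2, 2      # inside "gamma"
content = "---"

lines[start_line] = lines[start_line][:start_col] + content + lines[end_line][end_col:]
lines = lines[: start_line + 1] + lines[end_line + 1 :]
print(lines)  # ['alp---mma']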

@@ -365,7 +365,7 @@ class FileHandler:
         try:
             if url.startswith(os.environ.get("SERVER", "http://localhost:8000")):
                 local_filepath = url[
-                    len(os.environ.get("SERVER", "http://localhost:8000")) + 1:
+                    len(os.environ.get("SERVER", "http://localhost:8000")) + 1 :
                 ]
                 local_filename = Path("file") / local_filepath.split("/")[-1]
                 src = self.path / local_filepath

@@ -1,18 +1,20 @@
+from typing import Dict, List, Optional, Union
+
 import faiss
 from langchain.docstore import InMemoryDocstore
 from langchain.embeddings import OpenAIEmbeddings
 from langchain.tools.human.tool import HumanInputRun
 from langchain.vectorstores import FAISS
 from langchain_experimental.autonomous_agents import AutoGPT
-from typing import Dict, List, Optional, Union

 from swarms.agents.message import Message
 from swarms.tools.autogpt import (
     ReadFileTool,
+    WebpageQATool,
     WriteFileTool,
     compile,
-    process_csv,
     load_qa_with_sources_chain,
-    WebpageQATool,
+    process_csv,
 )
 from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
