pull/59/head
Kye 1 year ago
parent 6aa7b00bb8
commit 0ab9725fe7

@@ -2,23 +2,23 @@ from swarms.workers import Worker
 from swarms.agents.meta_prompter import MetaPrompterAgent
 from langchain.llms import OpenAI

-#init llm
+# init llm
 llm = OpenAI()

-#init the meta prompter agent that optimized prompts
+# init the meta prompter agent that optimizes prompts
 meta_optimizer = MetaPrompterAgent(llm=llm)

-#init the worker agent
+# init the worker agent
 worker = Worker(llm)

-#broad task to complete
+# broad task to complete
 task = "Create a feedforward in pytorch"

-#optimize the prompt
+# optimize the prompt
 optimized_prompt = meta_optimizer.run(task)

-#run the optimized prompt with detailed instructions
+# run the optimized prompt with detailed instructions
 result = worker.run(optimized_prompt)

-#print
+# print the result
 print(result)

@@ -977,7 +977,7 @@ class ConversableAgent(Agent):
             )
         elif lang in ["python", "Python"]:
             if code.startswith("# filename: "):
-                filename = code[11: code.find("\n")].strip()
+                filename = code[11 : code.find("\n")].strip()
             else:
                 filename = None
             exitcode, logs, image = self.run_code(
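Note: nearly every hunk in this commit applies the same Black formatting rule. When a slice bound is a complex expression, the colon is padded with spaces like a low-priority binary operator; simple bounds stay unspaced (PEP 8). A minimal sketch of the rule, using a hypothetical input string:

# Black/PEP 8 slice spacing: complex bounds pad the colon, simple ones don't.
code = "# filename: model.py\nprint('hi')"  # hypothetical input
filename = code[11 : code.find("\n")].strip()  # complex bound -> spaced colon
prefix = code[:11]  # simple bound -> no spaces
assert filename == "model.py"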

@@ -23,6 +23,6 @@ class Message:

     def __repr__(self):
        """
        Return a string representation of the message.
        """
        return f"{self.timestamp} - {self.sender}: {self.content}"

@@ -38,7 +38,7 @@ def crop(image, target, region):
     if "masks" in target:
         # FIXME should we update the area here if there are no boxes?
-        target["masks"] = target["masks"][:, i: i + h, j: j + w]
+        target["masks"] = target["masks"][:, i : i + h, j : j + w]
         fields.append("masks")

     # remove elements for which the boxes or masks have zero area

@@ -159,7 +159,7 @@ class Backbone(BackboneBase):
        ), "Only resnet50 and resnet101 are available."
        assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
        num_channels_all = [256, 512, 1024, 2048]
-       num_channels = num_channels_all[4 - len(return_interm_indices):]
+       num_channels = num_channels_all[4 - len(return_interm_indices) :]
        super().__init__(backbone, train_backbone, num_channels, return_interm_indices)
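For context on what this slice selects, a sketch with the values from the hunk: return_interm_indices names which of the four ResNet stages are returned, and slicing num_channels_all from the end keeps one channel count per returned stage.

num_channels_all = [256, 512, 1024, 2048]
return_interm_indices = [1, 2, 3]  # hypothetical choice from the allowed list
num_channels = num_channels_all[4 - len(return_interm_indices) :]
assert num_channels == [512, 1024, 2048]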
@@ -224,7 +224,7 @@ def build_backbone(args):
            use_checkpoint=use_checkpoint,
        )
-       bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]
+       bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
    else:
        raise NotImplementedError("Unknown backbone {}".format(args.backbone))

@@ -649,7 +649,7 @@ class SwinTransformer(nn.Module):
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
-               drop_path=dpr[sum(depths[:i_layer]): sum(depths[: i_layer + 1])],
+               drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
                norm_layer=norm_layer,
                # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                downsample=downsamplelist[i_layer],

@@ -221,9 +221,9 @@ def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer
            position_ids[row, col] = 0
        else:
            attention_mask[
-               row, previous_col + 1: col + 1, previous_col + 1: col + 1
+               row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
            ] = True
-           position_ids[row, previous_col + 1: col + 1] = torch.arange(
+           position_ids[row, previous_col + 1 : col + 1] = torch.arange(
                0, col - previous_col, device=input_ids.device
            )
@@ -273,13 +273,13 @@ def generate_masks_with_special_tokens_and_transfer_map(
            position_ids[row, col] = 0
        else:
            attention_mask[
-               row, previous_col + 1: col + 1, previous_col + 1: col + 1
+               row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
            ] = True
-           position_ids[row, previous_col + 1: col + 1] = torch.arange(
+           position_ids[row, previous_col + 1 : col + 1] = torch.arange(
                0, col - previous_col, device=input_ids.device
            )
            c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
-           c2t_maski[previous_col + 1: col] = True
+           c2t_maski[previous_col + 1 : col] = True
            cate_to_token_mask_list[row].append(c2t_maski)
        previous_col = col
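A toy version of what these slice assignments build, under assumed inputs (the ids and special-token set below are hypothetical): tokens between consecutive special tokens attend only within their own block, and position ids restart at zero inside each block.

import torch

input_ids = torch.tensor([101, 7, 8, 1012, 9, 10, 11, 102])  # hypothetical ids
special_tokens = {101, 1012, 102}
num_token = input_ids.numel()
attention_mask = torch.eye(num_token, dtype=torch.bool)  # self-attention baseline
position_ids = torch.zeros(num_token, dtype=torch.long)
previous_col = 0
for col in range(num_token):
    if int(input_ids[col]) not in special_tokens:
        continue
    if col not in (0, num_token - 1):
        # open a block over the tokens since the previous special token
        attention_mask[previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
        position_ids[previous_col + 1 : col + 1] = torch.arange(col - previous_col)
    previous_col = col
# tokens 1..3 now form one True block with positions 0..2; the first and
# last special tokens keep only self-attention, mirroring the branch above.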

@@ -76,7 +76,7 @@ def gen_encoder_output_proposals(
    proposals = []
    _cur = 0
    for lvl, (H_, W_) in enumerate(spatial_shapes):
-       mask_flatten_ = memory_padding_mask[:, _cur: (_cur + H_ * W_)].view(
+       mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(
            N_, H_, W_, 1
        )
        valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)

@@ -619,7 +619,7 @@ def get_phrases_from_posmap(
 ):
    assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
    if posmap.dim() == 1:
-       posmap[0: left_idx + 1] = False
+       posmap[0 : left_idx + 1] = False
        posmap[right_idx:] = False
        non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
        token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]

@@ -41,7 +41,7 @@ def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
                positive_map[j, beg_pos] = 1
                break
        else:
-           positive_map[j, beg_pos: end_pos + 1].fill_(1)
+           positive_map[j, beg_pos : end_pos + 1].fill_(1)
    return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)

@@ -139,7 +139,7 @@ class MaskDecoder(nn.Module):
        # Run the transformer
        hs, src = self.transformer(src, pos_src, tokens)
        iou_token_out = hs[:, 0, :]
-       mask_tokens_out = hs[:, 1: (1 + self.num_mask_tokens), :]
+       mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]

        # Upscale mask embeddings and predict masks using the mask tokens
        src = src.transpose(1, 2).view(b, c, h, w)

@@ -101,7 +101,7 @@ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
    ), "Batched iteration must have inputs of all the same size."
    n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
    for b in range(n_batches):
-       yield [arg[b * batch_size: (b + 1) * batch_size] for arg in args]
+       yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]

 def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
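The slice being reformatted here implements plain fixed-size batching; a self-contained sketch of the same arithmetic:

# Ceil-divide into batches, then take the same window from each argument.
batch_size = 3
data = list(range(8))
n_batches = len(data) // batch_size + int(len(data) % batch_size != 0)
batches = [data[b * batch_size : (b + 1) * batch_size] for b in range(n_batches)]
assert batches == [[0, 1, 2], [3, 4, 5], [6, 7]]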
@@ -142,7 +142,7 @@ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
    idx = 0
    parity = False
    for count in rle["counts"]:
-       mask[idx: idx + count] = parity
+       mask[idx : idx + count] = parity
        idx += count
        parity ^= True
    mask = mask.reshape(w, h)
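This loop decodes a run-length encoding: counts alternate runs of False and True pixels over the flattened mask. A runnable miniature (the 3x4 size and counts are made up; the full function also transposes back to height-first order):

import numpy as np

rle = {"size": [3, 4], "counts": [5, 4, 3]}  # hypothetical RLE
h, w = rle["size"]
mask = np.empty(h * w, dtype=bool)
idx, parity = 0, False
for count in rle["counts"]:
    mask[idx : idx + count] = parity  # write one run, then flip parity
    idx += count
    parity ^= True
mask = mask.reshape(w, h)  # column-major runs, as in the hunk above
print(mask.astype(int))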

@@ -207,12 +207,12 @@ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
    kernel[steps:-steps, :steps] = left
    kernel[steps:-steps, -steps:] = right

-   pt_gt_img = easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]]
+   pt_gt_img = easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]]
    gaussian_gt_img = (
        kernel * gt_img_array + (1 - kernel) * pt_gt_img
    )  # gt img with blur img
    gaussian_gt_img = gaussian_gt_img.astype(np.int64)
-   easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]] = gaussian_gt_img
+   easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]] = gaussian_gt_img
    gaussian_img = Image.fromarray(easy_img)
    return gaussian_img

@@ -317,7 +317,7 @@ def find_json(s):
    s = s.replace("'", '"')
    start = s.find("{")
    end = s.rfind("}")
-   res = s[start: end + 1]
+   res = s[start : end + 1]
    res = res.replace("\n", "")
    return res
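What find_json does end to end: grab the outermost {...} span from a noisy model reply, normalize quotes, and drop newlines. A quick check with a made-up reply:

s = "Sure! Here you go: {'a': 1,\n 'b': {'c': 2}} hope that helps"  # hypothetical
s = s.replace("'", '"')
start, end = s.find("{"), s.rfind("}")
res = s[start : end + 1].replace("\n", "")
print(res)  # {"a": 1, "b": {"c": 2}}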

@@ -347,7 +347,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
-               tokens.append(token[j: j + self.embedding_ctx_length])
+               tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings: List[List[float]] = []
@@ -366,7 +366,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
        for i in _iter:
            response = embed_with_retry(
                self,
-               input=tokens[i: i + _chunk_size],
+               input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings.extend(r["embedding"] for r in response["data"])
@@ -428,7 +428,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
-               tokens.append(token[j: j + self.embedding_ctx_length])
+               tokens.append(token[j : j + self.embedding_ctx_length])
                indices.append(i)

        batched_embeddings: List[List[float]] = []
@@ -436,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
        for i in range(0, len(tokens), _chunk_size):
            response = await async_embed_with_retry(
                self,
-               input=tokens[i: i + _chunk_size],
+               input=tokens[i : i + _chunk_size],
                **self._invocation_params,
            )
            batched_embeddings.extend(r["embedding"] for r in response["data"])
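Both the sync and async paths share this chunking scheme: each text's token list is cut into embedding_ctx_length windows, and indices records which source text every window belongs to, so the per-window embeddings can be recombined afterwards. Stripped to the arithmetic (toy lengths, not the real model-sized context):

embedding_ctx_length = 4  # hypothetical; the real limit is model-sized
token_lists = [[1, 2, 3, 4, 5, 6], [7, 8]]
tokens, indices = [], []
for i, token in enumerate(token_lists):
    for j in range(0, len(token), embedding_ctx_length):
        tokens.append(token[j : j + embedding_ctx_length])
        indices.append(i)
assert tokens == [[1, 2, 3, 4], [5, 6], [7, 8]]
assert indices == [0, 0, 1]  # windows 0 and 1 came from text 0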

@@ -458,7 +458,7 @@ class BaseOpenAI(BaseLLM):
            )
            params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
        sub_prompts = [
-           prompts[i: i + self.batch_size]
+           prompts[i : i + self.batch_size]
            for i in range(0, len(prompts), self.batch_size)
        ]
        return sub_prompts
@@ -469,7 +469,7 @@ class BaseOpenAI(BaseLLM):
        """Create the LLMResult from the choices and prompts."""
        generations = []
        for i, _ in enumerate(prompts):
-           sub_choices = choices[i * self.n: (i + 1) * self.n]
+           sub_choices = choices[i * self.n : (i + 1) * self.n]
            generations.append(
                [
                    Generation(
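The stride here is self.n because the completions API returns n choices per prompt, concatenated in prompt order; prompt i therefore owns choices[i * n : (i + 1) * n]. In miniature:

n = 2  # hypothetical completions per prompt
prompts = ["a", "b", "c"]
choices = ["a0", "a1", "b0", "b1", "c0", "c1"]
per_prompt = [choices[i * n : (i + 1) * n] for i, _ in enumerate(prompts)]
assert per_prompt == [["a0", "a1"], ["b0", "b1"], ["c0", "c1"]]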

@ -50,21 +50,21 @@ class Link(Chain):
prompt: BasePromptTemplate prompt: BasePromptTemplate
"""Prompt object to use.""" """Prompt object to use."""
llm: BaseLanguageModel llm: BaseLanguageModel
"""Language model to call.""" """Language model to call."""
output_key: str = "text" #: :meta private: output_key: str = "text" #: :meta private:
output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser) output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
"""Output parser to use. """Output parser to use.
Defaults to one that takes the most likely string but does not change it Defaults to one that takes the most likely string but does not change it
otherwise.""" otherwise."""
return_final_only: bool = True return_final_only: bool = True
"""Whether to return only the final parsed result. Defaults to True. """Whether to return only the final parsed result. Defaults to True.
If false, will return a bunch of extra information about the generation.""" If false, will return a bunch of extra information about the generation."""
llm_kwargs: dict = Field(default_factory=dict) llm_kwargs: dict = Field(default_factory=dict)
class Config: class Config:
@@ -349,4 +349,4 @@ class Link(Chain):
    def from_string(cls, llm: BaseLanguageModel, template: str) -> Link:
        """Create Link from LLM and template."""
        prompt_template = PromptTemplate.from_template(template)
        return cls(llm=llm, prompt=prompt_template)

@@ -11,7 +11,7 @@ class Workflow:
    They string together multiple tasks of varying types, and can use Short-Term Memory
    or pass specific arguments downstream.

    Usage

    llm = LLM()
    workflow = Workflow(llm)
@@ -47,11 +47,13 @@ class Workflow:
            return response

     def __init__(self, agent, parallel: bool = False):
+        """Initialize the workflow with an agent"""
         self.agent = agent
         self.tasks: List[Workflow.Task] = []
         self.parallel = parallel

     def add(self, task: str) -> Task:
+        """Add a task to the workflow"""
         task = self.Task(task)

         if self.last_task():
@ -62,12 +64,15 @@ class Workflow:
return task return task
def first_task(self) -> Optional[Task]: def first_task(self) -> Optional[Task]:
"""Add first task"""
return self.tasks[0] if self.tasks else None return self.tasks[0] if self.tasks else None
def last_task(self) -> Optional[Task]: def last_task(self) -> Optional[Task]:
"""Last task"""
return self.tasks[-1] if self.tasks else None return self.tasks[-1] if self.tasks else None
def run(self, *args) -> Task: def run(self, *args) -> Task:
"""Run tasks"""
[task.reset() for task in self.tasks] [task.reset() for task in self.tasks]
if self.parallel: if self.parallel:
@ -79,6 +84,7 @@ class Workflow:
return self.last_task() return self.last_task()
def context(self, task: Task) -> Dict[str, Any]: def context(self, task: Task) -> Dict[str, Any]:
"""Context in tasks"""
return { return {
"parent_output": task.parents[0].output "parent_output": task.parents[0].output
if task.parents and task.parents[0].output if task.parents and task.parents[0].output
@ -88,6 +94,7 @@ class Workflow:
} }
def __run_from_task(self, task: Optional[Task]) -> None: def __run_from_task(self, task: Optional[Task]) -> None:
"""Run from task"""
if task is None: if task is None:
return return
else: else:

@@ -1,29 +1,30 @@
-import interpreter
-from transformers import (
-    BlipForQuestionAnswering,
-    BlipProcessor,
-)
-from PIL import Image
-import torch
-from swarms.utils.logger import logger
-from pydantic import Field
-from langchain.tools.file_management.write import WriteFileTool
-from langchain.tools.file_management.read import ReadFileTool
-from langchain.tools import BaseTool
-from langchain.text_splitter import RecursiveCharacterTextSplitter
-from langchain.chains.qa_with_sources.loading import BaseCombineDocumentsChain
 import asyncio
 import os

-# Tools
 from contextlib import contextmanager
 from typing import Optional

+import interpreter
 import pandas as pd
+import torch
 from langchain.agents import tool
 from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
-from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
+from langchain.chains.qa_with_sources.loading import (
+    BaseCombineDocumentsChain,
+    load_qa_with_sources_chain,
+)
 from langchain.docstore.document import Document
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.tools import BaseTool
+from langchain.tools.file_management.read import ReadFileTool
+from langchain.tools.file_management.write import WriteFileTool
+from PIL import Image
+from pydantic import Field
+from transformers import (
+    BlipForQuestionAnswering,
+    BlipProcessor,
+)
+from swarms.utils.logger import logger

 ROOT_DIR = "./data/"
@@ -128,7 +129,7 @@ class WebpageQATool(BaseTool):
        results = []
        # TODO: Handle this with a MapReduceChain
        for i in range(0, len(web_docs), 4):
-           input_docs = web_docs[i: i + 4]
+           input_docs = web_docs[i : i + 4]
            window_result = self.qa_chain(
                {"input_documents": input_docs, "question": question},
                return_only_outputs=True,

@ -306,7 +306,7 @@ class WriteCommand:
@staticmethod @staticmethod
def from_str(command: str) -> "WriteCommand": def from_str(command: str) -> "WriteCommand":
filepath = command.split(WriteCommand.separator)[0] filepath = command.split(WriteCommand.separator)[0]
return WriteCommand(filepath, command[len(filepath) + 1:]) return WriteCommand(filepath, command[len(filepath) + 1 :])
class CodeWriter: class CodeWriter:
@@ -433,7 +433,7 @@ class ReadCommand:
        if self.start == self.end:
            code = code[self.start - 1]
        else:
-           code = "".join(code[self.start - 1: self.end])
+           code = "".join(code[self.start - 1 : self.end])
        return code

    @staticmethod
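ReadCommand addresses lines the way editors do, 1-indexed and inclusive, so the slice shifts by one on the left only. A sketch with invented lines:

code = ["line1\n", "line2\n", "line3\n", "line4\n"]  # hypothetical file contents
start, end = 2, 3  # 1-indexed, inclusive, as in the command
snippet = "".join(code[start - 1 : end])
assert snippet == "line2\nline3\n"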
@@ -590,9 +590,9 @@ class PatchCommand:
        lines[self.start.line] = (
            lines[self.start.line][: self.start.col]
            + self.content
-           + lines[self.end.line][self.end.col:]
+           + lines[self.end.line][self.end.col :]
        )
-       lines = lines[: self.start.line + 1] + lines[self.end.line + 1:]
+       lines = lines[: self.start.line + 1] + lines[self.end.line + 1 :]
        after = self.write_lines(lines)
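The two reformatted slices implement a text splice between (start.line, start.col) and (end.line, end.col): keep the head of the first line, append the patch content plus the tail of the last line, then drop the lines in between. A worked example with made-up coordinates:

lines = ["alpha\n", "beta\n", "gamma\n"]  # hypothetical buffer
start_line, start_col = 0, 2
end_line, end_col = 2, 3
content = "XX"
lines[start_line] = (
    lines[start_line][:start_col] + content + lines[end_line][end_col:]
)
lines = lines[: start_line + 1] + lines[end_line + 1 :]
assert lines == ["alXXma\n"]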

@@ -365,7 +365,7 @@ class FileHandler:
        try:
            if url.startswith(os.environ.get("SERVER", "http://localhost:8000")):
                local_filepath = url[
-                   len(os.environ.get("SERVER", "http://localhost:8000")) + 1:
+                   len(os.environ.get("SERVER", "http://localhost:8000")) + 1 :
                ]
                local_filename = Path("file") / local_filepath.split("/")[-1]
                src = self.path / local_filepath
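The slice trims the server prefix plus its trailing slash off a served-file URL, leaving the path relative to the local data directory. With the default SERVER value:

import os

server = os.environ.get("SERVER", "http://localhost:8000")
url = "http://localhost:8000/file/report.pdf"  # hypothetical served file
if url.startswith(server):
    local_filepath = url[len(server) + 1 :]  # drop "<server>/"
    print(local_filepath)  # file/report.pdf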

@@ -1,18 +1,20 @@
+from typing import Dict, List, Optional, Union
+
 import faiss
 from langchain.docstore import InMemoryDocstore
 from langchain.embeddings import OpenAIEmbeddings
 from langchain.tools.human.tool import HumanInputRun
 from langchain.vectorstores import FAISS
 from langchain_experimental.autonomous_agents import AutoGPT
-from typing import Dict, List, Optional, Union
+
 from swarms.agents.message import Message
 from swarms.tools.autogpt import (
     ReadFileTool,
+    WebpageQATool,
     WriteFileTool,
     compile,
-    process_csv,
     load_qa_with_sources_chain,
-    WebpageQATool,
+    process_csv,
 )
 from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
