quality control

pull/59/head
Kye 1 year ago
parent 0ab2d9108d
commit bcac30d456

@@ -0,0 +1,30 @@
name: Linting and Formatting

on:
  push:
    branches:
      - main

jobs:
  lint_and_format:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up Python
        uses: actions/setup-python@v3
        with:
          python-version: "3.x"

      - name: Install dependencies
        run: pip install -r requirements.txt

      - name: Format Python files
        run: find swarms -name "*.py" -type f -exec autopep8 --in-place --aggressive --aggressive {} +

      # NOTE: autopep8 only edits the working tree; without a commit there is
      # nothing for the push step to publish. The committer identity below is
      # an illustrative placeholder.
      - name: Commit formatting changes
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          git add -A
          git diff --cached --quiet || git commit -m "chore: apply autopep8 formatting"

      - name: Push changes
        uses: ad-m/github-push-action@master
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}

@@ -63,3 +63,11 @@ types-pytz = "^2023.3.0.0"
black = "^23.1.0"
types-chardet = "^5.0.4.6"
mypy-protobuf = "^3.0.0"
[tool.autopep8]
max_line_length = 120
ignore = "E501,W6" # or ["E501", "W6"]
in-place = true
recursive = true
aggressive = 3
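
For reference, the same settings can be exercised programmatically: autopep8 exposes fix_code(), and the options below simply mirror the [tool.autopep8] section above (the sample source string is illustrative).

import autopep8

messy = "x = { 'a':1,'b':2 }\n"
fixed = autopep8.fix_code(
    messy,
    options={"max_line_length": 120, "ignore": ["E501", "W6"], "aggressive": 3},
)
print(fixed)  # x = {'a': 1, 'b': 2}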

@@ -50,6 +50,7 @@ torchmetrics
transformers
webdataset
yapf
autopep8
mkdocs

@@ -1,23 +1,23 @@
# swarms
from swarms import agents
from swarms.swarms.orchestrate import Orchestrator
from swarms import swarms
from swarms import structs
from swarms import models
from swarms.workers.worker import Worker
from swarms import workers
from swarms.logo import logo2
print(logo2)
# worker
from swarms import workers
from swarms.workers.worker import Worker
# boss
# from swarms.boss.boss_node import Boss
# models
from swarms import models
# structs
from swarms import structs
# swarms
from swarms import swarms
from swarms.swarms.orchestrate import Orchestrator
# agents
from swarms import agents

@ -1,15 +1,14 @@
"""Agent Infrastructure, models, memory, utils, tools""" """Agent Infrastructure, models, memory, utils, tools"""
#agents # agents
# from swarms.agents.profitpilot import ProfitPilot # from swarms.agents.profitpilot import ProfitPilot
# from swarms.agents.aot import AoTAgent # from swarms.agents.aot import AoTAgent
# from swarms.agents.multi_modal_visual_agent import MultiModalAgent # from swarms.agents.multi_modal_visual_agent import MultiModalAgent
from swarms.agents.omni_modal_agent import OmniModalAgent from swarms.agents.omni_modal_agent import OmniModalAgent
# utils
#utils
from swarms.agents.message import Message from swarms.agents.message import Message
from swarms.agents.stream_response import stream from swarms.agents.stream_response import stream
from swarms.agents.base import AbstractAgent from swarms.agents.base import AbstractAgent

@@ -7,6 +7,7 @@ import openai
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


class OpenAI:
    def __init__(
        self,
@@ -23,7 +24,7 @@ class OpenAI:
        else:
            raise Exception("Please provide OpenAI API key")
        if api_base == "" or api_base is None:
            api_base = os.environ.get("OPENAI_API_BASE", "")  # if not set, use the default base path of "https://api.openai.com/v1"
        if api_base != "":
            # e.g. https://api.openai.com/v1/ or your custom url
@@ -75,7 +76,7 @@ class OpenAI:
                temperature=temperature,
            )
            with open("openai.logs", 'a') as log_file:
                log_file.write("\n" + "-----------" + '\n' + "Prompt : " + prompt + "\n")
            return response
        except openai.error.RateLimitError as e:
            sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
@@ -111,7 +112,7 @@ class OpenAI:
        initial_prompt,
        rejected_solutions=None
    ):
        if (isinstance(state, str)):
            state_text = state
        else:
            state_text = '\n'.join(state)
@@ -134,7 +135,6 @@ class OpenAI:
        # print(f"Generated thoughts: {thoughts}")
        return thoughts

    def generate_solution(self,
                          initial_prompt,
                          state,
@@ -169,7 +169,7 @@ class OpenAI:
        if self.evaluation_strategy == 'value':
            state_values = {}
            for state in states:
                if (isinstance(state, str)):
                    state_text = state
                else:
                    state_text = '\n'.join(state)
@@ -193,6 +193,8 @@ class OpenAI:
        else:
            raise ValueError("Invalid evaluation strategy. Choose 'value' or 'vote'.")


class AoTAgent:
    def __init__(
        self,
@@ -203,7 +205,7 @@ class AoTAgent:
        backtracking_threshold=0.4,
        initial_prompt=None,
        openai_api_key: str = None,
        model=None,
    ):
        self.num_thoughts = num_thoughts
        self.max_steps = max_steps
@@ -245,7 +247,7 @@ class AoTAgent:
            child = (state, next_state) if isinstance(state, str) else (*state, next_state)
            self.dfs(child, step + 1)

        # backtracking
        best_value = max([value for _, value in self.output])
        if best_value < self.backtracking_threshold:
            self.output.pop()
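
Several hunks above swap type(x) == str for isinstance(x, str). This is more than style: isinstance also accepts subclasses, which the exact-type comparison rejects. A minimal illustration (the subclass name is made up):

class UserPrompt(str):
    """A str subclass, e.g. a tagged prompt type."""

s = UserPrompt("hello")
print(type(s) == str)      # False: exact-type comparison ignores subclasses
print(isinstance(s, str))  # True: instances of str subclasses still count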

@@ -17,7 +17,7 @@ class AbstractAgent:
    def __init__(
        self,
        name: str,
        # tools: List[Tool],
        #memory: Memory
    ):
        """
@@ -62,4 +62,3 @@ class AbstractAgent:
    def _astep(self, message: str):
        """Asynchronous step"""

@@ -22,8 +22,6 @@ except ImportError:
        return x

logger = logging.getLogger(__name__)
@@ -902,7 +900,7 @@ class ConversableAgent(Agent):
            exitcode, logs, image = self.run_code(code, lang=lang, **self._code_execution_config)
        elif lang in ["python", "Python"]:
            if code.startswith("# filename: "):
                filename = code[11: code.find("\n")].strip()
            else:
                filename = None
            exitcode, logs, image = self.run_code(
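
The slice rewrites here (and in many later hunks) are autopep8's E203 fix, which removes the space before the colon in a slice; the slicing behavior is identical, only the spacing differs:

code = "# filename: demo.py\nprint('hi')"
a = code[11 : code.find("\n")]  # old spacing (flagged as E203)
b = code[11: code.find("\n")]   # autopep8's output
assert a == b == " demo.py"     # same slice either way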

@@ -3,6 +3,7 @@ from typing import Any, Dict, List
from swarms.memory.base_memory import BaseChatMemory, get_prompt_input_key
from swarms.memory.base import VectorStoreRetriever


class AgentMemory(BaseChatMemory):
    retriever: VectorStoreRetriever
    """VectorStoreRetriever object to connect to."""

@@ -1,5 +1,6 @@
import datetime


class Message:
    """
    Represents a message with timestamp and optional metadata.

@@ -3,5 +3,3 @@
# from .GroundingDINO.groundingdino.util import box_ops, SLConfig
# from .GroundingDINO.groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
# from .segment_anything.segment_anything import build_sam, SamPredictor, SamAutomaticMaskGenerator

@@ -38,7 +38,7 @@ def crop(image, target, region):
    if "masks" in target:
        # FIXME should we update the area here if there are no boxes?
        target["masks"] = target["masks"][:, i: i + h, j: j + w]
        fields.append("masks")

    # remove elements for which the boxes or masks have zero area

@@ -11,4 +11,3 @@
# Copied from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------

@@ -139,7 +139,7 @@ class Backbone(BackboneBase):
        assert name not in ("resnet18", "resnet34"), "Only resnet50 and resnet101 are available."
        assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
        num_channels_all = [256, 512, 1024, 2048]
        num_channels = num_channels_all[4 - len(return_interm_indices):]
        super().__init__(backbone, train_backbone, num_channels, return_interm_indices)
@@ -204,7 +204,7 @@ def build_backbone(args):
            use_checkpoint=use_checkpoint,
        )
        bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]
    else:
        raise NotImplementedError("Unknown backbone {}".format(args.backbone))

@@ -614,7 +614,7 @@ class SwinTransformer(nn.Module):
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_layer]): sum(depths[: i_layer + 1])],
                norm_layer=norm_layer,
                # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                downsample=downsamplelist[i_layer],

@@ -203,8 +203,8 @@ def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer
            attention_mask[row, col, col] = True
            position_ids[row, col] = 0
        else:
            attention_mask[row, previous_col + 1: col + 1, previous_col + 1: col + 1] = True
            position_ids[row, previous_col + 1: col + 1] = torch.arange(
                0, col - previous_col, device=input_ids.device
            )
@@ -248,12 +248,12 @@ def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_token
            attention_mask[row, col, col] = True
            position_ids[row, col] = 0
        else:
            attention_mask[row, previous_col + 1: col + 1, previous_col + 1: col + 1] = True
            position_ids[row, previous_col + 1: col + 1] = torch.arange(
                0, col - previous_col, device=input_ids.device
            )
            c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
            c2t_maski[previous_col + 1: col] = True
            cate_to_token_mask_list[row].append(c2t_maski)
        previous_col = col

@@ -27,7 +27,7 @@ from torch.nn.init import constant_, xavier_uniform_
try:
    from groundingdino import _C
except BaseException:
    warnings.warn("Failed to load custom C++ ops. Running on CPU mode Only!")
@@ -241,7 +241,6 @@ class MultiScaleDeformableAttention(nn.Module):
        level_start_index: Optional[torch.Tensor] = None,
        **kwargs
    ) -> torch.Tensor:
        """Forward Function of MultiScaleDeformableAttention
        Args:
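
The bare except clauses above become except BaseException throughout this commit. That is autopep8's E722 fix, and it makes the existing behavior explicit rather than changing it: a bare except already catches BaseException, including KeyboardInterrupt and SystemExit, whereas code that only wants ordinary errors would catch Exception. A small runnable sketch:

def caught_by(exc_type):
    try:
        raise KeyboardInterrupt
    except exc_type:
        return True
    except BaseException:
        return False

print(caught_by(BaseException))  # True: same reach as a bare "except:"
print(caught_by(Exception))      # False: KeyboardInterrupt escapes Exception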

@@ -70,7 +70,7 @@ def gen_encoder_output_proposals(
    proposals = []
    _cur = 0
    for lvl, (H_, W_) in enumerate(spatial_shapes):
        mask_flatten_ = memory_padding_mask[:, _cur: (_cur + H_ * W_)].view(N_, H_, W_, 1)
        valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
        valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)

@@ -1,6 +1,7 @@
from transformers import AutoTokenizer, BertModel, RobertaModel
import os


def get_tokenlizer(text_encoder_type):
    if not isinstance(text_encoder_type, str):
        # print("text_encoder_type is not a str")

@@ -170,7 +170,7 @@ class SLConfig(object):
        elif isinstance(b, list):
            try:
                _ = int(k)
            except BaseException:
                raise TypeError(
                    f"b is a list, " f"index {k} should be an int when input but {type(k)}"
                )

@@ -268,6 +268,7 @@ def get_embedder(multires, i=0):
    }
    embedder_obj = Embedder(**embed_kwargs)

    def embed(x, eo=embedder_obj):
        return eo.embed(x)
    return embed, embedder_obj.out_dim
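
The def embed(x, eo=embedder_obj) pattern above uses a default argument to bind the embedder at definition time, so the returned function keeps working even if the surrounding name is later rebound. A small sketch of the difference (names are illustrative):

obj = "first"

def late(x):
    return (x, obj)       # looks up "obj" at call time

def early(x, o=obj):
    return (x, o)         # "obj" was captured when the def ran

obj = "second"
print(late(1))   # (1, 'second') - follows the rebinding
print(early(1))  # (1, 'first')  - keeps the original object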

@@ -243,7 +243,7 @@ class COCOVisualizer:
        for ann in anns:
            c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
            if "segmentation" in ann:
                if isinstance(ann["segmentation"], list):
                    # polygon
                    for seg in ann["segmentation"]:
                        poly = np.array(seg).reshape((int(len(seg) / 2), 2))
@@ -252,7 +252,7 @@ class COCOVisualizer:
                else:
                    # mask
                    t = self.imgs[ann["image_id"]]
                    if isinstance(ann["segmentation"]["counts"], list):
                        rle = maskUtils.frPyObjects(
                            [ann["segmentation"]], t["height"], t["width"]
                        )
@@ -267,7 +267,7 @@ class COCOVisualizer:
                    for i in range(3):
                        img[:, :, i] = color_mask[i]
                    ax.imshow(np.dstack((img, m * 0.5)))
                if "keypoints" in ann and isinstance(ann["keypoints"], list):
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1
                    kp = np.array(ann["keypoints"])

@@ -24,14 +24,14 @@ def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
                beg_pos = tokenized.char_to_token(beg + 1)
                if beg_pos is None:
                    beg_pos = tokenized.char_to_token(beg + 2)
            except BaseException:
                beg_pos = None
            if end_pos is None:
                try:
                    end_pos = tokenized.char_to_token(end - 2)
                    if end_pos is None:
                        end_pos = tokenized.char_to_token(end - 3)
                except BaseException:
                    end_pos = None
            if beg_pos is None or end_pos is None:
                continue
@@ -41,7 +41,7 @@ def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
                    positive_map[j, beg_pos] = 1
                    break
            else:
                positive_map[j, beg_pos: end_pos + 1].fill_(1)
    return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)

@@ -3,4 +3,3 @@
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

@@ -3,4 +3,3 @@
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

@@ -131,7 +131,7 @@ class MaskDecoder(nn.Module):
        # Run the transformer
        hs, src = self.transformer(src, pos_src, tokens)
        iou_token_out = hs[:, 0, :]
        mask_tokens_out = hs[:, 1: (1 + self.num_mask_tokens), :]

        # Upscale mask embeddings and predict masks using the mask tokens
        src = src.transpose(1, 2).view(b, c, h, w)

@@ -101,7 +101,7 @@ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
    ), "Batched iteration must have inputs of all the same size."
    n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
    for b in range(n_batches):
        yield [arg[b * batch_size: (b + 1) * batch_size] for arg in args]

def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
@@ -142,7 +142,7 @@ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
    idx = 0
    parity = False
    for count in rle["counts"]:
        mask[idx: idx + count] = parity
        idx += count
        parity ^= True
    mask = mask.reshape(w, h)
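
For context, batch_iterator walks any number of equal-length sequences in lockstep, with a possibly short final batch. A self-contained sketch of the same logic with sample data:

from typing import Any, Generator, List

def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
    assert len(args) > 0 and all(
        len(a) == len(args[0]) for a in args
    ), "Batched iteration must have inputs of all the same size."
    # ceil(len / batch_size) without importing math
    n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
    for b in range(n_batches):
        yield [arg[b * batch_size: (b + 1) * batch_size] for arg in args]

for xs, ys in batch_iterator(2, [1, 2, 3, 4, 5], ["a", "b", "c", "d", "e"]):
    print(xs, ys)  # [1, 2] ['a', 'b'] ... final short batch: [5] ['e']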

@@ -1,3 +1,4 @@
from swarms.agents.message import Message
import os
import random
import torch
@@ -36,8 +37,7 @@ import matplotlib.pyplot as plt
import wget

# prompts
VISUAL_AGENT_PREFIX = """
Worker Multi-Modal Agent is designed to be able to assist with
a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics.
@@ -239,6 +239,7 @@ def get_new_image_name(org_img_name, func_name="update"):
    new_file_name = f'{this_new_uuid}_{func_name}_{recent_prev_file_name}_{most_org_file_name}.png'
    return os.path.join(head, new_file_name)


class InstructPix2Pix:
    def __init__(self, device):
        print(f"Initializing InstructPix2Pix to {device}")
@@ -604,6 +605,7 @@ class PoseText2Image:
                  f"Output Image: {updated_image_path}")
        return updated_image_path


class SegText2Image:
    def __init__(self, device):
        print(f"Initializing SegText2Image to {device}")
@@ -800,7 +802,7 @@ class Segmenting:
        print(f"Initializing Segmentation to {device}")
        self.device = device
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.model_checkpoint_path = os.path.join("checkpoints", "sam")
        self.download_parameters()
        self.sam = build_sam(checkpoint=self.model_checkpoint_path).to(device)
@@ -813,12 +815,10 @@ class Segmenting:
    def download_parameters(self):
        url = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth"
        if not os.path.exists(self.model_checkpoint_path):
            wget.download(url, out=self.model_checkpoint_path)

    def show_mask(self, mask: np.ndarray, image: np.ndarray,
                  random_color: bool = False, transparency=1) -> np.ndarray:
        """Visualize a mask on top of an image.
        Args:
            mask (np.ndarray): A 2D array of shape (H, W).
@@ -839,16 +839,14 @@ class Segmenting:
        image = cv2.addWeighted(image, 0.7, mask_image.astype('uint8'), transparency, 0)
        return image

    def show_box(self, box, ax, label):
        x0, y0 = box[0], box[1]
        w, h = box[2] - box[0], box[3] - box[1]
        ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2))
        ax.text(x0, y0, label)

    def get_mask_with_boxes(self, image_pil, image, boxes_filt):
        size = image_pil.size
@@ -862,10 +860,10 @@ class Segmenting:
        transformed_boxes = self.sam_predictor.transform.apply_boxes_torch(boxes_filt, image.shape[:2]).to(self.device)

        masks, _, _ = self.sam_predictor.predict_torch(
            point_coords=None,
            point_labels=None,
            boxes=transformed_boxes.to(self.device),
            multimask_output=False,
        )
        return masks
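
The predict_torch call above reflects autopep8's E251 fix: PEP 8 wants no spaces around "=" when it marks a keyword argument or a default value, unlike ordinary assignment. For example:

def predict(point_coords=None, multimask_output=False):  # defaults: no spaces
    return point_coords, multimask_output

result = predict(
    point_coords=None,         # keyword arguments: no spaces either
    multimask_output=False,
)
threshold = 0.5                # plain assignment keeps its spaces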
@@ -916,7 +914,7 @@ class Segmenting:
                image, p.astype(int), radius=3, color=(255, 0, 0), thickness=-1)
        return image

    def segment_image_with_click(self, img, is_positive: bool):
        self.sam_predictor.set_image(img)
@@ -971,7 +968,6 @@ class Segmenting:
            multimask_output=False,
        )
        img = self.show_mask(masks[0], img, random_color=False, transparency=0.3)
        img = self.show_points(input_point, input_label, img)
@@ -989,11 +985,11 @@ class Segmenting:
             "or perform segmentation on this image, "
             "or segment all the object in this image."
             "The input to this tool should be a string, representing the image_path")
    def inference_all(self, image_path):
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        masks = self.mask_generator.generate(image)
        plt.figure(figsize=(20, 20))
        plt.imshow(image)
        if len(masks) == 0:
            return
@@ -1005,7 +1001,7 @@ class Segmenting:
            img = np.ones((m.shape[0], m.shape[1], 3))
            color_mask = np.random.random((1, 3)).tolist()[0]
            for i in range(3):
                img[:, :, i] = color_mask[i]
            ax.imshow(np.dstack((img, m)))
        updated_image_path = get_new_image_name(image_path, func_name="segment-image")
@@ -1016,13 +1012,14 @@ class Segmenting:
        )
        return updated_image_path

class Text2Box:
    def __init__(self, device):
        print(f"Initializing ObjectDetection to {device}")
        self.device = device
        self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
        self.model_checkpoint_path = os.path.join("checkpoints", "groundingdino")
        self.model_config_path = os.path.join("checkpoints", "grounding_config.py")
        self.download_parameters()
        self.box_threshold = 0.3
        self.text_threshold = 0.25
@@ -1031,11 +1028,12 @@ class Text2Box:
    def download_parameters(self):
        url = "https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth"
        if not os.path.exists(self.model_checkpoint_path):
            wget.download(url, out=self.model_checkpoint_path)
        config_url = "https://raw.githubusercontent.com/IDEA-Research/GroundingDINO/main/groundingdino/config/GroundingDINO_SwinT_OGC.py"
        if not os.path.exists(self.model_config_path):
            wget.download(config_url, out=self.model_config_path)

    def load_image(self, image_path):
        # load image
        image_pil = Image.open(image_path).convert("RGB")  # load image
@@ -1148,7 +1146,7 @@ class Text2Box:
        pred_dict = {
            "boxes": boxes_filt,
            "size": [size[1], size[0]],  # H,W
            "labels": pred_phrases, }

        image_with_box = self.plot_boxes_to_image(image_pil, pred_dict)[0]
@@ -1168,14 +1166,17 @@ class Inpainting:
        self.torch_dtype = torch.float16 if 'cuda' in self.device else torch.float32
        self.inpaint = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision=self.revision, torch_dtype=self.torch_dtype, safety_checker=StableDiffusionSafetyChecker.from_pretrained('CompVis/stable-diffusion-safety-checker')).to(device)

    def __call__(self, prompt, image, mask_image, height=512, width=512, num_inference_steps=50):
        update_image = self.inpaint(prompt=prompt, image=image.resize((width, height)),
                                    mask_image=mask_image.resize((width, height)), height=height, width=width, num_inference_steps=num_inference_steps).images[0]
        return update_image


class InfinityOutPainting:
    template_model = True  # Add this line to show this is a template model.

    def __init__(self, ImageCaptioning, Inpainting, VisualQuestionAnswering):
        self.llm = OpenAI(temperature=0)
        self.ImageCaption = ImageCaptioning
@@ -1272,15 +1273,14 @@ class InfinityOutPainting:
        return updated_image_path


class ObjectSegmenting:
    template_model = True  # Add this line to show this is a template model.

    def __init__(self, Text2Box: Text2Box, Segmenting: Segmenting):
        # self.llm = OpenAI(temperature=0)
        self.grounding = Text2Box
        self.sam = Segmenting

    @prompts(name="Segment the given object",
             description="useful when you only want to segment the certain objects in the picture"
                         "according to the given text"
@@ -1294,7 +1294,7 @@ class ObjectSegmenting:
        image_pil, image = self.grounding.load_image(image_path)
        boxes_filt, pred_phrases = self.grounding.get_grounding_boxes(image, det_prompt)
        updated_image_path = self.sam.segment_image_with_boxes(image_pil, image_path, boxes_filt, pred_phrases)
        print(
            f"\nProcessed ObjectSegmenting, Input Image: {image_path}, Object to segment: {det_prompt}, "
            f"Output Image: {updated_image_path}")
@@ -1310,7 +1310,7 @@ class ObjectSegmenting:
        if type(masks) == torch.Tensor:
            x = masks
        elif type(masks) == np.ndarray:
            x = torch.tensor(masks, dtype=int)
        else:
            raise TypeError("the type of the input masks must be numpy.ndarray or torch.tensor")
        x = x.squeeze(dim=1)
@@ -1341,7 +1341,6 @@ class ObjectSegmenting:
        for mask in masks:
            image = self.sam.show_mask(mask[0].cpu().numpy(), image, random_color=True, transparency=0.3)
        Image.fromarray(merged_mask)
        return merged_mask
@@ -1349,14 +1348,15 @@ class ObjectSegmenting:

class ImageEditing:
    template_model = True

    def __init__(self, Text2Box: Text2Box, Segmenting: Segmenting, Inpainting: Inpainting):
        print("Initializing ImageEditing")
        self.sam = Segmenting
        self.grounding = Text2Box
        self.inpaint = Inpainting

    def pad_edge(self, mask, padding):
        # mask Tensor [H,W]
        mask = mask.numpy()
        true_indices = np.argwhere(mask)
        mask_array = np.zeros_like(mask, dtype=bool)
@@ -1364,7 +1364,7 @@ class ImageEditing:
        padded_slice = tuple(slice(max(0, i - padding), i + padding + 1) for i in idx)
        mask_array[padded_slice] = True
        new_mask = (mask_array * 255).astype(np.uint8)
        # new_mask
        return new_mask

    @prompts(name="Remove Something From The Photo",
@@ -1381,7 +1381,7 @@ class ImageEditing:
             "location with another object from its description. "
             "The input to this tool should be a comma separated string of three, "
             "representing the image_path, the object to be replaced, the object to be replaced with ")
    def inference_replace_sam(self, inputs):
        image_path, to_be_replaced_txt, replace_with_txt = inputs.split(",")
        print(f"image_path={image_path}, to_be_replaced_txt={to_be_replaced_txt}")
@@ -1393,9 +1393,9 @@ class ImageEditing:
        masks = self.sam.get_mask_with_boxes(image_pil, image, boxes_filt)
        mask = torch.sum(masks, dim=0).unsqueeze(0)
        mask = torch.where(mask > 0, True, False)
        mask = mask.squeeze(0).squeeze(0).cpu()  # tensor
        mask = self.pad_edge(mask, padding=20)  # numpy
        mask_image = Image.fromarray(mask)
        updated_image = self.inpaint(prompt=replace_with_txt, image=image_pil,
@@ -1408,14 +1408,16 @@ class ImageEditing:
                  f"Output Image: {updated_image_path}")
        return updated_image_path


class BackgroundRemoving:
    '''
    Used to remove the background of the given picture.
    '''
    template_model = True

    def __init__(self, VisualQuestionAnswering: VisualQuestionAnswering, Text2Box: Text2Box, Segmenting: Segmenting):
        self.vqa = VisualQuestionAnswering
        self.obj_segmenting = ObjectSegmenting(Text2Box, Segmenting)

    @prompts(name="Remove the background",
             description="useful when you want to extract the object or remove the background,"
@@ -1450,7 +1452,7 @@ class BackgroundRemoving:
        vqa_input = f"{image_path}, what is the main object in the image?"
        text_prompt = self.vqa.inference(vqa_input)

        mask = self.obj_segmenting.get_mask(image_path, text_prompt)
        return mask
@@ -1476,7 +1478,7 @@ class MultiModalVisualAgent:
        for class_name, module in globals().items():
            if getattr(module, 'template_model', False):
                template_required_names = {
                    k for k in inspect.signature(module.__init__).parameters.keys() if k != 'self'
                }
                loaded_names = set([type(e).__name__ for e in self.models.values()])
@@ -1509,7 +1511,7 @@ class MultiModalVisualAgent:
        agent_suffix = self.suffix
        agent_format_instructions = self.format_instructions

        if lang == 'English':
            PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = agent_prefix, agent_format_instructions, agent_suffix
        else:
            PREFIX, FORMAT_INSTRUCTIONS, SUFFIX = VISUAL_AGENT_PREFIX_CN, VISUAL_AGENT_FORMAT_INSTRUCTIONS_CN, VISUAL_AGENT_SUFFIX_CN
@@ -1578,10 +1580,7 @@ class MultiModalVisualAgent:
        self.memory.clear()

# usage
from swarms.agents.message import Message

class MultiModalAgent:
@@ -1619,6 +1618,7 @@ class MultiModalAgent:
    """

    def __init__(
        self,
        load_dict,
@@ -1641,11 +1641,10 @@ class MultiModalAgent:
        self.language = language
        self.history = []

    def run_text(
        self,
        text: str = None,
        language="english"
    ):
        """Run text through the model"""
@@ -1661,7 +1660,7 @@ class MultiModalAgent:
    def run_img(
        self,
        image_path: str,
        language="english"
    ):
        """If language is None"""
        if language is None:
@@ -1701,7 +1700,7 @@ class MultiModalAgent:
        if language is None:
            language = self.default_language

        # add user's message to the history
        self.history.append(
            Message(
                "User",
@@ -1709,12 +1708,12 @@ class MultiModalAgent:
            )
        )

        # process msg
        try:
            self.agent.init_agent(language)
            response = self.agent.run_text(msg)

            # add agent's response to the history
            self.history.append(
                Message(
                    "Agent",
@@ -1722,7 +1721,7 @@ class MultiModalAgent:
                )
            )

            # if streaming is True
            if streaming:
                return self._stream_response(response)
            else:
@@ -1731,7 +1730,7 @@ class MultiModalAgent:
        except Exception as error:
            error_message = f"Error processing message: {str(error)}"

            # add error to history
            self.history.append(
                Message(
                    "Agent",
@@ -1762,5 +1761,3 @@ class MultiModalAgent:
            self.agent.clear_memory()
        except Exception as e:
            return f"Error cleaning memory: {str(e)}"

@@ -34,18 +34,22 @@ max_length = {
    "ada": 2049
}


def count_tokens(model_name, text):
    return len(encodings[model_name].encode(text))


def get_max_context_length(model_name):
    return max_length[model_name]


def get_token_ids_for_task_parsing(model_name):
    text = '''{"task": "text-classification", "token-classification", "text2text-generation", "summarization", "translation", "question-answering", "conversational", "text-generation", "sentence-similarity", "tabular-classification", "object-detection", "image-classification", "image-to-image", "image-to-text", "text-to-image", "visual-question-answering", "document-question-answering", "image-segmentation", "text-to-speech", "text-to-video", "automatic-speech-recognition", "audio-to-audio", "audio-classification", "canny-control", "hed-control", "mlsd-control", "normal-control", "openpose-control", "canny-text-to-image", "depth-text-to-image", "hed-text-to-image", "mlsd-text-to-image", "normal-text-to-image", "openpose-text-to-image", "seg-text-to-image", "args", "text", "path", "dep", "id", "<GENERATED>-"}'''
    res = encodings[model_name].encode(text)
    res = list(set(res))
    return res


def get_token_ids_for_choose_model(model_name):
    text = '''{"id": "reason"}'''
    res = encodings[model_name].encode(text)
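
For context, these helpers budget prompts against a model's context window. A minimal usage sketch with tiktoken; the 8192-token limit for gpt-4 is an assumed value mirroring the max_length table, not something shown in this diff:

import tiktoken

encodings = {"gpt-4": tiktoken.get_encoding("cl100k_base")}
max_length = {"gpt-4": 8192}  # assumed context-window size for illustration

def count_tokens(model_name, text):
    return len(encodings[model_name].encode(text))

def get_max_context_length(model_name):
    return max_length[model_name]

prompt = "Summarize the swarms repository in one sentence."
# tokens left for the completion once the prompt is accounted for
budget = get_max_context_length("gpt-4") - count_tokens("gpt-4", prompt)
print(budget)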

@@ -56,8 +56,7 @@ from transformers import (
)

# logs
warnings.filterwarnings("ignore")

parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="configs/config.default.yaml")
@@ -100,7 +99,7 @@ def load_pipes(local_deployment):
    controlnet_sd_pipes = {}
    if local_deployment in ["full"]:
        other_pipes = {
            "nlpconnect/vit-gpt2-image-captioning": {
                "model": VisionEncoderDecoderModel.from_pretrained(f"{local_fold}/nlpconnect/vit-gpt2-image-captioning"),
                "feature_extractor": ViTImageProcessor.from_pretrained(f"{local_fold}/nlpconnect/vit-gpt2-image-captioning"),
                "tokenizer": AutoTokenizer.from_pretrained(f"{local_fold}/nlpconnect/vit-gpt2-image-captioning"),
@@ -139,7 +138,7 @@ def load_pipes(local_deployment):
                "device": device
            },
            "lambdalabs/sd-image-variations-diffusers": {
                "model": DiffusionPipeline.from_pretrained(f"{local_fold}/lambdalabs/sd-image-variations-diffusers"),  # torch_dtype=torch.float16
                "device": device
            },
            # "CompVis/stable-diffusion-v1-4": {
@@ -165,7 +164,7 @@ def load_pipes(local_deployment):
            #     "model": WaveformEnhancement.from_hparams(source="speechbrain/mtl-mimic-voicebank", savedir="models/mtl-mimic-voicebank"),
            #     "device": device
            # },
            "microsoft/speecht5_vc": {
                "processor": SpeechT5Processor.from_pretrained(f"{local_fold}/microsoft/speecht5_vc"),
                "model": SpeechT5ForSpeechToSpeech.from_pretrained(f"{local_fold}/microsoft/speecht5_vc"),
                "vocoder": SpeechT5HifiGan.from_pretrained(f"{local_fold}/microsoft/speecht5_hifigan"),
@@ -295,7 +294,6 @@ def load_pipes(local_deployment):
            model.load_state_dict(torch.load(f"{local_fold}/lllyasviel/ControlNet/annotator/ckpts/mlsd_large_512_fp32.pth"), strict=True)
            return MLSDdetector(model)

        hed_network = Network(f"{local_fold}/lllyasviel/ControlNet/annotator/ckpts/network-bsds500.pth")
        controlnet_sd_pipes = {
@@ -317,37 +315,37 @@ def load_pipes(local_deployment):
            "canny-control": {
                "model": CannyDetector()
            },
            "lllyasviel/sd-controlnet-canny": {
                "control": controlnet,
                "model": controlnetpipe,
                "device": device
            },
            "lllyasviel/sd-controlnet-depth": {
                "control": ControlNetModel.from_pretrained(f"{local_fold}/lllyasviel/sd-controlnet-depth", torch_dtype=torch.float16),
                "model": controlnetpipe,
                "device": device
            },
            "lllyasviel/sd-controlnet-hed": {
                "control": ControlNetModel.from_pretrained(f"{local_fold}/lllyasviel/sd-controlnet-hed", torch_dtype=torch.float16),
                "model": controlnetpipe,
                "device": device
            },
            "lllyasviel/sd-controlnet-mlsd": {
                "control": ControlNetModel.from_pretrained(f"{local_fold}/lllyasviel/sd-controlnet-mlsd", torch_dtype=torch.float16),
                "model": controlnetpipe,
                "device": device
            },
            "lllyasviel/sd-controlnet-openpose": {
                "control": ControlNetModel.from_pretrained(f"{local_fold}/lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16),
                "model": controlnetpipe,
                "device": device
            },
            "lllyasviel/sd-controlnet-scribble": {
                "control": ControlNetModel.from_pretrained(f"{local_fold}/lllyasviel/sd-controlnet-scribble", torch_dtype=torch.float16),
                "model": controlnetpipe,
                "device": device
            },
            "lllyasviel/sd-controlnet-seg": {
                "control": ControlNetModel.from_pretrained(f"{local_fold}/lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16),
                "model": controlnetpipe,
                "device": device
@@ -356,6 +354,7 @@ def load_pipes(local_deployment):
    pipes = {**standard_pipes, **other_pipes, **controlnet_sd_pipes}
    return pipes


pipes = load_pipes(local_deployment)

end = time.time()
@@ -363,10 +362,12 @@ during = end - start
print(f"[ ready ] {during}s")


@app.route('/running', methods=['GET'])
def running():
    return jsonify({"running": True})


@app.route('/status/<path:model_id>', methods=['GET'])
def status(model_id):
    disabled_models = ["microsoft/trocr-base-printed", "microsoft/trocr-base-handwritten"]
@@ -377,6 +378,7 @@ def status(model_id):
        print(f"[ check {model_id} ] failed")
        return jsonify({"loaded": False})


@app.route('/models/<path:model_id>', methods=['POST'])
def models(model_id):
    while "using" in pipes[model_id] and pipes[model_id]["using"]:
@@ -392,7 +394,7 @@ def models(model_id):
    if "device" in pipes[model_id]:
        try:
            pipe.to(pipes[model_id]["device"])
        except BaseException:
            pipe.device = torch.device(pipes[model_id]["device"])
            pipe.model.to(pipes[model_id]["device"])
@@ -424,7 +426,7 @@ def models(model_id):
        if model_id.endswith("-control"):
            image = load_image(request.get_json()["img_url"])
            if "scribble" in model_id:
                control = pipe(image, scribble=True)
            elif "canny" in model_id:
                control = pipe(image, low_threshold=100, high_threshold=200)
            else:
@@ -503,7 +505,7 @@ def models(model_id):
            img_url = request.get_json()["img_url"]
            result = pipe(question=question, image=img_url)

        # DQA
        if model_id == "impira/layoutlm-document-qa":
            question = request.get_json()["text"]
            img_url = request.get_json()["img_url"]
@@ -558,7 +560,7 @@ def models(model_id):
        # ASR
        if model_id == "openai/whisper-base" or model_id == "microsoft/speecht5_asr":
            audio_url = request.get_json()["audio_url"]
            result = {"text": pipe(audio_url)["text"]}

        # audio to audio
        if model_id == "JorisCos/DCCRNet_Libri1Mix_enhsingle_16k":
@@ -621,7 +623,7 @@ def models(model_id):
        try:
            pipe.to("cpu")
            torch.cuda.empty_cache()
        except BaseException:
            pipe.device = torch.device("cpu")
            pipe.model.to("cpu")
            torch.cuda.empty_cache()

@@ -22,7 +22,7 @@ from huggingface_hub.inference_api import InferenceApi
from PIL import Image, ImageDraw
from pydub import AudioSegment

# tokenizations
encodings = {
    "gpt-4": tiktoken.get_encoding("cl100k_base"),
    "gpt-4-32k": tiktoken.get_encoding("cl100k_base"),
@@ -57,18 +57,22 @@ max_length = {
    "ada": 2049
}


def count_tokens(model_name, text):
    return len(encodings[model_name].encode(text))


def get_max_context_length(model_name):
    return max_length[model_name]


def get_token_ids_for_task_parsing(model_name):
    text = '''{"task": "text-classification", "token-classification", "text2text-generation", "summarization", "translation", "question-answering", "conversational", "text-generation", "sentence-similarity", "tabular-classification", "object-detection", "image-classification", "image-to-image", "image-to-text", "text-to-image", "visual-question-answering", "document-question-answering", "image-segmentation", "text-to-speech", "text-to-video", "automatic-speech-recognition", "audio-to-audio", "audio-classification", "canny-control", "hed-control", "mlsd-control", "normal-control", "openpose-control", "canny-text-to-image", "depth-text-to-image", "hed-text-to-image", "mlsd-text-to-image", "normal-text-to-image", "openpose-text-to-image", "seg-text-to-image", "args", "text", "path", "dep", "id", "<GENERATED>-"}'''
    res = encodings[model_name].encode(text)
    res = list(set(res))
    return res


def get_token_ids_for_choose_model(model_name):
    text = '''{"id": "reason"}'''
    res = encodings[model_name].encode(text)
@@ -76,13 +80,7 @@ def get_token_ids_for_choose_model(model_name):
    return res

#########
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default="swarms/agents/workers/multi_modal_workers/omni_agent/config.yml")
parser.add_argument("--mode", type=str, default="cli")
@ -176,14 +174,14 @@ inference_mode = config["inference_mode"]
# check the local_inference_endpoint # check the local_inference_endpoint
Model_Server = None Model_Server = None
if inference_mode!="huggingface": if inference_mode != "huggingface":
Model_Server = "http://" + config["local_inference_endpoint"]["host"] + ":" + str(config["local_inference_endpoint"]["port"]) Model_Server = "http://" + config["local_inference_endpoint"]["host"] + ":" + str(config["local_inference_endpoint"]["port"])
message = f"The server of local inference endpoints is not running, please start it first. (or using `inference_mode: huggingface` in {args.config} for a feature-limited experience)" message = f"The server of local inference endpoints is not running, please start it first. (or using `inference_mode: huggingface` in {args.config} for a feature-limited experience)"
try: try:
r = requests.get(Model_Server + "/running") r = requests.get(Model_Server + "/running")
if r.status_code != 200: if r.status_code != 200:
raise ValueError(message) raise ValueError(message)
except: except BaseException:
raise ValueError(message) raise ValueError(message)
@ -222,6 +220,7 @@ elif "HUGGINGFACE_ACCESS_TOKEN" in os.environ and os.getenv("HUGGINGFACE_ACCESS_
else: else:
raise ValueError(f"Incorrect HuggingFace token. Please check your {args.config} file.") raise ValueError(f"Incorrect HuggingFace token. Please check your {args.config} file.")
def convert_chat_to_completion(data): def convert_chat_to_completion(data):
messages = data.pop('messages', []) messages = data.pop('messages', [])
tprompt = "" tprompt = ""
@ -231,11 +230,11 @@ def convert_chat_to_completion(data):
final_prompt = "" final_prompt = ""
for message in messages: for message in messages:
if message['role'] == "user": if message['role'] == "user":
final_prompt += ("<im_start>"+ "user" + "\n" + message['content'] + "<im_end>\n") final_prompt += ("<im_start>" + "user" + "\n" + message['content'] + "<im_end>\n")
elif message['role'] == "assistant": elif message['role'] == "assistant":
final_prompt += ("<im_start>"+ "assistant" + "\n" + message['content'] + "<im_end>\n") final_prompt += ("<im_start>" + "assistant" + "\n" + message['content'] + "<im_end>\n")
else: else:
final_prompt += ("<im_start>"+ "system" + "\n" + message['content'] + "<im_end>\n") final_prompt += ("<im_start>" + "system" + "\n" + message['content'] + "<im_end>\n")
final_prompt = tprompt + final_prompt final_prompt = tprompt + final_prompt
final_prompt = final_prompt + "<im_start>assistant" final_prompt = final_prompt + "<im_start>assistant"
data["prompt"] = final_prompt data["prompt"] = final_prompt
@ -243,6 +242,7 @@ def convert_chat_to_completion(data):
data['max_tokens'] = data.get('max_tokens', max(get_max_context_length(LLM) - count_tokens(LLM_encoding, final_prompt), 1)) data['max_tokens'] = data.get('max_tokens', max(get_max_context_length(LLM) - count_tokens(LLM_encoding, final_prompt), 1))
return data return data
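A sketch of the ChatML-style prompt convert_chat_to_completion builds (the message list is illustrative; the system-prompt handling elided by the hunk above is omitted):

data = {"messages": [{"role": "user", "content": "Draw a zebra."}]}
# After conversion, data["prompt"] is roughly:
#   <im_start>user
#   Draw a zebra.<im_end>
#   <im_start>assistant
# and data["max_tokens"] defaults to the model's remaining context budget.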
def send_request(data): def send_request(data):
api_key = data.pop("api_key") api_key = data.pop("api_key")
api_type = data.pop("api_type") api_type = data.pop("api_type")
@ -269,36 +269,41 @@ def send_request(data):
else: else:
return response.json()["choices"][0]["message"]["content"].strip() return response.json()["choices"][0]["message"]["content"].strip()
def replace_slot(text, entries): def replace_slot(text, entries):
for key, value in entries.items(): for key, value in entries.items():
if not isinstance(value, str): if not isinstance(value, str):
value = str(value) value = str(value)
text = text.replace("{{" + key +"}}", value.replace('"', "'").replace('\n', "")) text = text.replace("{{" + key + "}}", value.replace('"', "'").replace('\n', ""))
return text return text
def find_json(s): def find_json(s):
s = s.replace("\'", "\"") s = s.replace("\'", "\"")
start = s.find("{") start = s.find("{")
end = s.rfind("}") end = s.rfind("}")
res = s[start:end+1] res = s[start:end + 1]
res = res.replace("\n", "") res = res.replace("\n", "")
return res return res
def field_extract(s, field): def field_extract(s, field):
try: try:
field_rep = re.compile(f'{field}.*?:.*?"(.*?)"', re.IGNORECASE) field_rep = re.compile(f'{field}.*?:.*?"(.*?)"', re.IGNORECASE)
extracted = field_rep.search(s).group(1).replace("\"", "\'") extracted = field_rep.search(s).group(1).replace("\"", "\'")
except: except BaseException:
field_rep = re.compile(rf'{field}:\ *"(.*?)"', re.IGNORECASE) field_rep = re.compile(rf'{field}:\ *"(.*?)"', re.IGNORECASE)
extracted = field_rep.search(s).group(1).replace("\"", "\'") extracted = field_rep.search(s).group(1).replace("\"", "\'")
return extracted return extracted
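A quick, self-contained sketch of what the helpers above do (values are illustrative):

# replace_slot: fill {{key}} slots, swapping '"' for "'" and stripping newlines
text = 'The task is "{{task}}"'.replace("{{task}}", "image-to-text")

# find_json: slice from the first "{" to the last "}"
raw = 'reply: {"id": "vit-gpt2", "reason": "best captioner"} trailing text'
print(raw[raw.find("{"):raw.rfind("}") + 1])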
def get_id_reason(choose_str): def get_id_reason(choose_str):
reason = field_extract(choose_str, "reason") reason = field_extract(choose_str, "reason")
id = field_extract(choose_str, "id") id = field_extract(choose_str, "id")
choose = {"id": id, "reason": reason} choose = {"id": id, "reason": reason}
return id.strip(), reason.strip(), choose return id.strip(), reason.strip(), choose
def record_case(success, **args): def record_case(success, **args):
if success: if success:
f = open("logs/log_success.jsonl", "a") f = open("logs/log_success.jsonl", "a")
@ -308,6 +313,7 @@ def record_case(success, **args):
f.write(json.dumps(log) + "\n") f.write(json.dumps(log) + "\n")
f.close() f.close()
def image_to_bytes(img_url): def image_to_bytes(img_url):
img_byte = io.BytesIO() img_byte = io.BytesIO()
img_url.split(".")[-1] img_url.split(".")[-1]
@ -315,6 +321,7 @@ def image_to_bytes(img_url):
img_data = img_byte.getvalue() img_data = img_byte.getvalue()
return img_data return img_data
def resource_has_dep(command): def resource_has_dep(command):
args = command["args"] args = command["args"]
for _, v in args.items(): for _, v in args.items():
@ -322,6 +329,7 @@ def resource_has_dep(command):
return True return True
return False return False
def fix_dep(tasks): def fix_dep(tasks):
for task in tasks: for task in tasks:
args = task["args"] args = task["args"]
@ -335,6 +343,7 @@ def fix_dep(tasks):
task["dep"] = [-1] task["dep"] = [-1]
return tasks return tasks
def unfold(tasks): def unfold(tasks):
flag_unfold_task = False flag_unfold_task = False
try: try:
@ -361,6 +370,7 @@ def unfold(tasks):
return tasks return tasks
def chitchat(messages, api_key, api_type, api_endpoint): def chitchat(messages, api_key, api_type, api_endpoint):
data = { data = {
"model": LLM, "model": LLM,
@ -371,6 +381,7 @@ def chitchat(messages, api_key, api_type, api_endpoint):
} }
return send_request(data) return send_request(data)
def parse_task(context, input, api_key, api_type, api_endpoint): def parse_task(context, input, api_key, api_type, api_endpoint):
demos_or_presteps = parse_task_demos_or_presteps demos_or_presteps = parse_task_demos_or_presteps
messages = json.loads(demos_or_presteps) messages = json.loads(demos_or_presteps)
@ -404,6 +415,7 @@ def parse_task(context, input, api_key, api_type, api_endpoint):
} }
return send_request(data) return send_request(data)
def choose_model(input, task, metas, api_key, api_type, api_endpoint): def choose_model(input, task, metas, api_key, api_type, api_endpoint):
prompt = replace_slot(choose_model_prompt, { prompt = replace_slot(choose_model_prompt, {
"input": input, "input": input,
@ -454,13 +466,14 @@ def response_results(input, results, api_key, api_type, api_endpoint):
} }
return send_request(data) return send_request(data)
def huggingface_model_inference(model_id, data, task): def huggingface_model_inference(model_id, data, task):
task_url = f"https://api-inference.huggingface.co/models/{model_id}" # InferenceApi does not yet support some tasks task_url = f"https://api-inference.huggingface.co/models/{model_id}" # InferenceApi does not yet support some tasks
inference = InferenceApi(repo_id=model_id, token=config["huggingface"]["token"]) inference = InferenceApi(repo_id=model_id, token=config["huggingface"]["token"])
# NLP tasks # NLP tasks
if task == "question-answering": if task == "question-answering":
inputs = {"question": data["text"], "context": (data["context"] if "context" in data else "" )} inputs = {"question": data["text"], "context": (data["context"] if "context" in data else "")}
result = inference(inputs) result = inference(inputs)
if task == "sentence-similarity": if task == "sentence-similarity":
inputs = {"source_sentence": data["text1"], "target_sentence": data["text2"]} inputs = {"source_sentence": data["text1"], "target_sentence": data["text2"]}
@ -537,7 +550,7 @@ def huggingface_model_inference(model_id, data, task):
for label in predicted: for label in predicted:
box = label["box"] box = label["box"]
draw.rectangle(((box["xmin"], box["ymin"]), (box["xmax"], box["ymax"])), outline=color_map[label["label"]], width=2) draw.rectangle(((box["xmin"], box["ymin"]), (box["xmax"], box["ymax"])), outline=color_map[label["label"]], width=2)
draw.text((box["xmin"]+5, box["ymin"]-15), label["label"], fill=color_map[label["label"]]) draw.text((box["xmin"] + 5, box["ymin"] - 15), label["label"], fill=color_map[label["label"]])
name = str(uuid.uuid4())[:4] name = str(uuid.uuid4())[:4]
image.save(f"public/images/{name}.jpg") image.save(f"public/images/{name}.jpg")
result = {} result = {}
@ -586,6 +599,7 @@ def huggingface_model_inference(model_id, data, task):
result = {"generated audio": f"/audios/{name}.{type}"} result = {"generated audio": f"/audios/{name}.{type}"}
return result return result
def local_model_inference(model_id, data, task): def local_model_inference(model_id, data, task):
task_url = f"{Model_Server}/models/{model_id}" task_url = f"{Model_Server}/models/{model_id}"
@ -664,7 +678,7 @@ def local_model_inference(model_id, data, task):
for label in predicted: for label in predicted:
box = label["box"] box = label["box"]
draw.rectangle(((box["xmin"], box["ymin"]), (box["xmax"], box["ymax"])), outline=color_map[label["label"]], width=2) draw.rectangle(((box["xmin"], box["ymin"]), (box["xmax"], box["ymax"])), outline=color_map[label["label"]], width=2)
draw.text((box["xmin"]+5, box["ymin"]-15), label["label"], fill=color_map[label["label"]]) draw.text((box["xmin"] + 5, box["ymin"] - 15), label["label"], fill=color_map[label["label"]])
name = str(uuid.uuid4())[:4] name = str(uuid.uuid4())[:4]
image.save(f"public/images/{name}.jpg") image.save(f"public/images/{name}.jpg")
results = {} results = {}
@ -713,11 +727,11 @@ def model_inference(model_id, data, hosted_on, task):
except Exception as e: except Exception as e:
print(e) print(e)
traceback.print_exc() traceback.print_exc()
inference_result = {"error":{"message": str(e)}} inference_result = {"error": {"message": str(e)}}
return inference_result return inference_result
def get_model_status(model_id, url, headers, queue = None): def get_model_status(model_id, url, headers, queue=None):
endpoint_type = "huggingface" if "huggingface" in url else "local" endpoint_type = "huggingface" if "huggingface" in url else "local"
if "huggingface" in url: if "huggingface" in url:
r = requests.get(url, headers=headers, proxies=PROXY) r = requests.get(url, headers=headers, proxies=PROXY)
@ -732,6 +746,7 @@ def get_model_status(model_id, url, headers, queue = None):
queue.put((model_id, False, None)) queue.put((model_id, False, None))
return False return False
def get_avaliable_models(candidates, topk=5): def get_avaliable_models(candidates, topk=5):
all_available_models = {"local": [], "huggingface": []} all_available_models = {"local": [], "huggingface": []}
threads = [] threads = []
@ -766,6 +781,7 @@ def get_avaliable_models(candidates, topk=5):
return all_available_models return all_available_models
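get_avaliable_models fans out one thread per candidate model and drains a queue of (model_id, available, endpoint) tuples; a self-contained sketch of that pattern (the check body is a stand-in for the real status request):

import threading
import queue

def check(model_id, q):
    q.put((model_id, True, "local"))    # real code issues a status request here

q = queue.Queue()
threads = [threading.Thread(target=check, args=(m, q)) for m in ("m1", "m2")]
for t in threads:
    t.start()
for t in threads:
    t.join()
while not q.empty():
    print(q.get())                      # (model_id, available, endpoint)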
def collect_result(command, choose, inference_result): def collect_result(command, choose, inference_result):
result = {"task": command} result = {"task": command}
result["inference result"] = inference_result result["inference result"] = inference_result
@ -865,7 +881,7 @@ def run_task(input, command, results, api_key, api_type, api_endpoint):
logger.debug(f"chosen model: {choose}") logger.debug(f"chosen model: {choose}")
else: else:
logger.warning(f"Task {command['task']} is not available. ControlNet need to be deployed locally.") logger.warning(f"Task {command['task']} is not available. ControlNet need to be deployed locally.")
record_case(success=False, **{"input": input, "task": command, "reason": f"Task {command['task']} is not available. ControlNet need to be deployed locally.", "op":"message"}) record_case(success=False, **{"input": input, "task": command, "reason": f"Task {command['task']} is not available. ControlNet need to be deployed locally.", "op": "message"})
inference_result = {"error": "service related to ControlNet is not available."} inference_result = {"error": "service related to ControlNet is not available."}
results[id] = collect_result(command, "", inference_result) results[id] = collect_result(command, "", inference_result)
return False return False
@ -883,7 +899,7 @@ def run_task(input, command, results, api_key, api_type, api_endpoint):
else: else:
if task not in MODELS_MAP: if task not in MODELS_MAP:
logger.warning(f"no available models on {task} task.") logger.warning(f"no available models on {task} task.")
record_case(success=False, **{"input": input, "task": command, "reason": f"task not supported: {command['task']}", "op":"message"}) record_case(success=False, **{"input": input, "task": command, "reason": f"task not supported: {command['task']}", "op": "message"})
inference_result = {"error": f"{command['task']} not found in available tasks."} inference_result = {"error": f"{command['task']} not found in available tasks."}
results[id] = collect_result(command, "", inference_result) results[id] = collect_result(command, "", inference_result)
return False return False
@ -895,7 +911,7 @@ def run_task(input, command, results, api_key, api_type, api_endpoint):
if len(all_avaliable_model_ids) == 0: if len(all_avaliable_model_ids) == 0:
logger.warning(f"no available models on {command['task']}") logger.warning(f"no available models on {command['task']}")
record_case(success=False, **{"input": input, "task": command, "reason": f"no available models: {command['task']}", "op":"message"}) record_case(success=False, **{"input": input, "task": command, "reason": f"no available models: {command['task']}", "op": "message"})
inference_result = {"error": f"no available models on {command['task']} task."} inference_result = {"error": f"no available models on {command['task']} task."}
results[id] = collect_result(command, "", inference_result) results[id] = collect_result(command, "", inference_result)
return False return False
@ -938,24 +954,25 @@ def run_task(input, command, results, api_key, api_type, api_endpoint):
if "error" in inference_result: if "error" in inference_result:
logger.warning(f"Inference error: {inference_result['error']}") logger.warning(f"Inference error: {inference_result['error']}")
record_case(success=False, **{"input": input, "task": command, "reason": f"inference error: {inference_result['error']}", "op":"message"}) record_case(success=False, **{"input": input, "task": command, "reason": f"inference error: {inference_result['error']}", "op": "message"})
results[id] = collect_result(command, choose, inference_result) results[id] = collect_result(command, choose, inference_result)
return False return False
results[id] = collect_result(command, choose, inference_result) results[id] = collect_result(command, choose, inference_result)
return True return True
def chat_huggingface(messages, api_key, api_type, api_endpoint, return_planning = False, return_results = False):
def chat_huggingface(messages, api_key, api_type, api_endpoint, return_planning=False, return_results=False):
start = time.time() start = time.time()
context = messages[:-1] context = messages[:-1]
input = messages[-1]["content"] input = messages[-1]["content"]
logger.info("*"*80) logger.info("*" * 80)
logger.info(f"input: {input}") logger.info(f"input: {input}")
task_str = parse_task(context, input, api_key, api_type, api_endpoint) task_str = parse_task(context, input, api_key, api_type, api_endpoint)
if "error" in task_str: if "error" in task_str:
record_case(success=False, **{"input": input, "task": task_str, "reason": f"task parsing error: {task_str['error']['message']}", "op":"report message"}) record_case(success=False, **{"input": input, "task": task_str, "reason": f"task parsing error: {task_str['error']['message']}", "op": "report message"})
return {"message": task_str["error"]["message"]} return {"message": task_str["error"]["message"]}
task_str = task_str.strip() task_str = task_str.strip()
@ -966,7 +983,7 @@ def chat_huggingface(messages, api_key, api_type, api_endpoint, return_planning
except Exception as e: except Exception as e:
logger.debug(e) logger.debug(e)
response = chitchat(messages, api_key, api_type, api_endpoint) response = chitchat(messages, api_key, api_type, api_endpoint)
record_case(success=False, **{"input": input, "task": task_str, "reason": "task parsing fail", "op":"chitchat"}) record_case(success=False, **{"input": input, "task": task_str, "reason": "task parsing fail", "op": "chitchat"})
return {"message": response} return {"message": response}
if task_str == "[]": # using LLM response for empty task if task_str == "[]": # using LLM response for empty task
@ -1028,10 +1045,11 @@ def chat_huggingface(messages, api_key, api_type, api_endpoint, return_planning
during = end - start during = end - start
answer = {"message": response} answer = {"message": response}
record_case(success=True, **{"input": input, "task": task_str, "results": results, "response": response, "during": during, "op":"response"}) record_case(success=True, **{"input": input, "task": task_str, "results": results, "response": response, "during": during, "op": "response"})
logger.info(f"response: {response}") logger.info(f"response: {response}")
return answer return answer
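For orientation, the chat_huggingface pipeline above condensed into its visible stages (a comment-only sketch of the flow, not runnable against live endpoints):

# 1. parse_task(context, input, ...)  -> LLM returns a JSON task list
# 2. fix_dep / unfold                 -> normalize and expand task dependencies
# 3. run_task per task                -> choose_model, then model_inference
# 4. response_results(...)            -> LLM composes the final answer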
def test(): def test():
# single round examples # single round examples
inputs = [ inputs = [
@ -1045,7 +1063,7 @@ def test():
for input in inputs: for input in inputs:
messages = [{"role": "user", "content": input}] messages = [{"role": "user", "content": input}]
chat_huggingface(messages, API_KEY, API_TYPE, API_ENDPOINT, return_planning = False, return_results = False) chat_huggingface(messages, API_KEY, API_TYPE, API_ENDPOINT, return_planning=False, return_results=False)
# multi rounds example # multi rounds example
messages = [ messages = [
@ -1053,7 +1071,8 @@ def test():
{"role": "assistant", "content": """Sure. I understand your request. Based on the inference results of the models, I have generated a canny image for you. The workflow I used is as follows: First, I used the image-to-text model (nlpconnect/vit-gpt2-image-captioning) to convert the image /examples/f.jpg to text. The generated text is "a herd of giraffes and zebras grazing in a field". Second, I used the canny-control model (canny-control) to generate a canny image from the text. Unfortunately, the model failed to generate the canny image. Finally, I used the canny-text-to-image model (lllyasviel/sd-controlnet-canny) to generate a canny image from the text. The generated image is located at /images/f16d.png. I hope this answers your request. Is there anything else I can help you with?"""}, {"role": "assistant", "content": """Sure. I understand your request. Based on the inference results of the models, I have generated a canny image for you. The workflow I used is as follows: First, I used the image-to-text model (nlpconnect/vit-gpt2-image-captioning) to convert the image /examples/f.jpg to text. The generated text is "a herd of giraffes and zebras grazing in a field". Second, I used the canny-control model (canny-control) to generate a canny image from the text. Unfortunately, the model failed to generate the canny image. Finally, I used the canny-text-to-image model (lllyasviel/sd-controlnet-canny) to generate a canny image from the text. The generated image is located at /images/f16d.png. I hope this answers your request. Is there anything else I can help you with?"""},
{"role": "user", "content": """then based on the above canny image and a prompt "a photo of a zoo", generate a new image."""}, {"role": "user", "content": """then based on the above canny image and a prompt "a photo of a zoo", generate a new image."""},
] ]
chat_huggingface(messages, API_KEY, API_TYPE, API_ENDPOINT, return_planning = False, return_results = False) chat_huggingface(messages, API_KEY, API_TYPE, API_ENDPOINT, return_planning=False, return_results=False)
def cli(): def cli():
messages = [] messages = []

@ -10,5 +10,3 @@ class Replicator:
def run(self, task): def run(self, task):
pass pass

@ -30,6 +30,7 @@ class Step:
self.args = args self.args = args
self.tool = tool self.tool = tool
class Plan: class Plan:
def __init__( def __init__(
self, self,
@ -44,9 +45,6 @@ class Plan:
return str(self) return str(self)
class OmniModalAgent: class OmniModalAgent:
""" """
OmniModalAgent OmniModalAgent
@ -72,6 +70,7 @@ class OmniModalAgent:
agent = OmniModalAgent(llm) agent = OmniModalAgent(llm)
response = agent.run("Hello, how are you? Create an image of how your are doing!") response = agent.run("Hello, how are you? Create an image of how your are doing!")
""" """
def __init__( def __init__(
self, self,
llm: BaseLanguageModel, llm: BaseLanguageModel,
@ -105,7 +104,6 @@ class OmniModalAgent:
# self.task_executor = TaskExecutor # self.task_executor = TaskExecutor
self.history = [] self.history = []
def run( def run(
self, self,
input: str input: str
@ -149,7 +147,7 @@ class OmniModalAgent:
""" """
#add user's message to the history # add user's message to the history
self.history.append( self.history.append(
Message( Message(
"User", "User",
@ -157,11 +155,11 @@ class OmniModalAgent:
) )
) )
#process msg # process msg
try: try:
response = self.agent.run(msg) response = self.agent.run(msg)
#add agent's response to the history # add agent's response to the history
self.history.append( self.history.append(
Message( Message(
"Agent", "Agent",
@ -169,7 +167,7 @@ class OmniModalAgent:
) )
) )
#if streaming is True # if streaming is True
if streaming: if streaming:
return self._stream_response(response) return self._stream_response(response)
else: else:
@ -178,7 +176,7 @@ class OmniModalAgent:
except Exception as error: except Exception as error:
error_message = f"Error processing message: {str(error)}" error_message = f"Error processing message: {str(error)}"
#add error to history # add error to history
self.history.append( self.history.append(
Message( Message(
"Agent", "Agent",
@ -203,5 +201,3 @@ class OmniModalAgent:
""" """
for token in response.split(): for token in response.split():
yield token yield token

@ -132,12 +132,6 @@ class SalesConversationChain(LLMChain):
return cls(prompt=prompt, llm=llm, verbose=verbose) return cls(prompt=prompt, llm=llm, verbose=verbose)
# Set up a knowledge base # Set up a knowledge base
def setup_knowledge_base(product_catalog: str = None): def setup_knowledge_base(product_catalog: str = None):
""" """
@ -173,21 +167,19 @@ def get_tools(product_catalog):
description="useful for when you need to answer questions about product information", description="useful for when you need to answer questions about product information",
), ),
#Interpreter # Interpreter
Tool( Tool(
name="Code Interepeter", name="Code Interepeter",
func=compile, func=compile,
description="Useful when you need to run code locally, such as Python, Javascript, Shell, and more." description="Useful when you need to run code locally, such as Python, Javascript, Shell, and more."
) )
#omnimodal agent # omnimodal agent
] ]
return tools return tools
class CustomPromptTemplateForTools(StringPromptTemplate): class CustomPromptTemplateForTools(StringPromptTemplate):
# The template to use # The template to use
template: str template: str
@ -238,7 +230,7 @@ class SalesConvoOutputParser(AgentOutputParser):
regex = r"Action: (.*?)[\n]*Action Input: (.*)" regex = r"Action: (.*?)[\n]*Action Input: (.*)"
match = re.search(regex, text) match = re.search(regex, text)
if not match: if not match:
## TODO - this is not entirely reliable, sometimes results in an error. # TODO - this is not entirely reliable, sometimes results in an error.
return AgentFinish( return AgentFinish(
{ {
"output": "I apologize, I was unable to find the answer to your question. Is there anything else I can help with?" "output": "I apologize, I was unable to find the answer to your question. Is there anything else I can help with?"
@ -405,7 +397,7 @@ class ProfitPilot(Chain, BaseModel):
tool_names = [tool.name for tool in tools] tool_names = [tool.name for tool in tools]
# WARNING: this output parser is NOT reliable yet # WARNING: this output parser is NOT reliable yet
## It makes assumptions about output from LLM which can break and throw an error # It makes assumptions about output from LLM which can break and throw an error
output_parser = SalesConvoOutputParser(ai_prefix=kwargs["salesperson_name"]) output_parser = SalesConvoOutputParser(ai_prefix=kwargs["salesperson_name"])
sales_agent_with_tools = LLMSingleActionAgent( sales_agent_with_tools = LLMSingleActionAgent(

@ -17,4 +17,3 @@ class ErrorArtifact(BaseArtifact):
from griptape.schemas import ErrorArtifactSchema from griptape.schemas import ErrorArtifactSchema
return dict(ErrorArtifactSchema().dump(self)) return dict(ErrorArtifactSchema().dump(self))

@ -5,6 +5,7 @@ import json
from typing import Optional from typing import Optional
from pydantic import BaseModel, Field, StrictStr from pydantic import BaseModel, Field, StrictStr
class Artifact(BaseModel): class Artifact(BaseModel):
""" """
@ -63,5 +64,3 @@ class Artifact(BaseModel):
) )
return _obj return _obj

@ -14,6 +14,7 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(
# ---------- Boss Node ---------- # ---------- Boss Node ----------
class Boss: class Boss:
""" """
The Boss class is responsible for creating and executing tasks using the BabyAGI model. The Boss class is responsible for creating and executing tasks using the BabyAGI model.
@ -37,6 +38,7 @@ class Boss:
# Run the Boss to process the objective # Run the Boss to process the objective
boss.run() boss.run()
""" """
def __init__( def __init__(
self, self,
objective: str, objective: str,

@ -345,7 +345,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
disallowed_special=self.disallowed_special, disallowed_special=self.disallowed_special,
) )
for j in range(0, len(token), self.embedding_ctx_length): for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(token[j : j + self.embedding_ctx_length]) tokens.append(token[j: j + self.embedding_ctx_length])
indices.append(i) indices.append(i)
batched_embeddings: List[List[float]] = [] batched_embeddings: List[List[float]] = []
@ -364,7 +364,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in _iter: for i in _iter:
response = embed_with_retry( response = embed_with_retry(
self, self,
input=tokens[i : i + _chunk_size], input=tokens[i: i + _chunk_size],
**self._invocation_params, **self._invocation_params,
) )
batched_embeddings.extend(r["embedding"] for r in response["data"]) batched_embeddings.extend(r["embedding"] for r in response["data"])
@ -426,7 +426,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
disallowed_special=self.disallowed_special, disallowed_special=self.disallowed_special,
) )
for j in range(0, len(token), self.embedding_ctx_length): for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(token[j : j + self.embedding_ctx_length]) tokens.append(token[j: j + self.embedding_ctx_length])
indices.append(i) indices.append(i)
batched_embeddings: List[List[float]] = [] batched_embeddings: List[List[float]] = []
@ -434,7 +434,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in range(0, len(tokens), _chunk_size): for i in range(0, len(tokens), _chunk_size):
response = await async_embed_with_retry( response = await async_embed_with_retry(
self, self,
input=tokens[i : i + _chunk_size], input=tokens[i: i + _chunk_size],
**self._invocation_params, **self._invocation_params,
) )
batched_embeddings.extend(r["embedding"] for r in response["data"]) batched_embeddings.extend(r["embedding"] for r in response["data"])
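The reformatted slices above (tokens[j: j + self.embedding_ctx_length]) window each long input into context-sized chunks that are embedded separately; a minimal sketch:

ctx_len = 4
token = list(range(10))                       # stand-in for one tokenized input
chunks = [token[j: j + ctx_len] for j in range(0, len(token), ctx_len)]
print(chunks)                                 # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]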

@ -28,4 +28,3 @@ class PegasusEmbedding:
except Exception as e: except Exception as e:
logging.error(f"Failed to generate embeddings. Error: {e}") logging.error(f"Failed to generate embeddings. Error: {e}")
raise raise

@ -1,6 +1,6 @@
# workers in unison # workers in unison
#kye gomez jul 13 4:01pm, can scale up the number of swarms working on a problem with `hivemind(swarms=4, or swarms=auto which will scale the agents depending on the complexity)` # kye gomez jul 13 4:01pm, can scale up the number of swarms working on a problem with `hivemind(swarms=4, or swarms=auto which will scale the agents depending on the complexity)`
#this needs to change, we need to specify exactly what needs to be imported # this needs to change, we need to specify exactly what needs to be imported
# add typechecking, documentation, and deeper error handling # add typechecking, documentation, and deeper error handling
# TODO: MANY WORKERS # TODO: MANY WORKERS
@ -12,6 +12,7 @@ from swarms.swarms.swarms import HierarchicalSwarm
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
class HiveMind: class HiveMind:
def __init__( def __init__(
self, self,
@ -62,7 +63,7 @@ class HiveMind:
logging.error(f"No swarm found at index {index}") logging.error(f"No swarm found at index {index}")
def get_progress(self): def get_progress(self):
#this assumes that the swarms class has a get progress method # this assumes that the swarms class has a get progress method
pass pass
def cancel_swarm(self, index): def cancel_swarm(self, index):

@ -10,9 +10,11 @@ from swarms.memory.schemas import Task as APITask
class Step(APIStep): class Step(APIStep):
additional_properties: Optional[Dict[str, str]] = None additional_properties: Optional[Dict[str, str]] = None
class Task(APITask): class Task(APITask):
steps: List[Step] = [] steps: List[Step] = []
class NotFoundException(Exception): class NotFoundException(Exception):
""" """
Exception raised when a resource is not found. Exception raised when a resource is not found.
@ -23,6 +25,7 @@ class NotFoundException(Exception):
self.item_id = item_id self.item_id = item_id
super().__init__(f"{item_name} with {item_id} not found.") super().__init__(f"{item_name} with {item_id} not found.")
class TaskDB(ABC): class TaskDB(ABC):
async def create_task( async def create_task(
self, self,

@ -1,4 +1,4 @@
#init ocean # init ocean
# TODO upload ocean to pip and config it to the abstract class # TODO upload ocean to pip and config it to the abstract class
import logging import logging
from typing import Union, List from typing import Union, List
@ -6,6 +6,7 @@ from typing import Union, List
import oceandb import oceandb
from oceandb.utils.embedding_function import MultiModalEmbeddingFunction from oceandb.utils.embedding_function import MultiModalEmbeddingFunction
class OceanDB: class OceanDB:
def __init__(self): def __init__(self):
try: try:

@ -1,4 +1,4 @@
#prompts # prompts
from swarms.models.anthropic import Anthropic from swarms.models.anthropic import Anthropic
# from swarms.models.palm import GooglePalm # from swarms.models.palm import GooglePalm
from swarms.models.petals import Petals from swarms.models.petals import Petals

@ -1,6 +1,7 @@
import requests import requests
import os import os
class Anthropic: class Anthropic:
"""Anthropic large language models.""" """Anthropic large language models."""

@ -1,15 +1,15 @@
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
class AbstractModel(ABC): class AbstractModel(ABC):
#abstract base class for language models # abstract base class for language models
def __init__(self): def __init__(self):
pass pass
@abstractmethod @abstractmethod
def run(self, prompt): def run(self, prompt):
#generate text using language model # generate text using language model
pass pass
def chat(self, prompt, history): def chat(self, prompt, history):
pass pass

@ -458,7 +458,7 @@ class BaseOpenAI(BaseLLM):
) )
params["max_tokens"] = self.max_tokens_for_prompt(prompts[0]) params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
sub_prompts = [ sub_prompts = [
prompts[i : i + self.batch_size] prompts[i: i + self.batch_size]
for i in range(0, len(prompts), self.batch_size) for i in range(0, len(prompts), self.batch_size)
] ]
return sub_prompts return sub_prompts
@ -469,7 +469,7 @@ class BaseOpenAI(BaseLLM):
"""Create the LLMResult from the choices and prompts.""" """Create the LLMResult from the choices and prompts."""
generations = [] generations = []
for i, _ in enumerate(prompts): for i, _ in enumerate(prompts):
sub_choices = choices[i * self.n : (i + 1) * self.n] sub_choices = choices[i * self.n: (i + 1) * self.n]
generations.append( generations.append(
[ [
Generation( Generation(

@ -13,12 +13,13 @@ class Mistral:
result = model.run(task) result = model.run(task)
print(result) print(result)
""" """
def __init__( def __init__(
self, self,
ai_name: str = "Node Model Agent", ai_name: str = "Node Model Agent",
system_prompt: str = None, system_prompt: str = None,
model_name: str ="mistralai/Mistral-7B-v0.1", model_name: str = "mistralai/Mistral-7B-v0.1",
device: str ="cuda", device: str = "cuda",
use_flash_attention: bool = False, use_flash_attention: bool = False,
temperature: float = 1.0, temperature: float = 1.0,
max_length: int = 100, max_length: int = 100,
@ -97,7 +98,7 @@ class Mistral:
""" """
#add user's message to the history # add user's message to the history
self.history.append( self.history.append(
Message( Message(
"User", "User",
@ -105,11 +106,11 @@ class Mistral:
) )
) )
#process msg # process msg
try: try:
response = self.agent.run(msg) response = self.agent.run(msg)
#add agent's response to the history # add agent's response to the history
self.history.append( self.history.append(
Message( Message(
"Agent", "Agent",
@ -117,7 +118,7 @@ class Mistral:
) )
) )
#if streaming is True # if streaming is True
if streaming: if streaming:
return self._stream_response(response) return self._stream_response(response)
else: else:
@ -126,7 +127,7 @@ class Mistral:
except Exception as error: except Exception as error:
error_message = f"Error processing message: {str(error)}" error_message = f"Error processing message: {str(error)}"
#add error to history # add error to history
self.history.append( self.history.append(
Message( Message(
"Agent", "Agent",
@ -151,4 +152,3 @@ class Mistral:
""" """
for token in response.split(): for token in response.split():
yield token yield token

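Per the docstring above, using the Mistral wrapper would look roughly like this (a sketch: the module path is assumed from this diff, and it presumes a CUDA device plus access to the mistralai/Mistral-7B-v0.1 weights):

from swarms.models.mistral import Mistral  # import path assumed

model = Mistral(device="cuda", temperature=0.7, max_length=200)
result = model.run("Summarize the plan in one sentence.")
print(result)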
@ -1,5 +1,6 @@
from transformers import AutoTokenizer, AutoModelForCausalLM from transformers import AutoTokenizer, AutoModelForCausalLM
class Petals: class Petals:
"""Petals Bloom models.""" """Petals Bloom models."""

@ -3,11 +3,13 @@ import re
from abc import abstractmethod from abc import abstractmethod
from typing import Dict, NamedTuple from typing import Dict, NamedTuple
class AgentAction(NamedTuple): class AgentAction(NamedTuple):
"""Action returned by AgentOutputParser.""" """Action returned by AgentOutputParser."""
name: str name: str
args: Dict args: Dict
class BaseAgentOutputParser: class BaseAgentOutputParser:
"""Base Output parser for Agent.""" """Base Output parser for Agent."""
@ -15,6 +17,7 @@ class BaseAgentOutputParser:
def parse(self, text: str) -> AgentAction: def parse(self, text: str) -> AgentAction:
"""Return AgentAction""" """Return AgentAction"""
class AgentOutputParser(BaseAgentOutputParser): class AgentOutputParser(BaseAgentOutputParser):
"""Output parser for Agent.""" """Output parser for Agent."""

@ -1,6 +1,7 @@
import json import json
from typing import List from typing import List
class PromptGenerator: class PromptGenerator:
"""A class for generating custom prompt strings.""" """A class for generating custom prompt strings."""
@ -75,4 +76,3 @@ class PromptGenerator:
) )
return prompt_string return prompt_string

@ -2,6 +2,7 @@ import time
from typing import Any, List from typing import Any, List
from swarms.models.prompts.agent_prompt_generator import get_prompt from swarms.models.prompts.agent_prompt_generator import get_prompt
class TokenUtils: class TokenUtils:
@staticmethod @staticmethod
def count_tokens(text: str) -> int: def count_tokens(text: str) -> int:

@ -27,6 +27,7 @@ def generate_report_prompt(question, research_summary):
" in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. "\ " in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. "\
"Write all source urls at the end of the report in apa format" "Write all source urls at the end of the report in apa format"
def generate_search_queries_prompt(question): def generate_search_queries_prompt(question):
""" Generates the search queries prompt for the given question. """ Generates the search queries prompt for the given question.
Args: question (str): The question to generate the search queries prompt for Args: question (str): The question to generate the search queries prompt for
@ -69,6 +70,7 @@ def generate_outline_report_prompt(question, research_summary):
' The research report should be detailed, informative, in-depth, and a minimum of 1,200 words.' \ ' The research report should be detailed, informative, in-depth, and a minimum of 1,200 words.' \
' Use appropriate Markdown syntax to format the outline and ensure readability.' ' Use appropriate Markdown syntax to format the outline and ensure readability.'
def generate_concepts_prompt(question, research_summary): def generate_concepts_prompt(question, research_summary):
""" Generates the concepts prompt for the given question. """ Generates the concepts prompt for the given question.
Args: question (str): The question to generate the concepts prompt for Args: question (str): The question to generate the concepts prompt for
@ -96,6 +98,7 @@ def generate_lesson_prompt(concept):
return prompt return prompt
def get_report_by_type(report_type): def get_report_by_type(report_type):
report_type_mapping = { report_type_mapping = {
'research_report': generate_report_prompt, 'research_report': generate_report_prompt,

@ -10,6 +10,7 @@ from swarms.utils.serializable import Serializable
if TYPE_CHECKING: if TYPE_CHECKING:
from langchain.prompts.chat import ChatPromptTemplate from langchain.prompts.chat import ChatPromptTemplate
def get_buffer_string( def get_buffer_string(
messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI" messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
) -> str: ) -> str:
@ -95,7 +96,7 @@ class BaseMessageChunk(BaseMessage):
for k, v in right.items(): for k, v in right.items():
if k not in merged: if k not in merged:
merged[k] = v merged[k] = v
elif type(merged[k]) != type(v): elif not isinstance(merged[k], type(v)):
raise ValueError( raise ValueError(
f'additional_kwargs["{k}"] already exists in this message,' f'additional_kwargs["{k}"] already exists in this message,'
" but with a different type." " but with a different type."

@ -11,6 +11,7 @@ class Message:
The base abstract Message class. The base abstract Message class.
Messages are the inputs and outputs of ChatModels. Messages are the inputs and outputs of ChatModels.
""" """
def __init__(self, content: str, role: str, additional_kwargs: Dict = None): def __init__(self, content: str, role: str, additional_kwargs: Dict = None):
self.content = content self.content = content
self.role = role self.role = role
@ -25,6 +26,7 @@ class HumanMessage(Message):
""" """
A Message from a human. A Message from a human.
""" """
def __init__(self, content: str, role: str = "Human", additional_kwargs: Dict = None, example: bool = False): def __init__(self, content: str, role: str = "Human", additional_kwargs: Dict = None, example: bool = False):
super().__init__(content, role, additional_kwargs) super().__init__(content, role, additional_kwargs)
self.example = example self.example = example
@ -37,6 +39,7 @@ class AIMessage(Message):
""" """
A Message from an AI. A Message from an AI.
""" """
def __init__(self, content: str, role: str = "AI", additional_kwargs: Dict = None, example: bool = False): def __init__(self, content: str, role: str = "AI", additional_kwargs: Dict = None, example: bool = False):
super().__init__(content, role, additional_kwargs) super().__init__(content, role, additional_kwargs)
self.example = example self.example = example
@ -50,6 +53,7 @@ class SystemMessage(Message):
A Message for priming AI behavior, usually passed in as the first of a sequence A Message for priming AI behavior, usually passed in as the first of a sequence
of input messages. of input messages.
""" """
def __init__(self, content: str, role: str = "System", additional_kwargs: Dict = None): def __init__(self, content: str, role: str = "System", additional_kwargs: Dict = None):
super().__init__(content, role, additional_kwargs) super().__init__(content, role, additional_kwargs)
@ -61,6 +65,7 @@ class FunctionMessage(Message):
""" """
A Message for passing the result of executing a function back to a model. A Message for passing the result of executing a function back to a model.
""" """
def __init__(self, content: str, role: str = "Function", name: str, additional_kwargs: Dict = None): def __init__(self, content: str, role: str = "Function", name: str, additional_kwargs: Dict = None):
super().__init__(content, role, additional_kwargs) super().__init__(content, role, additional_kwargs)
self.name = name self.name = name
@ -73,6 +78,7 @@ class ChatMessage(Message):
""" """
A Message that can be assigned an arbitrary speaker (i.e. role). A Message that can be assigned an arbitrary speaker (i.e. role).
""" """
def __init__(self, content: str, role: str, additional_kwargs: Dict = None): def __init__(self, content: str, role: str, additional_kwargs: Dict = None):
super().__init__(content, role, additional_kwargs) super().__init__(content, role, additional_kwargs)

@ -21,6 +21,7 @@ def character(character_name, topic, word_limit):
""" """
return prompt return prompt
def debate_monitor(game_description, word_limit, character_names): def debate_monitor(game_description, word_limit, character_names):
prompt = f""" prompt = f"""

@ -1,6 +1,5 @@
SALES_ASSISTANT_PROMPT = """You are a sales assistant helping your sales agent to determine which stage of a sales conversation should the agent move to, or stay at. SALES_ASSISTANT_PROMPT = """You are a sales assistant helping your sales agent to determine which stage of a sales conversation should the agent move to, or stay at.
Following '===' is the conversation history. Following '===' is the conversation history.
Use this conversation history to make your decision. Use this conversation history to make your decision.
@ -48,11 +47,10 @@ Conversation history:
{salesperson_name}: {salesperson_name}:
""" """
conversation_stages = {'1' : "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are contacting the prospect.", conversation_stages = {'1': "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are contacting the prospect.",
'2': "Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.", '2': "Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.",
'3': "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.", '3': "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.",
'4': "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.", '4': "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.",
'5': "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.", '5': "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.",
'6': "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.", '6': "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.",
'7': "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits."} '7': "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits."}

@ -9,7 +9,6 @@ conversation_stages = {
} }
SALES_AGENT_TOOLS_PROMPT = """ SALES_AGENT_TOOLS_PROMPT = """
Never forget your name is {salesperson_name}. You work as a {salesperson_role}. Never forget your name is {salesperson_name}. You work as a {salesperson_role}.
You work at company named {company_name}. {company_name}'s business is the following: {company_business}. You work at company named {company_name}. {company_name}'s business is the following: {company_business}.

@ -1,4 +1,4 @@
#structs # structs
#structs # structs
from swarms.structs.workflow import Workflow from swarms.structs.workflow import Workflow
from swarms.structs.task import Task from swarms.structs.task import Task

@ -2,6 +2,7 @@ from typing import List, Dict, Any, Union
from concurrent.futures import Executor, ThreadPoolExecutor, as_completed from concurrent.futures import Executor, ThreadPoolExecutor, as_completed
from graphlib import TopologicalSorter from graphlib import TopologicalSorter
class Task: class Task:
def __init__( def __init__(
self, self,
@ -46,6 +47,7 @@ class NonLinearWorkflow:
""" """
def __init__( def __init__(
self, self,
agents, agents,
@ -104,4 +106,3 @@ class NonLinearWorkflow:
return [ return [
self.find_task(task_id) for task_id in task_order self.find_task(task_id) for task_id in task_order
] ]

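NonLinearWorkflow's task ordering above leans on the standard library's graphlib; a minimal sketch of how TopologicalSorter turns dependencies into a runnable order:

from graphlib import TopologicalSorter

# task -> the set of tasks it depends on
graph = {"report": {"search", "summarize"}, "summarize": {"search"}}
print(list(TopologicalSorter(graph).static_order()))  # ['search', 'summarize', 'report']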
@ -4,7 +4,6 @@ from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
class Workflow: class Workflow:
""" """
Workflows are ideal for prescriptive processes that need to be executed Workflows are ideal for prescriptive processes that need to be executed
@ -94,4 +93,3 @@ class Workflow:
return return
else: else:
self.__run_from_task(next(iter(task.children), None)) self.__run_from_task(next(iter(task.children), None))

@ -5,6 +5,7 @@ from time import sleep
from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
from swarms.workers.worker import Worker from swarms.workers.worker import Worker
class AutoScaler: class AutoScaler:
""" """
The AutoScaler is like a Kubernetes pod that autoscales an agent, worker, or boss! The AutoScaler is like a Kubernetes pod that autoscales an agent, worker, or boss!
@ -55,15 +56,15 @@ class AutoScaler:
def scale_down(self): def scale_down(self):
with self.lock: with self.lock:
if len(self.agents_pool) > 10: #ensure minimum of 10 agents if len(self.agents_pool) > 10: # ensure minimum of 10 agents
del self.agents_pool[-1] #remove last agent del self.agents_pool[-1] # remove last agent
@log_decorator @log_decorator
@error_decorator @error_decorator
@timing_decorator @timing_decorator
def monitor_and_scale(self): def monitor_and_scale(self):
while True: while True:
sleep(60)#check every minute sleep(60) # check every minute
pending_tasks = self.task_queue.qsize() pending_tasks = self.task_queue.qsize()
active_agents = sum([1 for agent in self.agents_pool if agent.is_busy()]) active_agents = sum([1 for agent in self.agents_pool if agent.is_busy()])
@ -91,4 +92,3 @@ class AutoScaler:
if self.agents_pool: if self.agents_pool:
agent_to_remove = self.agents_pool.pop() agent_to_remove = self.agents_pool.pop()
del agent_to_remove del agent_to_remove
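A sketch of the scaling rule the loop above implies (the thresholds are illustrative, not the class's actual values):

pending_tasks, busy_agents, pool_size = 12, 8, 10
if pending_tasks > busy_agents:
    pool_size += 1          # scale_up: spawn another agent
elif pool_size > 10:
    pool_size -= 1          # scale_down: never drop below the 10-agent minimum
print(pool_size)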

@ -1,5 +1,6 @@
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
class AbstractSwarm(ABC): class AbstractSwarm(ABC):
# TODO: Pass in abstract LLM class that can utilize Hf or Anthropic models, Move away from OPENAI # TODO: Pass in abstract LLM class that can utilize Hf or Anthropic models, Move away from OPENAI
# TODO: ADD Universal Communication Layer, a ocean vectorstore instance # TODO: ADD Universal Communication Layer, a ocean vectorstore instance
@ -19,5 +20,3 @@ class AbstractSwarm(ABC):
@abstractmethod @abstractmethod
def run(self): def run(self):
pass pass

@ -1,6 +1,7 @@
from typing import List from typing import List
from swarms.workers.worker import Worker from swarms.workers.worker import Worker
class DialogueSimulator: class DialogueSimulator:
def __init__(self, agents: List[Worker]): def __init__(self, agents: List[Worker]):
self.agents = agents self.agents = agents

@ -29,6 +29,7 @@ class GodMode:
""" """
def __init__( def __init__(
self, self,
llms llms

@ -12,7 +12,7 @@ class GroupChat:
workers: List[Worker] workers: List[Worker]
messages: List[Dict] messages: List[Dict]
max_rounds: int = 10 max_rounds: int = 10
admin_name: str = "Admin" #admin worker admin_name: str = "Admin" # admin worker
@property @property
def worker_names(self) -> List[str]: def worker_names(self) -> List[str]:
@ -72,7 +72,6 @@ class GroupChat:
) )
class GroupChatManager(Worker): class GroupChatManager(Worker):
def __init__( def __init__(
self, self,
@ -103,7 +102,7 @@ class GroupChatManager(Worker):
sender: Optional[Worker] = None, sender: Optional[Worker] = None,
config: Optional[GroupChat] = None, config: Optional[GroupChat] = None,
) -> Union[str, Dict, None]: ) -> Union[str, Dict, None]:
#run # run
if messages is None: if messages is None:
messages = [] messages = []
@ -113,11 +112,11 @@ class GroupChatManager(Worker):
for i in range(groupchat.max_rounds): for i in range(groupchat.max_rounds):
if message["role"] != "function": if message["role"] != "function":
message["name"]= speaker.ai_name message["name"] = speaker.ai_name
groupchat.messages.append(message) groupchat.messages.append(message)
#broadcast the message to all workers except the speaker # broadcast the message to all workers except the speaker
for worker in groupchat.workers: for worker in groupchat.workers:
if worker != speaker: if worker != speaker:
self.send( self.send(
@ -130,24 +129,24 @@ class GroupChatManager(Worker):
break break
try: try:
#select next speaker # select next speaker
speaker = groupchat.select_speaker(speaker, self) speaker = groupchat.select_speaker(speaker, self)
#let the speaker speak # let the speaker speak
reply = speaker.generate_reply(sender=self) reply = speaker.generate_reply(sender=self)
except KeyboardInterrupt: except KeyboardInterrupt:
#let the admin speak if interrupted # let the admin speak if interrupted
if groupchat.admin_name in groupchat.worker_names: if groupchat.admin_name in groupchat.worker_names:
#admin worker is a participant # admin worker is a participant
speaker = groupchat.worker_by_name(groupchat.admin_name) speaker = groupchat.worker_by_name(groupchat.admin_name)
reply = speaker.generate_reply(sender=self) reply = speaker.generate_reply(sender=self)
else: else:
#admin worker is not found in participants # admin worker is not found in participants
raise raise
if reply is None: if reply is None:
break break
#speaker sends message without requesting a reply # speaker sends message without requesting a reply
speaker.send( speaker.send(
reply, reply,
self, self,

@ -2,22 +2,26 @@ import random
import tenacity import tenacity
from langchain.output_parsers import RegexParser from langchain.output_parsers import RegexParser
#utils # utils
class BidOutputParser(RegexParser): class BidOutputParser(RegexParser):
def get_format_instructions(self) -> str: def get_format_instructions(self) -> str:
return "Your response should be an integrater delimited by angled brackets like this: <int>" return "Your response should be an integrater delimited by angled brackets like this: <int>"
bid_parser = BidOutputParser( bid_parser = BidOutputParser(
regex=r"<(\d+)>", output_keys=["bid"], default_output_key="bid" regex=r"<(\d+)>", output_keys=["bid"], default_output_key="bid"
) )
def select_next_speaker( def select_next_speaker(
step: int, step: int,
agents, agents,
director director
) -> int: ) -> int:
#if the step is even => director # if the step is even => director
#=> director selects next speaker # => director selects next speaker
if step % 2 == 1: if step % 2 == 1:
idx = 0 idx = 0
else: else:
@ -25,7 +29,7 @@ def select_next_speaker(
return idx return idx
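A sketch of the bid round above: each agent's reply carries an integer in angle brackets, and the BidOutputParser's regex pulls it out:

import re

reply = "I should speak next. <7>"
match = re.search(r"<(\d+)>", reply)
print(match.group(1))      # '7' — returned under the "bid" output key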
#main # main
class MultiAgentCollaboration: class MultiAgentCollaboration:
def __init__( def __init__(
self, self,
@ -63,7 +67,7 @@ class MultiAgentCollaboration:
stop=tenacity.stop_after_attempt(10), stop=tenacity.stop_after_attempt(10),
wait=tenacity.wait_none(), wait=tenacity.wait_none(),
retry=tenacity.retry_if_exception_type(ValueError), retry=tenacity.retry_if_exception_type(ValueError),
before_sleep= lambda retry_state: print( before_sleep=lambda retry_state: print(
f"ValueError occured: {retry_state.outcome.exception()}, retying..." f"ValueError occured: {retry_state.outcome.exception()}, retying..."
), ),
retry_error_callback=lambda retry_state: 0, retry_error_callback=lambda retry_state: 0,

@ -7,6 +7,7 @@ def select_speaker(step: int, agents: List[Worker]) -> int:
# This function selects the speaker in a round-robin fashion # This function selects the speaker in a round-robin fashion
return step % len(agents) return step % len(agents)
class MultiAgentDebate: class MultiAgentDebate:
""" """
MultiAgentDebate MultiAgentDebate
@ -15,6 +16,7 @@ class MultiAgentDebate:
""" """
def __init__( def __init__(
self, self,
agents: List[Worker], agents: List[Worker],

@ -15,6 +15,7 @@ class TaskStatus(Enum):
COMPLETED = 3 COMPLETED = 3
FAILED = 4 FAILED = 4
class Orchestrator: class Orchestrator:
""" """
The Orchestrator takes in an agent, worker, or boss as input The Orchestrator takes in an agent, worker, or boss as input
@ -88,6 +89,7 @@ class Orchestrator:
print(orchestrator.retrieve_result(id(task))) print(orchestrator.retrieve_result(id(task)))
``` ```
""" """
def __init__( def __init__(
self, self,
agent, agent,
@ -96,8 +98,8 @@ class Orchestrator:
collection_name: str = "swarm", collection_name: str = "swarm",
api_key: str = None, api_key: str = None,
model_name: str = None, model_name: str = None,
embed_func = None, embed_func=None,
worker = None worker=None
): ):
self.agent = agent self.agent = agent
self.agents = queue.Queue() self.agents = queue.Queue()
@ -110,7 +112,7 @@ class Orchestrator:
self.chroma_client = chromadb.Client() self.chroma_client = chromadb.Client()
self.collection = self.chroma_client.create_collection( self.collection = self.chroma_client.create_collection(
name = collection_name name=collection_name
) )
self.current_tasks = {} self.current_tasks = {}
@ -121,8 +123,8 @@ class Orchestrator:
self.embed_func = embed_func if embed_func else self.embed self.embed_func = embed_func if embed_func else self.embed
# @abstractmethod # @abstractmethod
def assign_task( def assign_task(
self, self,
agent_id: int, agent_id: int,
@ -140,7 +142,7 @@ class Orchestrator:
try: try:
result = self.worker.run(task["content"]) result = self.worker.run(task["content"])
#using the embed method to get the vector representation of the result # using the embed method to get the vector representation of the result
vector_representation = self.embed( vector_representation = self.embed(
result, result,
self.api_key, self.api_key,
@ -170,13 +172,13 @@ class Orchestrator:
embedding = openai(input) embedding = openai(input)
return embedding return embedding
# @abstractmethod # @abstractmethod
def retrieve_results(self, agent_id: int) -> Any: def retrieve_results(self, agent_id: int) -> Any:
"""Retrieve results from a specific agent""" """Retrieve results from a specific agent"""
try: try:
#Query the vector database for documents created by the agents # Query the vector database for documents created by the agents
results = self.collection.query( results = self.collection.query(
query_texts=[str(agent_id)], query_texts=[str(agent_id)],
n_results=10 n_results=10
@ -202,8 +204,8 @@ class Orchestrator:
logging.error(f"Failed to update the vector database. Error: {e}") logging.error(f"Failed to update the vector database. Error: {e}")
raise raise
# @abstractmethod # @abstractmethod
def get_vector_db(self): def get_vector_db(self):
"""Retrieve the vector database""" """Retrieve the vector database"""
return self.collection return self.collection
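As a standalone illustration of the storage-and-retrieval path above, assuming chromadb's default embedding function is acceptable (the class allows a custom `embed_func`):

```python
import chromadb

client = chromadb.Client()  # in-memory instance
collection = client.create_collection(name="swarm")

# Results are stored under the producing agent's id...
collection.add(documents=["task result from agent 1"], ids=["1"])

# ...and retrieved by querying for that id, as retrieve_results() does.
results = collection.query(query_texts=["1"], n_results=10)
print(results["documents"])
```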
@ -224,7 +226,7 @@ class Orchestrator:
logging.error(f"Failed to append the agent output to database. Error: {e}") logging.error(f"Failed to append the agent output to database. Error: {e}")
raise raise
def run(self, objective:str): def run(self, objective: str):
"""Runs""" """Runs"""
if not objective or not isinstance(objective, str): if not objective or not isinstance(objective, str):
logging.error("Invalid objective") logging.error("Invalid objective")
@ -280,7 +282,7 @@ class Orchestrator:
self.model_name self.model_name
) )
#store the mesage in the vector database # store the message in the vector database
self.collection.add( self.collection.add(
embeddings=[message_vector], embeddings=[message_vector],
documents=[message], documents=[message],
@ -291,9 +293,6 @@ class Orchestrator:
objective=f"chat with agent {receiver_id} about {message}" objective=f"chat with agent {receiver_id} about {message}"
) )
def add_agents( def add_agents(
self, self,
num_agents: int num_agents: int
@ -311,4 +310,3 @@ class Orchestrator:
self.executor = ThreadPoolExecutor( self.executor = ThreadPoolExecutor(
max_workers=self.agents.qsize() max_workers=self.agents.qsize()
) )
@ -13,6 +13,7 @@ class TaskStatus(Enum):
COMPLETED = 3 COMPLETED = 3
FAILED = 4 FAILED = 4
class ScalableGroupChat: class ScalableGroupChat:
""" """
This class enables a scalable group chat, like Telegram; it takes a Worker as input This class enables a scalable group chat, like Telegram; it takes a Worker as input
@ -26,6 +27,7 @@ class ScalableGroupChat:
-> every worker can communicate without restrictions in parallel -> every worker can communicate without restrictions in parallel
""" """
def __init__( def __init__(
self, self,
worker_count: int = 5, worker_count: int = 5,
@ -61,7 +63,6 @@ class ScalableGroupChat:
return embedding return embedding
def retrieve_results( def retrieve_results(
self, self,
agent_id: int agent_id: int
@ -69,7 +70,7 @@ class ScalableGroupChat:
"""Retrieve results from a specific agent""" """Retrieve results from a specific agent"""
try: try:
#Query the vector database for documents created by the agents # Query the vector database for documents created by the agents
results = self.collection.query( results = self.collection.query(
query_texts=[str(agent_id)], query_texts=[str(agent_id)],
n_results=10 n_results=10
@ -95,13 +96,12 @@ class ScalableGroupChat:
logging.error(f"Failed to update the vector database. Error: {e}") logging.error(f"Failed to update the vector database. Error: {e}")
raise raise
# @abstractmethod # @abstractmethod
def get_vector_db(self): def get_vector_db(self):
"""Retrieve the vector database""" """Retrieve the vector database"""
return self.collection return self.collection
def append_to_db( def append_to_db(
self, self,
result: str result: str
@ -118,8 +118,6 @@ class ScalableGroupChat:
logging.error(f"Failed to append the agent output to database. Error: {e}") logging.error(f"Failed to append the agent output to database. Error: {e}")
raise raise
def chat( def chat(
self, self,
sender_id: int, sender_id: int,
@ -148,7 +146,7 @@ class ScalableGroupChat:
message, message,
) )
#store the mesage in the vector database # store the message in the vector database
self.collection.add( self.collection.add(
embeddings=[message_vector], embeddings=[message_vector],
documents=[message], documents=[message],
@ -158,5 +156,3 @@ class ScalableGroupChat:
self.run( self.run(
objective=f"chat with agent {receiver_id} about {message}" objective=f"chat with agent {receiver_id} about {message}"
) )
@ -1,6 +1,7 @@
from swarms.workers.worker import Worker from swarms.workers.worker import Worker
from queue import Queue, PriorityQueue from queue import Queue, PriorityQueue
class SimpleSwarm: class SimpleSwarm:
def __init__( def __init__(
self, self,
@ -55,7 +56,7 @@ class SimpleSwarm:
self.task_queue.put(task) self.task_queue.put(task)
def _process_task(self, task): def _process_task(self, task):
#TODO, Implement load balancing, fallback mechanism # TODO: implement load balancing and a fallback mechanism
for worker in self.workers: for worker in self.workers:
response = worker.run(task) response = worker.run(task)
if response: if response:
@ -67,19 +68,18 @@ class SimpleSwarm:
responses = [] responses = []
#process high priority tasks first # process high priority tasks first
while not self.priority_queue.empty(): while not self.priority_queue.empty():
_, task = self.priority_queue.get() _, task = self.priority_queue.get()
responses.append(self._process_task(task)) responses.append(self._process_task(task))
#process normal tasks # process normal tasks
while not self.task_queue.empty(): while not self.task_queue.empty():
task = self.task_queue.get() task = self.task_queue.get()
responses.append(self._process_task(task)) responses.append(self._process_task(task))
return responses return responses
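The drain order above can be seen in miniature with the two queue types from the standard library:

```python
from queue import PriorityQueue, Queue

priority_queue: PriorityQueue = PriorityQueue()
task_queue: Queue = Queue()

priority_queue.put((1, "rotate the API keys"))  # lower number = higher priority
priority_queue.put((5, "tidy up old logs"))
task_queue.put("summarize the daily report")

while not priority_queue.empty():               # high-priority tasks first
    _, task = priority_queue.get()
    print("processing:", task)

while not task_queue.empty():                   # then normal tasks, FIFO
    print("processing:", task_queue.get())
```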
def run_old(self, task): def run_old(self, task):
responses = [] responses = []
@ -1,3 +1,17 @@
import interpreter
from transformers import (
BlipForQuestionAnswering,
BlipProcessor,
)
from PIL import Image
import torch
from swarms.utils.logger import logger
from pydantic import Field
from langchain.tools.file_management.write import WriteFileTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools import BaseTool
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.qa_with_sources.loading import BaseCombineDocumentsChain
import asyncio import asyncio
import os import os
@ -13,16 +27,6 @@ from langchain.docstore.document import Document
ROOT_DIR = "./data/" ROOT_DIR = "./data/"
from langchain.chains.qa_with_sources.loading import BaseCombineDocumentsChain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools import BaseTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool
from pydantic import Field
from swarms.utils.logger import logger
@contextmanager @contextmanager
def pushd(new_dir): def pushd(new_dir):
@ -34,6 +38,7 @@ def pushd(new_dir):
finally: finally:
os.chdir(prev_dir) os.chdir(prev_dir)
@tool @tool
def process_csv( def process_csv(
llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None
@ -84,10 +89,12 @@ async def async_load_playwright(url: str) -> str:
await browser.close() await browser.close()
return results return results
def run_async(coro): def run_async(coro):
event_loop = asyncio.get_event_loop() event_loop = asyncio.get_event_loop()
return event_loop.run_until_complete(coro) return event_loop.run_until_complete(coro)
@tool @tool
def browse_web_page(url: str) -> str: def browse_web_page(url: str) -> str:
"""Verbose way to scrape a whole webpage. Likely to cause issues parsing.""" """Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
@ -97,9 +104,9 @@ def browse_web_page(url: str) -> str:
def _get_text_splitter(): def _get_text_splitter():
return RecursiveCharacterTextSplitter( return RecursiveCharacterTextSplitter(
# Set a really small chunk size, just to show. # Set a really small chunk size, just to show.
chunk_size = 500, chunk_size=500,
chunk_overlap = 20, chunk_overlap=20,
length_function = len, length_function=len,
) )
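A usage sketch for the splitter configured above, assuming langchain is installed (the sample text is made up):

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,      # intentionally small, as the comment above notes
    chunk_overlap=20,
    length_function=len,
)
page_text = "Scraped page content. " * 100
chunks = splitter.split_text(page_text)
print(len(chunks), "chunks,", len(chunks[0]), "chars in the first")
```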
@ -117,7 +124,7 @@ class WebpageQATool(BaseTool):
results = [] results = []
# TODO: Handle this with a MapReduceChain # TODO: Handle this with a MapReduceChain
for i in range(0, len(web_docs), 4): for i in range(0, len(web_docs), 4):
input_docs = web_docs[i:i+4] input_docs = web_docs[i:i + 4]
window_result = self.qa_chain({"input_documents": input_docs, "question": question}, return_only_outputs=True) window_result = self.qa_chain({"input_documents": input_docs, "question": question}, return_only_outputs=True)
results.append(f"Response from window {i} - {window_result}") results.append(f"Response from window {i} - {window_result}")
results_docs = [Document(page_content="\n".join(results), metadata={"source": url})] results_docs = [Document(page_content="\n".join(results), metadata={"source": url})]
@ -126,8 +133,6 @@ class WebpageQATool(BaseTool):
async def _arun(self, url: str, question: str) -> str: async def _arun(self, url: str, question: str) -> str:
raise NotImplementedError raise NotImplementedError
import interpreter
@tool @tool
def compile(task: str): def compile(task: str):
@ -153,16 +158,7 @@ def compile(task: str):
os.environ["INTERPRETER_CLI_DEBUG"] = True os.environ["INTERPRETER_CLI_DEBUG"] = True
# mm model workers # mm model workers
import torch
from PIL import Image
from transformers import (
BlipForQuestionAnswering,
BlipProcessor,
)
@tool @tool
@ -195,5 +191,3 @@ def VQAinference(self, inputs):
) )
return answer return answer

@ -10,6 +10,7 @@ from langchain.llms.base import BaseLLM
from langchain.agents.agent import AgentExecutor from langchain.agents.agent import AgentExecutor
from langchain.agents import load_tools from langchain.agents import load_tools
class ToolScope(Enum): class ToolScope(Enum):
GLOBAL = "global" GLOBAL = "global"
SESSION = "session" SESSION = "session"
@ -1,8 +1,9 @@
#props to shroominic # props to shroominic
from swarms.tools.base import Tool, ToolException from swarms.tools.base import Tool, ToolException
from typing import Any, List from typing import Any, List
from codeinterpreterapi import CodeInterpreterSession, File, ToolException from codeinterpreterapi import CodeInterpreterSession, File, ToolException
class CodeInterpreter(Tool): class CodeInterpreter(Tool):
def __init__(self, name: str, description: str): def __init__(self, name: str, description: str):
super().__init__(name, description, self.run) super().__init__(name, description, self.run)
@ -51,6 +52,7 @@ class CodeInterpreter(Tool):
# terminate the session # terminate the session
await session.astop() await session.astop()
""" """
tool = CodeInterpreter("Code Interpreter", "A tool to interpret code and generate useful outputs.") tool = CodeInterpreter("Code Interpreter", "A tool to interpret code and generate useful outputs.")
@ -25,7 +25,7 @@ from swarms.tools.base import BaseToolSet, SessionGetter, ToolScope, tool
from swarms.utils.logger import logger from swarms.utils.logger import logger
from swarms.utils.main import ANSI, Color, Style # test from swarms.utils.main import ANSI, Color, Style # test
#helpers # helpers
PipeType = Union[Literal["stdout"], Literal["stderr"]] PipeType = Union[Literal["stdout"], Literal["stderr"]]
@ -42,7 +42,6 @@ def verify(func):
return wrapper return wrapper
class SyscallTimeoutException(Exception): class SyscallTimeoutException(Exception):
def __init__(self, pid: int, *args) -> None: def __init__(self, pid: int, *args) -> None:
super().__init__(f"deadline exceeded while waiting syscall for {pid}", *args) super().__init__(f"deadline exceeded while waiting syscall for {pid}", *args)
@ -132,8 +131,6 @@ class SyscallTracer:
return exitcode, reason return exitcode, reason
class StdoutTracer: class StdoutTracer:
def __init__( def __init__(
self, self,
@ -196,7 +193,6 @@ class StdoutTracer:
return (exitcode, output) return (exitcode, output)
class Terminal(BaseToolSet): class Terminal(BaseToolSet):
def __init__(self): def __init__(self):
self.sessions: Dict[str, List[SyscallTracer]] = {} self.sessions: Dict[str, List[SyscallTracer]] = {}
@ -242,7 +238,6 @@ class Terminal(BaseToolSet):
############# #############
@tool( @tool(
name="Terminal", name="Terminal",
description="Executes commands in a terminal." description="Executes commands in a terminal."
@ -281,8 +276,6 @@ def terminal_execute(self, commands: str, get_session: SessionGetter) -> str:
return output return output
""" """
write protocol: write protocol:
@ -291,7 +284,6 @@ write protocol:
""" """
class WriteCommand: class WriteCommand:
separator = "\n" separator = "\n"
@ -316,7 +308,7 @@ class WriteCommand:
@staticmethod @staticmethod
def from_str(command: str) -> "WriteCommand": def from_str(command: str) -> "WriteCommand":
filepath = command.split(WriteCommand.separator)[0] filepath = command.split(WriteCommand.separator)[0]
return WriteCommand(filepath, command[len(filepath) + 1 :]) return WriteCommand(filepath, command[len(filepath) + 1:])
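The write protocol parsed by `from_str` is simply `<filepath>` on the first line and the code after it; in isolation:

```python
separator = "\n"
command = "hello.py" + separator + "print('hello world')"

filepath = command.split(separator)[0]
content = command[len(filepath) + 1:]  # skip the path and the separator

assert filepath == "hello.py"
assert content == "print('hello world')"
```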
class CodeWriter: class CodeWriter:
@ -329,15 +321,13 @@ class CodeWriter:
return WriteCommand.from_str(command).with_mode("a").execute() return WriteCommand.from_str(command).with_mode("a").execute()
""" """
read protocol: read protocol:
<filepath>|<start line>-<end line> <filepath>|<start line>-<end line>
""" """
class Line: class Line:
def __init__(self, content: str, line_number: int, depth: int): def __init__(self, content: str, line_number: int, depth: int):
self.__content: str = content self.__content: str = content
@ -445,7 +435,7 @@ class ReadCommand:
if self.start == self.end: if self.start == self.end:
code = code[self.start - 1] code = code[self.start - 1]
else: else:
code = "".join(code[self.start - 1 : self.end]) code = "".join(code[self.start - 1: self.end])
return code return code
@staticmethod @staticmethod
@ -500,10 +490,6 @@ class CodeReader:
return SummaryCommand.from_str(command).execute() return SummaryCommand.from_str(command).execute()
""" """
patch protocol: patch protocol:
@ -563,7 +549,6 @@ test.py|11,16|11,16|_titles
""" """
class Position: class Position:
separator = "," separator = ","
@ -607,9 +592,9 @@ class PatchCommand:
lines[self.start.line] = ( lines[self.start.line] = (
lines[self.start.line][: self.start.col] lines[self.start.line][: self.start.col]
+ self.content + self.content
+ lines[self.end.line][self.end.col :] + lines[self.end.line][self.end.col:]
) )
lines = lines[: self.start.line + 1] + lines[self.end.line + 1 :] lines = lines[: self.start.line + 1] + lines[self.end.line + 1:]
after = self.write_lines(lines) after = self.write_lines(lines)
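The two slice operations above perform the splice at the heart of the patch protocol; worked on a toy buffer (the positions are assumptions for the demo):

```python
lines = ["def add(a, b):\n", "    return a - b\n"]  # buggy second line
start_line, start_col = 1, 11                        # just before "a - b"
end_line, end_col = 1, 16                            # just past it
content = "a + b"

# Replace the span between (start_line, start_col) and (end_line, end_col).
lines[start_line] = (
    lines[start_line][:start_col] + content + lines[end_line][end_col:]
)
lines = lines[:start_line + 1] + lines[end_line + 1:]
assert lines[1] == "    return a + b\n"
```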
@ -664,11 +649,6 @@ class CodePatcher:
return written, deleted return written, deleted
class CodeEditor(BaseToolSet): class CodeEditor(BaseToolSet):
@tool( @tool(
name="CodeEditor.READ", name="CodeEditor.READ",
@ -825,6 +805,7 @@ def code_editor_read(self, inputs: str) -> str:
) )
return output return output
@tool( @tool(
name="CodeEditor.SUMMARY", name="CodeEditor.SUMMARY",
description="Summary code. " description="Summary code. "
@ -845,6 +826,7 @@ def code_editor_summary(self, inputs: str) -> str:
) )
return output return output
@tool( @tool(
name="CodeEditor.APPEND", name="CodeEditor.APPEND",
description="Append code to the existing file. " description="Append code to the existing file. "
@ -867,6 +849,7 @@ def code_editor_append(self, inputs: str) -> str:
) )
return output return output
@tool( @tool(
name="CodeEditor.WRITE", name="CodeEditor.WRITE",
description="Write code to create a new tool. " description="Write code to create a new tool. "
@ -890,6 +873,7 @@ def code_editor_write(self, inputs: str) -> str:
) )
return output return output
@tool( @tool(
name="CodeEditor.PATCH", name="CodeEditor.PATCH",
description="Patch the code to correct the error if an error occurs or to improve it. " description="Patch the code to correct the error if an error occurs or to improve it. "
@ -920,6 +904,7 @@ def code_editor_patch(self, patches: str) -> str:
) )
return output return output
@tool( @tool(
name="CodeEditor.DELETE", name="CodeEditor.DELETE",
description="Delete code in file for a new start. " description="Delete code in file for a new start. "
@ -20,6 +20,3 @@ class ExitConversation(BaseToolSet):
logger.debug("\nProcessed ExitConversation.") logger.debug("\nProcessed ExitConversation.")
return message return message
@ -223,7 +223,6 @@ class VisualQuestionAnswering(BaseToolSet):
return answer return answer
class ImageCaptioning(BaseHandler): class ImageCaptioning(BaseHandler):
def __init__(self, device): def __init__(self, device):
print("Initializing ImageCaptioning to %s" % device) print("Initializing ImageCaptioning to %s" % device)
@ -256,8 +255,3 @@ class ImageCaptioning(BaseHandler):
) )
return IMAGE_PROMPT.format(filename=filename, description=description) return IMAGE_PROMPT.format(filename=filename, description=description)
@ -35,4 +35,3 @@ class RequestsGet(BaseToolSet):
) )
return content return content
@ -1,4 +1,4 @@
#speech to text tool # speech to text tool
import os import os
import subprocess import subprocess
@ -14,9 +14,9 @@ class SpeechToText:
video_url, video_url,
audio_format='mp3', audio_format='mp3',
device='cuda', device='cuda',
batch_size = 16, batch_size=16,
compute_type = "float16", compute_type="float16",
hf_api_key = None hf_api_key=None
): ):
""" """
# Example usage # Example usage
@ -38,7 +38,6 @@ class SpeechToText:
subprocess.run(["pip", "install", "pytube"]) subprocess.run(["pip", "install", "pytube"])
subprocess.run(["pip", "install", "pydub"]) subprocess.run(["pip", "install", "pydub"])
def download_youtube_video(self): def download_youtube_video(self):
audio_file = f'video.{self.audio_format}' audio_file = f'video.{self.audio_format}'
@ -121,5 +120,3 @@ class SpeechToText:
return transcription return transcription
except KeyError: except KeyError:
print("The key 'segments' is not found in the result.") print("The key 'segments' is not found in the result.")
@ -13,6 +13,7 @@ def log_decorator(func):
return result return result
return wrapper return wrapper
def error_decorator(func): def error_decorator(func):
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
try: try:
@ -22,6 +23,7 @@ def error_decorator(func):
raise raise
return wrapper return wrapper
def timing_decorator(func): def timing_decorator(func):
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
start_time = time.time() start_time = time.time()
@ -31,6 +33,7 @@ def timing_decorator(func):
return result return result
return wrapper return wrapper
def retry_decorator(max_retries=5): def retry_decorator(max_retries=5):
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
@ -44,16 +47,20 @@ def retry_decorator(max_retries=5):
return wrapper return wrapper
return decorator return decorator
def singleton_decorator(cls): def singleton_decorator(cls):
instances = {} instances = {}
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
if cls not in instances: if cls not in instances:
instances[cls] = cls(*args, **kwargs) instances[cls] = cls(*args, **kwargs)
return instances[cls] return instances[cls]
return wrapper return wrapper
def synchronized_decorator(func): def synchronized_decorator(func):
func.__lock__ = threading.Lock() func.__lock__ = threading.Lock()
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
with func.__lock__: with func.__lock__:
return func(*args, **kwargs) return func(*args, **kwargs)
@ -67,6 +74,7 @@ def deprecated_decorator(func):
return func(*args, **kwargs) return func(*args, **kwargs)
return wrapper return wrapper
def validate_inputs_decorator(validator): def validate_inputs_decorator(validator):
def decorator(func): def decorator(func):
@functools.wraps(func) @functools.wraps(func)
@ -76,4 +84,3 @@ def validate_inputs_decorator(validator):
return func(*args, **kwargs) return func(*args, **kwargs)
return wrapper return wrapper
return decorator return decorator
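A quick check of the singleton behavior defined above: the decorator caches one instance per class, so repeated construction returns the same object.

```python
def singleton_decorator(cls):
    instances = {}

    def wrapper(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return wrapper

@singleton_decorator
class Registry:
    pass

assert Registry() is Registry()  # every call returns the same cached instance
```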
@ -1,3 +1,12 @@
import pandas as pd
from swarms.models.prompts.prebuild.multi_modal_prompts import DATAFRAME_PROMPT
import requests
from typing import Dict
from enum import Enum
from pathlib import Path
import shutil
import boto3
from abc import ABC, abstractmethod, abstractstaticmethod
import os import os
import random import random
import uuid import uuid
@ -13,7 +22,7 @@ def seed_everything(seed):
torch.manual_seed(seed) torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed) torch.cuda.manual_seed_all(seed)
except: except BaseException:
pass pass
return seed return seed
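For context, a hedged reconstruction of the full function: the hunk only shows the torch lines, so the `random`/`numpy`/`PYTHONHASHSEED` seeding below is an assumption based on the usual pattern.

```python
import os
import random

import numpy as np
import torch

def seed_everything(seed: int) -> int:
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    try:
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)  # guarded: no-op path if CUDA breaks
    except BaseException:
        pass
    return seed
```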
@ -75,16 +84,10 @@ def get_new_dataframe_name(org_img_name, func_name="update"):
this_new_uuid, func_name, recent_prev_file_name, most_org_file_name this_new_uuid, func_name, recent_prev_file_name, most_org_file_name
) )
return os.path.join(head, new_file_name) return os.path.join(head, new_file_name)
#########=======================> utils end # =======================> utils end
# =======================> ANSI BEGINNING
#########=======================> ANSI BEGINNING
class Code: class Code:
@ -200,13 +203,10 @@ def dim_multiline(message: str) -> str:
return lines[0] return lines[0]
return lines[0] + ANSI("\n... ".join([""] + lines[1:])).to(Color.black().bright()) return lines[0] + ANSI("\n... ".join([""] + lines[1:])).to(Color.black().bright())
#+=============================> ANSI Ending # +=============================> ANSI Ending
#================================> upload base
from abc import ABC, abstractmethod, abstractstaticmethod
# ================================> upload base
STATIC_DIR = "static" STATIC_DIR = "static"
@ -221,13 +221,10 @@ class AbstractUploader(ABC):
def from_settings() -> "AbstractUploader": def from_settings() -> "AbstractUploader":
pass pass
#================================> upload end # ================================> upload end
#========================= upload s3
# ========================= upload s3
import boto3
class S3Uploader(AbstractUploader): class S3Uploader(AbstractUploader):
@ -259,11 +256,10 @@ class S3Uploader(AbstractUploader):
self.client.upload_file(filepath, self.bucket, object_name) self.client.upload_file(filepath, self.bucket, object_name)
return self.get_url(object_name) return self.get_url(object_name)
#========================= upload s3 # ========================= upload s3
#========================> upload/static
import shutil # ========================> upload/static
from pathlib import Path
class StaticUploader(AbstractUploader): class StaticUploader(AbstractUploader):
@ -277,8 +273,6 @@ class StaticUploader(AbstractUploader):
server = os.environ.get("SERVER", "http://localhost:8000") server = os.environ.get("SERVER", "http://localhost:8000")
return StaticUploader(server, path, endpoint) return StaticUploader(server, path, endpoint)
def get_url(self, uploaded_path: str) -> str: def get_url(self, uploaded_path: str) -> str:
return f"{self.server}/{uploaded_path}" return f"{self.server}/{uploaded_path}"
@ -291,14 +285,8 @@ class StaticUploader(AbstractUploader):
return f"{self.server}/{endpoint_path}" return f"{self.server}/{endpoint_path}"
# ========================> handlers/base
#========================> handlers/base
import uuid
from enum import Enum
from typing import Dict
import requests
# from env import settings # from env import settings
@ -371,7 +359,7 @@ class FileHandler:
def handle(self, url: str) -> str: def handle(self, url: str) -> str:
try: try:
if url.startswith(os.environ.get("SERVER", "http://localhost:8000")): if url.startswith(os.environ.get("SERVER", "http://localhost:8000")):
local_filepath = url[len(os.environ.get("SERVER", "http://localhost:8000")) + 1 :] local_filepath = url[len(os.environ.get("SERVER", "http://localhost:8000")) + 1:]
local_filename = Path("file") / local_filepath.split("/")[-1] local_filename = Path("file") / local_filepath.split("/")[-1]
src = self.path / local_filepath src = self.path / local_filepath
dst = self.path / os.environ.get("PLAYGROUND_DIR", "./playground") / local_filename dst = self.path / os.environ.get("PLAYGROUND_DIR", "./playground") / local_filename
@ -391,18 +379,12 @@ class FileHandler:
return handler.handle(local_filename) return handler.handle(local_filename)
except Exception as e: except Exception as e:
raise e raise e
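The prefix arithmetic in `handle()` strips the server URL plus the separating slash, which is all the `+ 1` accounts for:

```python
server = "http://localhost:8000"
url = "http://localhost:8000/static/report.csv"

local_filepath = url[len(server) + 1:]  # drop the prefix and the "/"
assert local_filepath == "static/report.csv"
```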
########################### => base end # => base end
# ===========================>
#############===========================>
from swarms.models.prompts.prebuild.multi_modal_prompts import DATAFRAME_PROMPT
import pandas as pd
class CsvToDataframe(BaseHandler): class CsvToDataframe(BaseHandler):
def handle(self, filename: str): def handle(self, filename: str):
df = pd.read_csv(filename) df = pd.read_csv(filename)
@ -417,7 +399,3 @@ class CsvToDataframe(BaseHandler):
) )
return DATAFRAME_PROMPT.format(filename=filename, description=description) return DATAFRAME_PROMPT.format(filename=filename, description=description)
@ -6,6 +6,7 @@ from pathlib import Path
from swarms.utils.main import AbstractUploader from swarms.utils.main import AbstractUploader
class StaticUploader(AbstractUploader): class StaticUploader(AbstractUploader):
def __init__(self, server: str, path: Path, endpoint: str): def __init__(self, server: str, path: Path, endpoint: str):
self.server = server self.server = server
@ -33,7 +33,7 @@ class AbstractWorker:
def send( def send(
self, self,
message: Union[Dict, str], message: Union[Dict, str],
recipient, #add AbstractWorker recipient, # add AbstractWorker
request_reply: Optional[bool] = None request_reply: Optional[bool] = None
): ):
"""(Abstract method) Send a message to another worker.""" """(Abstract method) Send a message to another worker."""
@ -41,7 +41,7 @@ class AbstractWorker:
async def a_send( async def a_send(
self, self,
message: Union[Dict, str], message: Union[Dict, str],
recipient, #add AbstractWorker recipient, # add AbstractWorker
request_reply: Optional[bool] = None request_reply: Optional[bool] = None
): ):
"""(Aabstract async method) Send a message to another worker.""" """(Aabstract async method) Send a message to another worker."""
@ -49,7 +49,7 @@ class AbstractWorker:
def receive( def receive(
self, self,
message: Union[Dict, str], message: Union[Dict, str],
sender, #add AbstractWorker sender, # add AbstractWorker
request_reply: Optional[bool] = None request_reply: Optional[bool] = None
): ):
"""(Abstract method) Receive a message from another worker.""" """(Abstract method) Receive a message from another worker."""
@ -57,7 +57,7 @@ class AbstractWorker:
async def a_receive( async def a_receive(
self, self,
message: Union[Dict, str], message: Union[Dict, str],
sender, #add AbstractWorker sender, # add AbstractWorker
request_reply: Optional[bool] = None request_reply: Optional[bool] = None
): ):
"""(Abstract async method) Receive a message from another worker.""" """(Abstract async method) Receive a message from another worker."""
@ -68,7 +68,7 @@ class AbstractWorker:
def generate_reply( def generate_reply(
self, self,
messages: Optional[List[Dict]] = None, messages: Optional[List[Dict]] = None,
sender = None, #Optional["AbstractWorker"] = None, sender=None, # Optional["AbstractWorker"] = None,
**kwargs, **kwargs,
) -> Union[str, Dict, None]: ) -> Union[str, Dict, None]:
"""(Abstract method) Generate a reply based on the received messages. """(Abstract method) Generate a reply based on the received messages.
@ -83,7 +83,7 @@ class AbstractWorker:
async def a_generate_reply( async def a_generate_reply(
self, self,
messages: Optional[List[Dict]] = None, messages: Optional[List[Dict]] = None,
sender = None, #Optional["AbstractWorker"] = None, sender=None, # Optional["AbstractWorker"] = None,
**kwargs, **kwargs,
) -> Union[str, Dict, None]: ) -> Union[str, Dict, None]:
"""(Abstract async method) Generate a reply based on the received messages. """(Abstract async method) Generate a reply based on the received messages.
@ -17,10 +17,12 @@ from swarms.tools.autogpt import (
) )
from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
#cache # cache
ROOT_DIR = "./data/" ROOT_DIR = "./data/"
#main # main
class Worker: class Worker:
""" """
Useful for when you need to spawn an autonomous agent instance as a worker to accomplish complex tasks, Useful for when you need to spawn an autonomous agent instance as a worker to accomplish complex tasks,
@ -54,14 +56,15 @@ class Worker:
llm + tools + memory llm + tools + memory
""" """
def __init__( def __init__(
self, self,
ai_name: str = "Autobot Swarm Worker", ai_name: str = "Autobot Swarm Worker",
ai_role: str = "Worker in a swarm", ai_role: str = "Worker in a swarm",
external_tools = None, external_tools=None,
human_in_the_loop = False, human_in_the_loop=False,
temperature: float = 0.5, temperature: float = 0.5,
llm = None, llm=None,
openai_api_key: str = None, openai_api_key: str = None,
): ):
self.temperature = temperature self.temperature = temperature
@ -139,7 +142,6 @@ class Worker:
if external_tools is not None: if external_tools is not None:
self.tools.extend(external_tools) self.tools.extend(external_tools)
def setup_memory(self): def setup_memory(self):
""" """
Set up memory for the worker. Set up memory for the worker.
@ -158,7 +160,6 @@ class Worker:
except Exception as error: except Exception as error:
raise RuntimeError(f"Error setting up memory; perhaps try tuning the embedding size: {error}") raise RuntimeError(f"Error setting up memory; perhaps try tuning the embedding size: {error}")
def setup_agent(self): def setup_agent(self):
""" """
Set up the autonomous agent. Set up the autonomous agent.
@ -249,7 +250,7 @@ class Worker:
""" """
#add users message to the history # add the user's message to the history
self.history.append( self.history.append(
Message( Message(
"User", "User",
@ -257,11 +258,11 @@ class Worker:
) )
) )
#process msg # process msg
try: try:
response = self.agent.run(msg) response = self.agent.run(msg)
#add agent's response to the history # add agent's response to the history
self.history.append( self.history.append(
Message( Message(
"Agent", "Agent",
@ -269,7 +270,7 @@ class Worker:
) )
) )
#if streaming is = True # if streaming is enabled
if streaming: if streaming:
return self._stream_response(response) return self._stream_response(response)
else: else:
@ -278,7 +279,7 @@ class Worker:
except Exception as error: except Exception as error:
error_message = f"Error processing message: {str(error)}" error_message = f"Error processing message: {str(error)}"
#add error to history # add error to history
self.history.append( self.history.append(
Message( Message(
"Agent", "Agent",
@ -311,4 +312,3 @@ class Worker:
return {"content": message} return {"content": message}
else: else:
return message return message
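The streaming branch above hands back `self._stream_response(response)`; a plausible generator-based sketch (the helper's body is not shown in this diff):

```python
def stream_response_sketch(response: str):
    # Yield the reply one token (here: word) at a time.
    for token in response.split():
        yield token

for token in stream_response_sketch("hello from the worker"):
    print(token)
```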