Flake8 done!

pull/387/head
Wyatt Stanke 1 year ago
parent 4de8e5d51c
commit da06379e2a

@ -1,6 +1,6 @@
from swarms import Agent, OpenAIChat
## Initialize the workflow
# Initialize the workflow
agent = Agent(
llm=OpenAIChat(),
max_loops="auto",
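
For context, a minimal sketch of how this example agent is typically driven once initialized; the task string and printed output below are illustrative, not part of the commit:

from swarms import Agent, OpenAIChat

# Assumes OPENAI_API_KEY is available in the environment for OpenAIChat.
agent = Agent(
    llm=OpenAIChat(),
    max_loops="auto",  # let the agent decide when to stop looping
)
out = agent.run("Summarize the key findings of this report")  # illustrative task
print(out)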

@ -14,8 +14,4 @@ from swarms.tokenizers import * # noqa: E402, F403
from swarms.loaders import * # noqa: E402, F403
from swarms.artifacts import * # noqa: E402, F403
from swarms.chunkers import * # noqa: E402, F403
from swarms.structs import * # noqa: E402, F403
from swarms.agents import * # noqa: E402, F403
from swarms.models import * # noqa: E402, F403
from swarms.tools import * # noqa: E402, F403
from swarms.telemetry import * # noqa: E402, F403

@ -22,9 +22,12 @@ class TextArtifact(BaseArtifact):
Methods:
__add__(self, other: BaseArtifact) -> TextArtifact: Concatenates the text value of the artifact with another artifact.
__bool__(self) -> bool: Checks if the text value of the artifact is non-empty.
generate_embedding(self, driver: BaseEmbeddingModel) -> Optional[list[float]]: Generates the embedding of the text artifact using a given embedding model.
token_count(self, tokenizer: BaseTokenizer) -> int: Counts the number of tokens in the text artifact using a given tokenizer.
to_bytes(self) -> bytes: Converts the text value of the artifact to bytes using the specified encoding and error handler.
generate_embedding(self, driver: BaseEmbeddingModel) -> Optional[list[float]]: Generates the embedding of the text
artifact using a given embedding model.
token_count(self, tokenizer: BaseTokenizer) -> int: Counts the number of tokens in the text artifact
using a given tokenizer.
to_bytes(self) -> bytes: Converts the text value of the artifact to bytes
using the specified encoding and error handler.
"""
value: str

@ -72,7 +72,7 @@ class BaseChunker(ABC):
# If a separator is provided, only use separators after it.
if current_separator:
separators = self.separators[
self.separators.index(current_separator) :
self.separators.index(current_separator):
]
else:
separators = self.separators
@ -120,7 +120,7 @@ class BaseChunker(ABC):
second_subchunk = (
separator.value
+ separator.value.join(
subchunks[balance_index + 1 :]
subchunks[balance_index + 1:]
)
)
else:
@ -132,7 +132,7 @@ class BaseChunker(ABC):
+ separator.value
)
second_subchunk = separator.value.join(
subchunks[balance_index + 1 :]
subchunks[balance_index + 1:]
)
# Continue recursively chunking the subchunks.
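
These hunks only change slice formatting (dropping the space before the colon, which flake8 reports as E203); runtime behavior is identical. A tiny sketch of the equivalence:

subchunks = ["a", "b", "c", "d"]
balance_index = 1
# Both spellings denote the same slice; only whitespace around the colon differs.
assert subchunks[balance_index + 1 :] == subchunks[balance_index + 1:]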

@ -43,7 +43,7 @@ class PDFLoader:
max_tokens: int
def __post_init__(self):
self.chunker = PdfChunker(
self.chunker = PdfChunker( # noqa: F821
tokenizer=self.tokenizer, max_tokens=self.max_tokens
)

@ -7,7 +7,8 @@ class InternalMemoryBase(ABC):
"""Abstract base class for internal memory of agents in the swarm."""
def __init__(self, n_entries):
"""Initialize the internal memory. In the current architecture the memory always consists of a set of solutions or evaluations.
"""Initialize the internal memory. In the current architecture the memory always consists of a set of solutions or
evaluations.
During the operation, the agent should retrieve the best solutions from its internal memory based on the score.
Moreover, the project is designed around LLMs for the proof of concepts, so we treat all entry content as a string.
@ -28,7 +29,8 @@ class InternalMemoryBase(ABC):
class DictInternalMemory(InternalMemoryBase):
def __init__(self, n_entries: int):
"""
Initialize the internal memory. In the current architecture the memory always consists of a set of solutions or evaluations.
Initialize the internal memory. In the current architecture the memory always consists of a set of solutions or
evaluations.
Simple key-value store for now.
Args:

@ -16,13 +16,15 @@ class DictSharedMemory:
Methods:
__init__(self, file_loc: str = None) -> None: Initializes the shared memory.
add_entry(self, score: float, agent_id: str, agent_cycle: int, entry: Any) -> bool: Adds an entry to the internal memory.
add_entry(self, score: float, agent_id: str, agent_cycle: int, entry: Any) -> bool: Adds an entry to the internal
memory.
get_top_n(self, n: int) -> None: Gets the top n entries from the internal memory.
write_to_file(self, data: Dict[str, Dict[str, Any]]) -> bool: Writes the internal memory to a file.
"""
def __init__(self, file_loc: str = None) -> None:
"""Initialize the shared memory. In the current architecture the memory always consists of a set of solutions or evaluations.
"""Initialize the shared memory. In the current architecture the memory always consists of a set of solutions
or evaluations.
Moreover, the project is designed around LLMs for the proof of concepts, so we treat all entry content as a string.
"""
if file_loc is not None:
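
For orientation, a minimal usage sketch of the shared-memory API documented above; the import path, file path, and scores are illustrative assumptions:

from swarms.memory import DictSharedMemory  # import path assumed

memory = DictSharedMemory(file_loc="/tmp/swarm_memory.json")  # illustrative path
memory.add_entry(score=0.9, agent_id="agent-1", agent_cycle=1, entry="candidate answer")
memory.add_entry(score=0.4, agent_id="agent-2", agent_cycle=1, entry="weaker answer")
top = memory.get_top_n(1)  # highest-scoring entries, per the docstring above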

@ -152,7 +152,8 @@ class LangchainChromaVectorMemory:
query (str): The query to search for.
k (int): The number of results to return.
type (str): The type of search to perform: "cos" or "mmr".
distance_threshold (float): The similarity threshold to use for the search. Results with distance > similarity_threshold will be dropped.
distance_threshold (float):
The similarity threshold to use for the search. Results with distance > similarity_threshold will be dropped.
Returns:
list[str]: A list of the top k results.

@ -22,13 +22,26 @@ class PineconeDB(AbstractVectorDatabase):
index (pinecone.Index, optional): The Pinecone index to use. Defaults to None.
Methods:
upsert_vector(vector: list[float], vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, **kwargs) -> str:
upsert_vector(
vector: list[float],
vector_id: Optional[str] = None,
namespace: Optional[str] = None,
meta: Optional[dict] = None,
**kwargs
) -> str:
Upserts a vector into the index.
load_entry(vector_id: str, namespace: Optional[str] = None) -> Optional[BaseVectorStore.Entry]:
Loads a single vector from the index.
load_entries(namespace: Optional[str] = None) -> list[BaseVectorStore.Entry]:
Loads all vectors from the index.
query(query: str, count: Optional[int] = None, namespace: Optional[str] = None, include_vectors: bool = False, include_metadata=True, **kwargs) -> list[BaseVectorStore.QueryResult]:
query(
query: str,
count: Optional[int] = None,
namespace: Optional[str] = None,
include_vectors: bool = False,
include_metadata: bool = True,
**kwargs
) -> list[BaseVectorStore.QueryResult]:
Queries the index for vectors similar to the given query string.
create_index(name: str, **kwargs) -> None:
Creates a new index.
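
A short usage sketch of the methods listed in this docstring; the import path, constructor arguments, and query text are illustrative assumptions:

from swarms.memory import PineconeDB  # import path assumed

db = PineconeDB(api_key="...", environment="us-west1-gcp", index_name="documents")  # args illustrative
vector_id = db.upsert_vector([0.1, 0.2, 0.3], vector_id="doc-1", namespace="demo")
entry = db.load_entry(vector_id, namespace="demo")
hits = db.query("vector databases", count=3, namespace="demo", include_metadata=True)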

@ -1,4 +1,4 @@
############################################ LLMs
# LLMs
from swarms.models.base_llm import AbstractLLM # noqa: E402
from swarms.models.anthropic import Anthropic # noqa: E402
from swarms.models.petals import Petals # noqa: E402
@ -25,7 +25,7 @@ from swarms.models.mixtral import Mixtral # noqa: E402
# ) # noqa: E402
from swarms.models.together import TogetherLLM # noqa: E402
################# MultiModal Models
# MultiModal Models
from swarms.models.base_multimodal_model import (
BaseMultiModalModel,
) # noqa: E402
@ -61,10 +61,10 @@ from swarms.models.sam_supervision import SegmentAnythingMarkGenerator
# from swarms.models.cog_agent import CogAgent # noqa: E402
################# Tokenizers
# Tokenizers
############## Types
# Types
from swarms.models.types import (
TextModality,
ImageModality,
@ -77,7 +77,7 @@ from swarms.models.types import (
from swarms.models.base_embedding_model import BaseEmbeddingModel
##### Utils
# Utils
from swarms.models.sampling_params import (
SamplingType,
SamplingParams,

@ -39,15 +39,25 @@ class BaseMultiModalModel:
Examples:
>>> from swarms.models.base_multimodal_model import BaseMultiModalModel
>>> image = "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"
>>> model = BaseMultiModalModel()
>>> model.run("Generate a summary of this text")
>>> model.run("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")
>>> model.run("Generate a summary of this text", image)
>>> model.run_batch(["Generate a summary of this text", "Generate a summary of this text"])
>>> model.run_batch([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
>>> model.run_batch([
("Generate a summary of this text", image),
("Generate a summary of this text", image)
])
>>> model.run_batch_async(["Generate a summary of this text", "Generate a summary of this text"])
>>> model.run_batch_async([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
>>> model.run_batch_async([
("Generate a summary of this text", image),
("Generate a summary of this text", image)
])
>>> model.run_batch_async_with_retries(["Generate a summary of this text", "Generate a summary of this text"])
>>> model.run_batch_async_with_retries([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")])
>>> model.run_batch_async_with_retries([
("Generate a summary of this text", image),
("Generate a summary of this text", image)
])
>>> model.generate_summary("Generate a summary of this text")
>>> model.set_temperature(0.5)
>>> model.set_max_tokens(500)

@ -1,8 +1,9 @@
"""
r"""
BioGPT
Pre-trained language models have attracted increasing attention in the biomedical domain,
inspired by their great success in the general natural language domain.
Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants),
Among the two main branches of pre-trained language models in the general language domain,
i.e. BERT (and its variants) and GPT (and its variants),
the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT.
While they have achieved great success on a variety of discriminative downstream biomedical tasks,
the lack of generation ability constrains their application scope.
@ -24,7 +25,18 @@ advantage of BioGPT on biomedical literature to generate fluent descriptions for
number = {6},
year = {2022},
month = {09},
abstract = "{Pre-trained language models have attracted increasing attention in the biomedical domain, inspired by their great success in the general natural language domain. Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants), the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT. While they have achieved great success on a variety of discriminative downstream biomedical tasks, the lack of generation ability constrains their application scope. In this paper, we propose BioGPT, a domain-specific generative Transformer language model pre-trained on large-scale biomedical literature. We evaluate BioGPT on six biomedical natural language processing tasks and demonstrate that our model outperforms previous models on most tasks. Especially, we get 44.98\%, 38.42\% and 40.76\% F1 score on BC5CDR, KD-DTI and DDI end-to-end relation extraction tasks, respectively, and 78.2\% accuracy on PubMedQA, creating a new record. Our case study on text generation further demonstrates the advantage of BioGPT on biomedical literature to generate fluent descriptions for biomedical terms.}",
abstract = "{Pre-trained language models have attracted increasing attention in the biomedical domain,
inspired by their great success in the general natural language domain.
Among the two main branches of pre-trained language models in the general language domain,
i.e. BERT (and its variants) and GPT (and its variants), the first one has been extensively studied in the
biomedical domain, such as BioBERT and PubMedBERT. While they have achieved great success on a variety of discriminative
downstream biomedical tasks, the lack of generation ability constrains their application scope. In this paper, we propose
BioGPT, a domain-specific generative Transformer language model pre-trained on large-scale biomedical literature.
We evaluate BioGPT on six biomedical natural language processing tasks and demonstrate that our model outperforms previous
models on most tasks. Especially, we get 44.98\%, 38.42\% and 40.76\% F1 score on BC5CDR, KD-DTI and DDI end-to-end
relation extraction tasks, respectively, and 78.2\% accuracy on PubMedQA, creating a new record. Our case study on text
generation further demonstrates the advantage of BioGPT on biomedical literature to generate fluent descriptions for
biomedical terms.}",
issn = {1477-4054},
doi = {10.1093/bib/bbac409},
url = {https://doi.org/10.1093/bib/bbac409},

@ -123,7 +123,7 @@ class CogAgent(BaseMultiModalModel):
with torch.no_grad():
outputs = self.model(**inputs, **kwargs)
outputs = outputs[:, inputs["input_ids"].shape[1] :]
outputs = outputs[:, inputs["input_ids"].shape[1]:]
response = self.decode(outputs[0])
response = response.split("</s>")[0]
print(response)

@ -139,7 +139,7 @@ class DistilWhisperModel:
len(audio_input.array) / sample_rate
chunks = [
audio_input.array[
i : i + sample_rate * chunk_duration
i: i + sample_rate * chunk_duration
]
for i in range(
0,
@ -175,7 +175,7 @@ class DistilWhisperModel:
# Print the chunk's transcription
print(
colored(
f"Chunk {i+1}/{len(chunks)}: ", "yellow"
f"Chunk {i + 1}/{len(chunks)}: ", "yellow"
)
+ transcription
)

@ -37,7 +37,8 @@ class ElevenLabsText2SpeechTool(BaseTool):
Defaults to ElevenLabsModel.MULTI_LINGUAL.
name (str): The name of the tool. Defaults to "eleven_labs_text2speech".
description (str): The description of the tool.
Defaults to "A wrapper around Eleven Labs Text2Speech. Useful for when you need to convert text to speech. It supports multiple languages, including English, German, Polish, Spanish, Italian, French, Portuguese, and Hindi."
Defaults to "A wrapper around Eleven Labs Text2Speech. Useful for when you need to convert text to speech. It
supports multiple languages, including English, German, Polish, Spanish, Italian, French, Portuguese, and Hindi."
Usage:

@ -10,7 +10,8 @@ from typing import Any
class GPT4VSAM(BaseMultiModalModel):
"""
GPT4VSAM class represents a multi-modal model that combines the capabilities of GPT-4 and SegmentAnythingMarkGenerator.
It takes an instance of BaseMultiModalModel (vlm) and a device as input and provides methods for loading images and making predictions.
It takes an instance of BaseMultiModalModel (vlm) and a device as input and provides
methods for loading images and making predictions.
Args:
vlm (BaseMultiModalModel): An instance of BaseMultiModalModel representing the visual language model.

@ -202,7 +202,8 @@ class GPT4VisionAPI(BaseMultiModalModel):
"""
PROMPT = f"""
These are frames from a video that I want to upload. Generate a compelling description that I can upload along with the video:
These are frames from a video that I want to upload.
Generate a compelling description that I can upload along with the video:
{frames}
"""

@ -61,7 +61,8 @@ class Idefics(BaseMultiModalModel):
response = model.chat(user_input)
print(response)
user_input = "User: And who is that? https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
user_input = "User: And who is that? \
https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
response = model.chat(user_input)
print(response)
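
One caution about the wrapped string above: a backslash continuation inside a string literal keeps any indentation on the following line as part of the string, so the rewrite is only equivalent if the continuation line starts at column zero. A tiny illustration:

a = "User: And who is that? \
https://example.com"  # continuation at column zero: no extra characters
b = "User: And who is that? \
    https://example.com"  # indented continuation: four spaces leak into the string
assert a != b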

@ -323,5 +323,5 @@ class Kosmos(BaseMultiModalModel):
def generate_boxees(self, task, image_url):
image = self.get_image(image_url)
processed_text, entities = self.process_task(task, image)
_, entities = self.process_task(task, image)
self.draw_entity_boxes_on_image(image, entities, show=True)

@ -26,7 +26,8 @@ class MedicalSAM:
Methods:
__post_init__(): Initializes the MedicalSAM object.
download_model_weights(model_path: str): Downloads the model weights from the specified URL and saves them to the given file path.
download_model_weights(model_path: str): Downloads the model weights from the specified URL and
saves them to the given file path.
preprocess(img): Preprocesses the input image.
run(img, box): Runs the semantic segmentation on the input image within the specified bounding box.

@ -386,7 +386,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
)
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(
token[j : j + self.embedding_ctx_length]
token[j: j + self.embedding_ctx_length]
)
indices.append(i)
@ -406,7 +406,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in _iter:
response = embed_with_retry(
self,
input=tokens[i : i + _chunk_size],
input=tokens[i: i + _chunk_size],
**self._invocation_params,
)
batched_embeddings.extend(
@ -486,7 +486,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
)
for j in range(0, len(token), self.embedding_ctx_length):
tokens.append(
token[j : j + self.embedding_ctx_length]
token[j: j + self.embedding_ctx_length]
)
indices.append(i)
@ -495,7 +495,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
for i in range(0, len(tokens), _chunk_size):
response = await async_embed_with_retry(
self,
input=tokens[i : i + _chunk_size],
input=tokens[i: i + _chunk_size],
**self._invocation_params,
)
batched_embeddings.extend(

@ -45,7 +45,6 @@ from tenacity import (
wait_exponential,
)
logger = logging.getLogger(__name__)
from importlib.metadata import version
@ -610,7 +609,7 @@ class BaseOpenAI(BaseLLM):
prompts[0]
)
sub_prompts = [
prompts[i : i + self.batch_size]
prompts[i: i + self.batch_size]
for i in range(0, len(prompts), self.batch_size)
]
return sub_prompts
@ -624,7 +623,7 @@ class BaseOpenAI(BaseLLM):
"""Create the LLMResult from the choices and prompts."""
generations = []
for i, _ in enumerate(prompts):
sub_choices = choices[i * self.n : (i + 1) * self.n]
sub_choices = choices[i * self.n: (i + 1) * self.n]
generations.append(
[
Generation(

@ -26,7 +26,8 @@ class SAM:
processor (SamProcessor): The processor for the SAM model.
Methods:
run(task=None, img=None, *args, **kwargs): Runs the SAM model on the given image and returns the segmentation scores and masks.
run(task=None, img=None, *args, **kwargs): Runs the SAM model on the given image and returns the
segmentation scores and masks.
process_img(img: str = None, *args, **kwargs): Processes the input image and returns the processed image.
"""

@ -2,18 +2,31 @@
SpeechT5 (TTS task)
SpeechT5 model fine-tuned for speech synthesis (text-to-speech) on LibriTTS.
This model was introduced in SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing by Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
This model was introduced in SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing by
Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu,
Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
SpeechT5 was first released in this repository, original weights. The license used is MIT.
Model Description
Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models, we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific (speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality based on the output of the decoder.
Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation, hoping to improve the modeling capability for both speech and text. To align the textual and speech information into this unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text states with latent units as the interface between encoder and decoder.
Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and speaker identification.
Developed by: Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu, Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
Motivated by the success of T5 (Text-To-Text Transfer Transformer) in pre-trained natural language processing models,
we propose a unified-modal SpeechT5 framework that explores the encoder-decoder pre-training for self-supervised speech/text
representation learning. The SpeechT5 framework consists of a shared encoder-decoder network and six modal-specific
(speech/text) pre/post-nets. After preprocessing the input speech/text through the pre-nets, the shared encoder-decoder network
models the sequence-to-sequence transformation, and then the post-nets generate the output in the speech/text modality
based on the output of the decoder.
Leveraging large-scale unlabeled speech and text data, we pre-train SpeechT5 to learn a unified-modal representation,
hoping to improve the modeling capability for both speech and text. To align the textual and speech information into this
unified semantic space, we propose a cross-modal vector quantization approach that randomly mixes up speech/text states with
latent units as the interface between encoder and decoder.
Extensive evaluations show the superiority of the proposed SpeechT5 framework on a wide variety of spoken language processing
tasks, including automatic speech recognition, speech synthesis, speech translation, voice conversion, speech enhancement, and
speaker identification.
Developed by: Junyi Ao, Rui Wang, Long Zhou, Chengyi Wang, Shuo Ren, Yu Wu, Shujie Liu,
Tom Ko, Qing Li, Yu Zhang, Zhihua Wei, Yao Qian, Jinyu Li, Furu Wei.
Shared by [optional]: Matthijs Hollemans
Model type: text-to-speech
Language(s) (NLP): [More Information Needed]

@ -83,7 +83,7 @@ class VipLlavaMultiModal(BaseMultiModalModel):
)
return self.processor.decode(
generate_ids[0][len(inputs["input_ids"][0]) :],
generate_ids[0][len(inputs["input_ids"][0]):],
skip_special_tokens=True,
)

@ -31,7 +31,7 @@ def debate_monitor(game_description, word_limit, character_names):
Frame the debate topic as a problem to be solved.
Be creative and imaginative.
Please reply with the specified topic in {word_limit} words or less.
Speak directly to the presidential candidates: {*character_names,}.
Speak directly to the presidential candidates: {*character_names, }.
Do not add anything else.
"""

@ -123,7 +123,7 @@ class SchemaGenerator:
return "\n".join(command_strings + [finish_string])
else:
return "\n".join(
f"{i+1}. {item}" for i, item in enumerate(items)
f"{i + 1}. {item}" for i, item in enumerate(items)
)
def generate_prompt_string(self) -> str:

@ -34,7 +34,7 @@ commands: {
"""
########### FEW SHOT EXAMPLES ################
# FEW SHOT EXAMPLES
SCENARIOS = """
commands: {
"tools": {

@ -62,6 +62,6 @@ def worker_tools_sop_promp(name: str, memory: str):
[{memory}]
Human: Determine which next command to use, and respond using the format specified above:
""".format(name=name, memory=memory, time=time)
""".format(name=name, memory=memory, time=time) # noqa: F521
return str(out)

@ -95,7 +95,8 @@ class AsyncWorkflow:
# if self.dashboard:
# self.display()
# Add a stopping condition to stop the workflow, if provided but stopping_condition takes in a parameter s for string
# Add a stopping condition to stop the workflow,
# if provided but stopping_condition takes in a parameter s for string
if self.stopping_condition:
if self.stopping_condition(self.results):
break
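
For context, the stopping condition referenced in this comment is just a callable evaluated against the accumulated results; a minimal illustrative example (the heuristic itself is an assumption):

def stopping_condition(results) -> bool:
    # Stop once any accumulated result contains the marker "DONE" (illustrative heuristic).
    return any("DONE" in str(r) for r in results)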

@ -91,11 +91,6 @@ class AbstractSwarm(ABC):
"""Step the swarm"""
pass
# @abstractmethod
def add_agent(self, agent: "Agent"):
"""Add an agent to the swarm"""
pass
# @abstractmethod
def remove_agent(self, agent: "Agent"):
"""Remove an agent from the swarm"""

@ -182,7 +182,20 @@ class BaseWorkflow(BaseStructure):
>>> workflow.add("Create a report on these metrics", llm)
>>> workflow.delete_task("What's the weather in miami")
>>> workflow.tasks
[Task(description='Create a report on these metrics', agent=Agent(llm=OpenAIChat(openai_api_key=''), max_loops=1, dashboard=False), args=[], kwargs={}, result=None, history=[])]
[
Task(
description='Create a report on these metrics',
agent=Agent(
llm=OpenAIChat(openai_api_key=''),
max_loops=1,
dashboard=False
),
args=[],
kwargs={},
result=None,
history=[]
)
]
"""
try:
for task in self.tasks:

@ -16,7 +16,8 @@ class ConcurrentWorkflow(BaseStructure):
Args:
max_workers (int): The maximum number of workers to use for the ThreadPoolExecutor.
autosave (bool): Whether to save the state of the workflow to a file. Default is False.
saved_state_filepath (str): The filepath to save the state of the workflow to. Default is "runs/concurrent_workflow.json".
saved_state_filepath (str): The filepath to save the state of the workflow to.
Default is "runs/concurrent_workflow.json".
print_results (bool): Whether to print the results of each task. Default is False.
return_results (bool): Whether to return the results of each task. Default is False.
use_processes (bool): Whether to use processes instead of threads. Default is False.

@ -19,7 +19,8 @@ class DebatePlayer(Agent):
Args:
model_name(str): model name
name (str): name of this player
temperature (float): higher values make the output more random, while lower values make it more focused and deterministic
temperature (float): higher values make the output more random,
while lower values make it more focused and deterministic
openai_api_key (str): As the parameter name suggests
sleep_time (float): sleep because of rate limits
"""
@ -33,7 +34,8 @@ class Debate:
Args:
model_name (str): openai model name
temperature (float): higher values make the output more random, while lower values make it more focused and deterministic
temperature (float): higher values make the output more random,
while lower values make it more focused and deterministic
num_players (int): num of players
save_file_dir (str): dir path to json file
openai_api_key (str): As the parameter name suggests
@ -239,7 +241,7 @@ class Debate:
if self.mod_ans["debate_translation"] != "":
break
else:
print(f"===== Debate Round-{round+2} =====\n")
print(f"===== Debate Round-{round + 2} =====\n")
self.affirmative.add_message_to_memory(
self.save_file["debate_prompt"].replace(
"##oppo_ans##", self.neg_ans
@ -361,6 +363,13 @@ class Debate:
# with open(prompts_path, 'w') as file:
# json.dump(config, file, ensure_ascii=False, indent=4)
# debate = Debate(save_file_dir=save_file_dir, num_players=3, openai_api_key=openai_api_key, prompts_path=prompts_path, temperature=0, sleep_time=0)
# debate = Debate(
# save_file_dir=save_file_dir,
# num_players=3,
# openai_api_key=openai_api_key,
# prompts_path=prompts_path,
# temperature=0,
# sleep_time=0
# )
# debate.run()
# debate.save_file_to_json(id)

@ -18,7 +18,8 @@ class GraphWorkflow(BaseStructure):
connect(from_node, to_node): Connects two nodes in the graph.
set_entry_point(node_name): Sets the entry point node for the workflow.
add_edge(from_node, to_node): Adds an edge between two nodes in the graph.
add_conditional_edges(from_node, condition, edge_dict): Adds conditional edges from a node to multiple nodes based on a condition.
add_conditional_edges(from_node, condition, edge_dict): Adds conditional edges from a node to multiple nodes
based on a condition.
run(): Runs the workflow and returns the graph.
Examples:
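
Taken together, the methods listed above compose roughly as follows; the node names, condition, and construction are illustrative assumptions, not code from this commit:

from swarms.structs import GraphWorkflow  # import path assumed

workflow = GraphWorkflow()
workflow.set_entry_point("fetch")
workflow.add_edge("fetch", "analyze")
workflow.add_conditional_edges(
    "analyze",
    condition=lambda result: "ok" if result else "retry",  # illustrative condition
    edge_dict={"ok": "report", "retry": "fetch"},
)
graph = workflow.run()  # per the docstring, run() returns the graph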

@ -115,7 +115,8 @@ class MajorityVoting:
multithreaded (bool, optional): Whether to run the agents using multithreading. Defaults to False.
multiprocess (bool, optional): Whether to run the agents using multiprocessing. Defaults to False.
asynchronous (bool, optional): Whether to run the agents asynchronously. Defaults to False.
output_parser (callable, optional): A callable function to parse the output of the majority voting system. Defaults to None.
output_parser (callable, optional): A callable function to parse the output of the majority voting system.
Defaults to None.
Examples:
>>> from swarms.structs.agent import Agent
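
The output_parser documented above is a plain callable applied to the voting output; a minimal illustrative example (the parsing rule is an assumption):

def output_parser(raw: str) -> str:
    # Keep only the first non-empty line of the majority answer (illustrative behavior).
    for line in raw.strip().splitlines():
        if line.strip():
            return line.strip()
    return raw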

@ -88,7 +88,7 @@ class ModelParallelizer:
"""Save responses to file"""
with open(filename, "w") as file:
table = [
[f"LLM {i+1}", response]
[f"LLM {i + 1}", response]
for i, response in enumerate(self.last_responses)
]
file.write(table)
@ -111,7 +111,7 @@ class ModelParallelizer:
print(f"{i + 1}. {task}")
print("\nLast Responses:")
table = [
[f"LLM {i+1}", response]
[f"LLM {i + 1}", response]
for i, response in enumerate(self.last_responses)
]
print(

@ -119,7 +119,7 @@ class MultiThreadedWorkflow(BaseWorkflow):
except Exception as e:
logging.error(
(
f"Attempt {attempt+1} failed for task"
f"Attempt {attempt + 1} failed for task"
f" {task}: {str(e)}"
),
exc_info=True,

@ -109,7 +109,7 @@ class Jsonformer:
response[0], skip_special_tokens=True
)
response = response[len(prompt) :]
response = response[len(prompt):]
response = response.strip().rstrip(".")
self.debug("[generate_number]", response)
try:
@ -181,7 +181,7 @@ class Jsonformer:
response[0][: len(input_tokens[0])] == input_tokens
).all()
):
response = response[0][len(input_tokens[0]) :]
response = response[0][len(input_tokens[0]):]
if response.shape[0] == 1:
response = response[0]
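
The slice in this hunk simply drops the echoed prompt from the decoded text; a tiny illustrative check:

prompt = "Generate a number: "
decoded = "Generate a number: 42."
completion = decoded[len(prompt):]
assert completion == "42."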

@ -48,7 +48,7 @@ class NumberStoppingCriteria(StoppingCriteria):
scores: torch.FloatTensor,
) -> bool:
decoded = self.tokenizer.decode(
input_ids[0][self.prompt_length :],
input_ids[0][self.prompt_length:],
skip_special_tokens=True,
)

@ -11,7 +11,8 @@ def scrape_tool_func_docs(fn: Callable) -> str:
fn (Callable): The function to scrape.
Returns:
str: A string containing the function's name, documentation string, and a list of its parameters. Each parameter is represented as a line containing the parameter's name, default value, and annotation.
str: A string containing the function's name, documentation string, and a list of its parameters. Each parameter is
represented as a line containing the parameter's name, default value, and annotation.
"""
try:
# If the function is a tool, get the original function

@ -108,7 +108,7 @@ class Action:
def to_json(self):
try:
tool_output = json.loads(self.tool_output)
except:
except json.JSONDecodeError:
tool_output = self.tool_output
return {
"thought": self.thought,

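Replacing the bare except with json.JSONDecodeError is the flake8 E722 fix; the pattern falls back to the raw string when parsing fails. A small self-contained sketch of that behavior:

import json

def coerce(tool_output: str):
    # Mirror the pattern above: parse JSON when possible, otherwise keep the raw string.
    try:
        return json.loads(tool_output)
    except json.JSONDecodeError:
        return tool_output

assert coerce('{"a": 1}') == {"a": 1}
assert coerce("not json") == "not json"
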
@ -2,10 +2,6 @@ import logging
import os
import warnings
import sys
import logging
import os
import warnings
import sys
def disable_logging():

@ -18,7 +18,8 @@ def load_model_torch(
model_path (str): Path to the saved model file.
device (torch.device): Device to move the model to.
model (nn.Module): The model architecture, if the model file only contains the state dictionary.
strict (bool): Whether to strictly enforce that the keys in the state dictionary match the keys returned by the model's `state_dict()` function.
strict (bool): Whether to strictly enforce that the keys in the state
dictionary match the keys returned by the model's `state_dict()` function.
map_location (callable): A function to remap the storage locations of the loaded model.
*args: Additional arguments to pass to `torch.load`.
**kwargs: Additional keyword arguments to pass to `torch.load`.

@ -376,7 +376,7 @@ class FileHandler:
os.makedirs(os.path.dirname(local_filename), exist_ok=True)
with open(local_filename, "wb") as f:
size = f.write(data)
print(f"Inputs: {url} ({size//1000}MB) => {local_filename}")
print(f"Inputs: {url} ({size // 1000}MB) => {local_filename}")
return local_filename
def handle(self, url: str) -> str:
@ -390,7 +390,7 @@ class FileHandler:
"SERVER", "http://localhost:8000"
)
)
+ 1 :
+ 1:
]
local_filename = (
Path("file") / local_filepath.split("/")[-1]
