updated poetry dependencies

pull/408/head
Joshua David 1 year ago
parent 3b4dd644c4
commit b40800887f

@ -11,9 +11,7 @@ text = node.run_text(
img = node.run_img("/image1", "What is this image about?")
chat = node.chat(
(
"What is your name? Generate a picture of yourself. What is"
" this image about?"
),
"What is your name? Generate a picture of yourself. What is"
" this image about?",
streaming=True,
)

@ -126,10 +126,8 @@ class AutoBlogGenSwarm:
except Exception as error:
print(
colored(
(
"Error while running AutoBlogGenSwarm"
f" {error}"
),
"Error while running AutoBlogGenSwarm"
f" {error}",
"red",
)
)

@ -47,10 +47,8 @@ class BlogGen:
topic_output = topic_result.generations[0][0].text
print(
colored(
(
"\nTopic Selection Task"
f" Output:\n----------------------------\n{topic_output}\n"
),
"\nTopic Selection Task"
f" Output:\n----------------------------\n{topic_output}\n",
"white",
)
)
@ -72,10 +70,8 @@ class BlogGen:
initial_draft_output = auto_temp_output # Assuming AutoTemp.run returns the best output directly
print(
colored(
(
"\nInitial Draft"
f" Output:\n----------------------------\n{initial_draft_output}\n"
),
"\nInitial Draft"
f" Output:\n----------------------------\n{initial_draft_output}\n",
"white",
)
)
@ -88,10 +84,8 @@ class BlogGen:
review_output = review_result.generations[0][0].text
print(
colored(
(
"\nReview"
f" Output:\n----------------------------\n{review_output}\n"
),
"\nReview"
f" Output:\n----------------------------\n{review_output}\n",
"white",
)
)
@ -110,10 +104,8 @@ class BlogGen:
].text
print(
colored(
(
"\nDistribution"
f" Output:\n----------------------------\n{distribution_output}\n"
),
"\nDistribution"
f" Output:\n----------------------------\n{distribution_output}\n",
"white",
)
)
@ -122,10 +114,8 @@ class BlogGen:
final_blog_content = f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}"
print(
colored(
(
"\nFinal Blog"
f" Content:\n----------------------------\n{final_blog_content}\n"
),
"\nFinal Blog"
f" Content:\n----------------------------\n{final_blog_content}\n",
"green",
)
)

@ -95,10 +95,8 @@ product_manager_out = product_manager_agent.run(
)
print(
colored(
(
"---------------------------- Product Manager Plan:"
f" {product_manager_out}"
),
"---------------------------- Product Manager Plan:"
f" {product_manager_out}",
"cyan",
)
)
@ -111,10 +109,8 @@ agent1_out = feature_implementer_frontend.run(
)
print(
colored(
(
"--------------------- Feature Implementer Code logic:"
f" {agent1_out}"
),
"--------------------- Feature Implementer Code logic:"
f" {agent1_out}",
"cyan",
)
)
@ -125,10 +121,8 @@ tester_agent_out = tester_agent.run(
)
print(
colored(
(
"---------------------------- Tests for the logic:"
f" {tester_agent_out}"
),
"---------------------------- Tests for the logic:"
f" {tester_agent_out}",
"green",
)
)
@ -140,10 +134,8 @@ documenter_agent_out = documenting_agent.run(
)
print(
colored(
(
"---------------------------- Documentation for the"
f" logic: {documenter_agent_out}"
),
"---------------------------- Documentation for the"
f" logic: {documenter_agent_out}",
"yellow",
)
)

@ -66,43 +66,33 @@ accessories_stylist_agent = Agent(
# Run agents with respective tasks
haircut_suggestions = haircut_stylist_agent.run(
(
"Suggest suitable haircuts for this user, considering their"
" face shape and hair type."
),
"Suggest suitable haircuts for this user, considering their"
" face shape and hair type.",
user_selfie,
)
# Run Makeup or Beard agent based on gender
if user_gender == "woman":
makeup_suggestions = makeup_or_beard_stylist_agent.run(
(
"Recommend makeup styles for this user, complementing"
" their features."
),
"Recommend makeup styles for this user, complementing"
" their features.",
user_selfie,
)
elif user_gender == "man":
beard_suggestions = makeup_or_beard_stylist_agent.run(
(
"Provide beard styling advice for this user, considering"
" their face shape."
),
"Provide beard styling advice for this user, considering"
" their face shape.",
user_selfie,
)
clothing_suggestions = clothing_stylist_agent.run(
(
"Match clothing styles and colors for this user, using color"
" matching principles."
),
"Match clothing styles and colors for this user, using color"
" matching principles.",
clothes_image,
)
accessories_suggestions = accessories_stylist_agent.run(
(
"Suggest accessories to complement this user's outfit,"
" considering the overall style."
),
"Suggest accessories to complement this user's outfit,"
" considering the overall style.",
clothes_image,
)

@ -68,10 +68,8 @@ workflow.add(
)
workflow.add(
final_plan_agent,
(
"Generate the final urban improvement plan based on all"
" previous agent's findings"
),
"Generate the final urban improvement plan based on all"
" previous agent's findings",
)
# Run the workflow for individual analysis tasks

@ -140,10 +140,12 @@ player_descriptor_system_message = SystemMessage(
def generate_character_description(character_name):
character_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(content=f"""{game_description}
HumanMessage(
content=f"""{game_description}
Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities.
Speak directly to {character_name}.
Do not add anything else."""),
Do not add anything else."""
),
]
character_description = ChatOpenAI(temperature=1.0)(
character_specifier_prompt
@ -164,7 +166,8 @@ Your goal is to be as creative as possible and make the voters think you are the
def generate_character_system_message(
character_name, character_header
):
return SystemMessage(content=f"""{character_header}
return SystemMessage(
content=f"""{character_header}
You will speak in the style of {character_name}, and exaggerate their personality.
You will come up with creative ideas related to {topic}.
Do not say the same things over and over again.
@ -176,7 +179,8 @@ Speak only from the perspective of {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
""")
"""
)
character_descriptions = [
@ -261,7 +265,8 @@ for character_name, bidding_template in zip(
topic_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(content=f"""{game_description}
HumanMessage(
content=f"""{game_description}
You are the debate moderator.
Please make the debate topic more specific.
@ -269,7 +274,8 @@ topic_specifier_prompt = [
Be creative and imaginative.
Please reply with the specified topic in {word_limit} words or less.
Speak directly to the presidential candidates: {*character_names,}.
Do not add anything else."""),
Do not add anything else."""
),
]
specified_topic = ChatOpenAI(temperature=1.0)(
topic_specifier_prompt

@ -2,10 +2,12 @@ import pandas as pd
from swarms import dataframe_to_text
# # Example usage:
df = pd.DataFrame({
'A': [1, 2, 3],
'B': [4, 5, 6],
'C': [7, 8, 9],
})
df = pd.DataFrame(
{
"A": [1, 2, 3],
"B": [4, 5, 6],
"C": [7, 8, 9],
}
)
print(dataframe_to_text(df))

6351
poetry.lock generated

File diff suppressed because it is too large Load Diff

@ -0,0 +1,399 @@
# This Pylint rcfile contains a best-effort configuration to uphold the
# best-practices and style described in the Google Python style guide:
# https://google.github.io/styleguide/pyguide.html
#
# Its canonical open-source location is:
# https://google.github.io/styleguide/pylintrc
[MAIN]
# Files or directories to be skipped. They should be base names, not paths.
ignore=third_party
# Files or directories matching the regex patterns are skipped. The regex
# matches against base names, not paths.
ignore-patterns=
# Pickle collected data for later comparisons.
persistent=no
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Use multiple processes to speed up Pylint.
jobs=4
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W"
disable=R,
abstract-method,
apply-builtin,
arguments-differ,
attribute-defined-outside-init,
backtick,
bad-option-value,
basestring-builtin,
buffer-builtin,
c-extension-no-member,
consider-using-enumerate,
cmp-builtin,
cmp-method,
coerce-builtin,
coerce-method,
delslice-method,
div-method,
eq-without-hash,
execfile-builtin,
file-builtin,
filter-builtin-not-iterating,
fixme,
getslice-method,
global-statement,
hex-method,
idiv-method,
implicit-str-concat,
import-error,
import-self,
import-star-module-level,
input-builtin,
intern-builtin,
invalid-str-codec,
locally-disabled,
long-builtin,
long-suffix,
map-builtin-not-iterating,
misplaced-comparison-constant,
missing-function-docstring,
metaclass-assignment,
next-method-called,
next-method-defined,
no-absolute-import,
no-init, # added
no-member,
no-name-in-module,
no-self-use,
nonzero-method,
oct-method,
old-division,
old-ne-operator,
old-octal-literal,
old-raise-syntax,
parameter-unpacking,
print-statement,
raising-string,
range-builtin-not-iterating,
raw_input-builtin,
rdiv-method,
reduce-builtin,
relative-import,
reload-builtin,
round-builtin,
setslice-method,
signature-differs,
standarderror-builtin,
suppressed-message,
sys-max-int,
trailing-newlines,
unichr-builtin,
unicode-builtin,
unnecessary-pass,
unpacking-in-except,
useless-else-on-loop,
useless-suppression,
using-cmp-argument,
wrong-import-order,
xrange-builtin,
zip-builtin-not-iterating,
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text
# Tells whether to display a full report or only the messages
reports=no
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
[BASIC]
# Good variable names which should always be accepted, separated by a comma
good-names=main,_
# Bad variable names which should always be refused, separated by a comma
bad-names=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty,cached_property.cached_property,cached_property.threaded_cached_property,cached_property.cached_property_with_ttl,cached_property.threaded_cached_property_with_ttl
# Regular expression matching correct function names
function-rgx=^(?:(?P<exempt>setUp|tearDown|setUpModule|tearDownModule)|(?P<camel_case>_?[A-Z][a-zA-Z0-9]*)|(?P<snake_case>_?[a-z][a-z0-9_]*))$
# Regular expression matching correct variable names
variable-rgx=^[a-z][a-z0-9_]*$
# Regular expression matching correct constant names
const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
# Regular expression matching correct attribute names
attr-rgx=^_{0,2}[a-z][a-z0-9_]*$
# Regular expression matching correct argument names
argument-rgx=^[a-z][a-z0-9_]*$
# Regular expression matching correct class attribute names
class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$
# Regular expression matching correct inline iteration names
inlinevar-rgx=^[a-z][a-z0-9_]*$
# Regular expression matching correct class names
class-rgx=^_?[A-Z][a-zA-Z0-9]*$
# Regular expression matching correct module names
module-rgx=^(_?[a-z][a-z0-9_]*|__init__)$
# Regular expression matching correct method names
method-rgx=(?x)^(?:(?P<exempt>_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass|(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next)|(?P<camel_case>_{0,2}[A-Z][a-zA-Z0-9_]*)|(?P<snake_case>_{0,2}[a-z][a-z0-9_]*))$
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=(__.*__|main|test.*|.*test|.*Test)$
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=12
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=80
# TODO(https://github.com/pylint-dev/pylint/issues/3352): Direct pylint to exempt
# lines made too long by directives to pytype.
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=(?x)(
^\s*(\#\ )?<?https?://\S+>?$|
^\s*(from\s+\S+\s+)?import\s+.+$)
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=yes
# Maximum number of lines in a module
max-module-lines=99999
# String used as indentation unit. The internal Google style guide mandates 2
# spaces. Google's externally-published style guide says 4, consistent with
# PEP 8. Here, we use 2 spaces, for conformity with many open-sourced Google
# projects (like TensorFlow).
indent-string=' '
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=TODO
[STRING]
# This flag controls whether inconsistent-quotes generates a warning when the
# character used as a quote delimiter is used inconsistently within a module.
check-quote-consistency=yes
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_)
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six,six.moves,past.builtins,future.builtins,functools
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging,absl.logging,tensorflow.io.logging
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
[SPELLING]
# Spelling dictionary name. Available dictionaries: none. To make this work,
# install the python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,
TERMIOS,
Bastion,
rexec,
sets
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant, absl
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls,
class_
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs

@ -10,7 +10,7 @@ description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
homepage = "https://github.com/kyegomez/swarms"
documentation = "https://swarms.apac.ai"
documentation = "https://swarms.apac.ai"
readme = "README.md" # Assuming you have a README.md
repository = "https://github.com/kyegomez/swarms"
keywords = ["artificial intelligence", "deep learning", "optimizers", "Prompt Engineering", "swarms", "agents"]
@ -24,16 +24,16 @@ classifiers = [
[tool.poetry.dependencies]
python = "^3.6.1"
python = "~3.11.0"
torch = "2.1.1"
transformers = "4.37.1"
openai = "0.28.0"
langchain = "0.0.333"
langchain = "~0.1"
asyncio = "3.4.3"
einops = "0.7.0"
google-generativeai = "0.3.1"
langchain-experimental = "0.0.10"
tensorflow = "*"
tensorflow = "^2.12.0"
weaviate-client = "3.25.3"
opencv-python-headless = "4.8.1.78"
faiss-cpu = "1.7.4"
@ -75,6 +75,12 @@ supervision = "*"
scikit-image = "*"
pinecone-client = "*"
roboflow = "*"
pre-commit = "^3.6.2"
pylint = "^3.1.0"
pytest = "^8.0.2"
numpy = "^1.26.4"
langchain-core = "^0.1.28"
langchain-community = "^0.0.24"
[tool.poetry.group.lint.dependencies]

@ -168,10 +168,8 @@ class Dalle3:
# Handling exceptions and printing the errors details
print(
colored(
(
f"Error running Dalle3: {error} try"
" optimizing your api key and or try again"
),
f"Error running Dalle3: {error} try"
" optimizing your api key and or try again",
"red",
)
)
@ -235,10 +233,8 @@ class Dalle3:
except (Exception, openai.OpenAIError) as error:
print(
colored(
(
f"Error running Dalle3: {error} try"
" optimizing your api key and or try again"
),
f"Error running Dalle3: {error} try"
" optimizing your api key and or try again",
"red",
)
)
@ -321,20 +317,16 @@ class Dalle3:
except Exception as error:
print(
colored(
(
f"Error running Dalle3: {error} try"
" optimizing your api key and or try"
" again"
),
f"Error running Dalle3: {error} try"
" optimizing your api key and or try"
" again",
"red",
)
)
print(
colored(
(
"Error running Dalle3:"
f" {error.http_status}"
),
"Error running Dalle3:"
f" {error.http_status}",
"red",
)
)

@ -97,11 +97,9 @@ class Fuyu(BaseMultiModalModel):
except Exception as error:
print(
colored(
(
"Error in"
f" {self.__class__.__name__} pipeline:"
f" {error}"
),
"Error in"
f" {self.__class__.__name__} pipeline:"
f" {error}",
"red",
)
)

@ -268,11 +268,9 @@ class HuggingfaceLLM(AbstractLLM):
except Exception as e:
print(
colored(
(
"HuggingfaceLLM could not generate text"
f" because of error: {e}, try optimizing your"
" arguments"
),
"HuggingfaceLLM could not generate text"
f" because of error: {e}, try optimizing your"
" arguments",
"red",
)
)

@ -67,11 +67,9 @@ class HuggingfacePipeline(AbstractLLM):
except Exception as error:
print(
colored(
(
"Error in"
f" {self.__class__.__name__} pipeline:"
f" {error}"
),
"Error in"
f" {self.__class__.__name__} pipeline:"
f" {error}",
"red",
)
)

@ -165,11 +165,9 @@ class Idefics(BaseMultiModalModel):
except Exception as error:
print(
colored(
(
"Error in"
f" {self.__class__.__name__} pipeline:"
f" {error}"
),
"Error in"
f" {self.__class__.__name__} pipeline:"
f" {error}",
"red",
)
)

@ -88,9 +88,10 @@ class Kosmos(BaseMultiModalModel):
skip_special_tokens=True,
)[0]
processed_text, entities = (
self.processor.post_process_generation(generated_texts)
)
(
processed_text,
entities,
) = self.processor.post_process_generation(generated_texts)
return processed_text, entities

@ -115,12 +115,13 @@ class MedicalSAM:
if len(box_torch.shape) == 2:
box_torch = box_torch[:, None, :]
sparse_embeddings, dense_embeddings = (
self.model.prompt_encoder(
points=None,
boxes=box_torch,
masks=None,
)
(
sparse_embeddings,
dense_embeddings,
) = self.model.prompt_encoder(
points=None,
boxes=box_torch,
masks=None,
)
low_res_logits, _ = self.model.mask_decoder(

@ -226,10 +226,8 @@ class OpenAIFunctionCaller:
elif message["role"] == "tool":
print(
colored(
(
f"function ({message['name']}):"
f" {message['content']}\n"
),
f"function ({message['name']}):"
f" {message['content']}\n",
role_to_color[message["role"]],
)
)

@ -239,9 +239,9 @@ class BaseOpenAI(BaseLLM):
attributes["openai_api_base"] = self.openai_api_base
if self.openai_organization != "":
attributes["openai_organization"] = (
self.openai_organization
)
attributes[
"openai_organization"
] = self.openai_organization
if self.openai_proxy != "":
attributes["openai_proxy"] = self.openai_proxy

@ -144,10 +144,8 @@ class SSD1B:
# Handling exceptions and printing the errors details
print(
colored(
(
f"Error running SSD1B: {error} try optimizing"
" your api key and or try again"
),
f"Error running SSD1B: {error} try optimizing"
" your api key and or try again",
"red",
)
)
@ -234,20 +232,16 @@ class SSD1B:
except Exception as error:
print(
colored(
(
f"Error running SSD1B: {error} try"
" optimizing your api key and or try"
" again"
),
f"Error running SSD1B: {error} try"
" optimizing your api key and or try"
" again",
"red",
)
)
print(
colored(
(
"Error running SSD1B:"
f" {error.http_status}"
),
"Error running SSD1B:"
f" {error.http_status}",
"red",
)
)

@ -8,7 +8,7 @@ from pydantic import Field
from swarms.utils.serializable import Serializable
if TYPE_CHECKING:
from langchain.prompts.chat import ChatPromptTemplate
from langchain_core.prompts import ChatPromptTemplate
def get_buffer_string(
@ -86,7 +86,7 @@ class BaseMessage(Serializable):
return True
def __add__(self, other: Any) -> ChatPromptTemplate:
from langchain.prompts.chat import ChatPromptTemplate
from langchain_core.prompts import ChatPromptTemplate
prompt = ChatPromptTemplate(messages=[self])
return prompt + other

@ -62,6 +62,8 @@ def worker_tools_sop_promp(name: str, memory: str, time=time):
[{memory}]
Human: Determine which next command to use, and respond using the format specified above:
""".format(name=name, time=time, memory=memory)
""".format(
name=name, time=time, memory=memory
)
return str(out)

@ -471,10 +471,8 @@ class Agent:
try:
print(
colored(
(
"Initializing Autonomous Agent"
f" {self.agent_name}..."
),
"Initializing Autonomous Agent"
f" {self.agent_name}...",
"yellow",
)
)
@ -494,10 +492,8 @@ class Agent:
except Exception as error:
print(
colored(
(
"Error activating autonomous agent. Try"
" optimizing your parameters..."
),
"Error activating autonomous agent. Try"
" optimizing your parameters...",
"red",
)
)
@ -729,10 +725,8 @@ class Agent:
if self.autosave:
print(
colored(
(
"Autosaving agent state to"
f" {self.saved_state_path}"
),
"Autosaving agent state to"
f" {self.saved_state_path}",
"green",
)
)
@ -843,10 +837,8 @@ class Agent:
except Exception as error:
print(
colored(
(
f"Error running agent: {error} while running"
" concurrently"
),
f"Error running agent: {error} while running"
" concurrently",
"red",
)
)

@ -35,9 +35,9 @@ class ConcurrentWorkflow(BaseStructure):
max_loops: int = 1
max_workers: int = 5
autosave: bool = False
saved_state_filepath: Optional[str] = (
"runs/concurrent_workflow.json"
)
saved_state_filepath: Optional[
str
] = "runs/concurrent_workflow.json"
print_results: bool = False
return_results: bool = False
use_processes: bool = False

@ -317,10 +317,8 @@ class Conversation(BaseStructure):
elif message["role"] == "tool":
print(
colored(
(
f"function ({message['name']}):"
f" {message['content']}\n"
),
f"function ({message['name']}):"
f" {message['content']}\n",
role_to_color[message["role"]],
)
)

@ -317,9 +317,9 @@ class MultiAgentCollaboration:
"""Tracks and reports the performance of each agent"""
performance_data = {}
for agent in self.agents:
performance_data[agent.name] = (
agent.get_performance_metrics()
)
performance_data[
agent.name
] = agent.get_performance_metrics()
return performance_data
def set_interaction_rules(self, rules):

@ -133,10 +133,8 @@ class MultiProcessingWorkflow(BaseWorkflow):
except Exception as e:
logging.error(
(
"An error occurred during execution of task"
f" {task}: {str(e)}"
),
"An error occurred during execution of task"
f" {task}: {str(e)}",
exc_info=True,
)
return None

@ -119,10 +119,8 @@ class MultiThreadedWorkflow(BaseWorkflow):
self._autosave_task_result(task, result)
except Exception as e:
logging.error(
(
f"Attempt {attempt+1} failed for task"
f" {task}: {str(e)}"
),
f"Attempt {attempt+1} failed for task"
f" {task}: {str(e)}",
exc_info=True,
)
if attempt + 1 < self.retry_attempts:

@ -44,9 +44,9 @@ class SequentialWorkflow:
task_pool: List[Task] = None
max_loops: int = 1
autosave: bool = False
saved_state_filepath: Optional[str] = (
"sequential_workflow_state.json"
)
saved_state_filepath: Optional[
str
] = "sequential_workflow_state.json"
restore_state_filepath: Optional[str] = None
dashboard: bool = False
agents: List[Agent] = None

@ -84,7 +84,9 @@ class RedisSwarmRegistry(AbstractSwarm):
query = f"""
{match_query}
CREATE (a)-[r:joined]->(b) RETURN r
""".replace("\n", "")
""".replace(
"\n", ""
)
self.redis_graph.query(query)

@ -269,10 +269,8 @@ class ChildTool(BaseTool):
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
(
"callback_manager is deprecated. Please use"
" callbacks instead."
),
"callback_manager is deprecated. Please use"
" callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)

@ -36,11 +36,9 @@ def scrape_tool_func_docs(fn: Callable) -> str:
except Exception as error:
print(
colored(
(
f"Error scraping tool function docs {error} try"
" optimizing your inputs with different"
" variables and attempt once more."
),
f"Error scraping tool function docs {error} try"
" optimizing your inputs with different"
" variables and attempt once more.",
"red",
)
)

@ -14,7 +14,7 @@ def dataframe_to_text(
Returns:
str: The string representation of the DataFrame.
Example:
>>> df = pd.DataFrame({
... 'A': [1, 2, 3],

@ -55,11 +55,14 @@ def test_metrics_decorator_with_mocked_time(mocker):
return ["tok_1", "tok_2"]
metrics = decorated_func()
assert metrics == """
assert (
metrics
== """
Time to First Token: 5
Generation Latency: 20
Throughput: 0.1
"""
)
mocked_time.assert_any_call()

Loading…
Cancel
Save