code quality + new version + fuyu fixes

pull/128/head
Kye 1 year ago
parent e0c712b19b
commit c279784458

(Binary image added in this commit: 223 KiB; preview not shown.)

@ -1,4 +0,0 @@
from swarms.models import Fuyu
fuyu = Fuyu()
fuyu("Hello, my name is", "images/github-banner-swarms.png")

@ -0,0 +1,7 @@
from swarms.models.fuyu import Fuyu
img = "dalle3.jpeg"
fuyu = Fuyu()
fuyu("What is this image", img)

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "2.1.0"
version = "2.1.3"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@ -37,6 +37,7 @@ duckduckgo-search = "*"
faiss-cpu = "*"
datasets = "*"
diffusers = "*"
accelerate = "*"
sentencepiece = "*"
wget = "*"
griptape = "*"

@ -0,0 +1,18 @@
#!/bin/bash
# Navigate to the directory containing the 'swarms' folder
# cd /path/to/your/code/directory
# Run autopep8 with two levels of aggressiveness (--aggressive twice) and in-place modification (-i)
# on all Python files (*.py) under the 'swarms' directory.
autopep8 --in-place --aggressive --aggressive --recursive --experimental swarms/
# Run black; it has no aggressiveness levels, but enable experimental string processing
# so long string literals are split (this is what produced the reflowed strings in this diff).
black --experimental-string-processing swarms/
# Run ruff on the 'swarms' directory.
# Add any additional flags if needed according to your version of ruff.
ruff swarms/
# If you want to ensure the script stops if any command fails, add 'set -e' at the top.

@ -28,6 +28,7 @@ google-generativeai
sentencepiece
duckduckgo-search
agent-protocol
accelerate
chromadb
tiktoken
open-interpreter

@ -1,3 +1,9 @@
from swarms.agents import *
from swarms.swarms import *
from swarms.structs import *
from swarms.models import * # import * only works when __all__ = [] is defined in __init__.py
from swarms.chunkers import *
from swarms.workers import *
import os
import warnings
@ -6,11 +12,3 @@ warnings.filterwarnings("ignore", category=UserWarning)
# disable tensorflow warnings
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from swarms.workers import *
from swarms.chunkers import *
from swarms.models import * # import * only works when __all__ = [] is defined in __init__.py
from swarms.structs import *
from swarms.swarms import *
from swarms.agents import *
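The comment above is worth unpacking: a wildcard import only re-exports a curated set of names when the target package's __init__.py defines __all__. A minimal sketch of what that looks like (module paths and the export list here are illustrative, not the exact swarms contents):

# swarms/models/__init__.py (illustrative sketch)
from swarms.models.fuyu import Fuyu
from swarms.models.biogpt import BioGPT  # hypothetical module path

# `from swarms.models import *` then picks up exactly these names;
# without __all__, it would also leak every transitive import.
__all__ = [
    "Fuyu",
    "BioGPT",
]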

@ -111,8 +111,7 @@ class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel): # type: ignore[misc]
[self.token_counter(doc) for doc in relevant_memory]
)
content_format = (
f"This reminds you of these events "
f"from your past:\n{relevant_memory}\n\n"
f"This reminds you of these events from your past:\n{relevant_memory}\n\n"
)
memory_message = SystemMessage(content=content_format)
used_tokens += self.token_counter(memory_message.content)
@ -233,14 +232,14 @@ class PromptGenerator:
formatted_response_format = json.dumps(self.response_format, indent=4)
prompt_string = (
f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
f"Commands:\n"
"Commands:\n"
f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
f"Performance Evaluation:\n"
"Performance Evaluation:\n"
f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
f"You should only respond in JSON format as described below "
"You should only respond in JSON format as described below "
f"\nResponse Format: \n{formatted_response_format} "
f"\nEnsure the response can be parsed by Python json.loads"
"\nEnsure the response can be parsed by Python json.loads"
)
return prompt_string
@ -419,13 +418,11 @@ class AutoGPT:
else:
result = (
f"Unknown command '{action.name}'. "
f"Please refer to the 'COMMANDS' list for available "
f"commands and only respond in the specified JSON format."
"Please refer to the 'COMMANDS' list for available "
"commands and only respond in the specified JSON format."
)
memory_to_add = (
f"Assistant Reply: {assistant_reply} " f"\nResult: {result} "
)
memory_to_add = f"Assistant Reply: {assistant_reply} \nResult: {result} "
if self.feedback_tool is not None:
feedback = f"\n{self.feedback_tool.run('Input: ')}"
if feedback in {"q", "stop"}:

@ -75,7 +75,8 @@ class OpenAI:
except openai_model.error.RateLimitError as e:
sleep_duratoin = os.environ.get("OPENAI_RATE_TIMEOUT", 30)
print(
f"{str(e)}, sleep for {sleep_duratoin}s, set it by env OPENAI_RATE_TIMEOUT"
f"{str(e)}, sleep for {sleep_duratoin}s, set it by env"
" OPENAI_RATE_TIMEOUT"
)
time.sleep(sleep_duratoin)
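One caveat with the pattern in this hunk: os.environ.get returns a string whenever the variable is set, so time.sleep can receive a str. A hedged sketch of the same retry pause with an explicit numeric conversion (names mirror the hunk; this is not the committed code):

import os
import time

def rate_limit_pause() -> None:
    # Env vars arrive as strings; fall back to 30 seconds when unset.
    sleep_duration = float(os.environ.get("OPENAI_RATE_TIMEOUT", 30))
    print(f"Rate limited; sleeping {sleep_duration}s (override via OPENAI_RATE_TIMEOUT)")
    time.sleep(sleep_duration)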

@ -53,10 +53,12 @@ def record(agent_name: str, autotab_ext_path: Optional[str] = None):
file.write(data)
print(
"\033[34mYou have the Python debugger open, you can run commands in it like you would in a normal Python shell.\033[0m"
"\033[34mYou have the Python debugger open, you can run commands in it like you"
" would in a normal Python shell.\033[0m"
)
print(
"\033[34mTo exit, type 'q' and press enter. For a list of commands type '?' and press enter.\033[0m"
"\033[34mTo exit, type 'q' and press enter. For a list of commands type '?' and"
" press enter.\033[0m"
)
breakpoint()
@ -116,7 +118,8 @@ def open_plugin_and_login(driver: AutotabChromeDriver):
raise Exception("Invalid API key")
else:
raise Exception(
f"Error {response.status_code} from backend while logging you in with your API key: {response.text}"
f"Error {response.status_code} from backend while logging you in"
f" with your API key: {response.text}"
)
cookie["name"] = cookie["key"]
del cookie["key"]
@ -144,7 +147,8 @@ def get_driver(
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox") # Necessary for running
options.add_argument(
"--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
"--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
)
options.add_argument("--enable-webgl")
options.add_argument("--enable-3d-apis")
@ -371,7 +375,10 @@ def _login_with_google(driver, url: str, google_credentials: SiteCredentials):
)
main_window = driver.current_window_handle
xpath = "//*[contains(text(), 'Continue with Google') or contains(text(), 'Sign in with Google') or contains(@title, 'Sign in with Google')]"
xpath = (
"//*[contains(text(), 'Continue with Google') or contains(text(), 'Sign in with"
" Google') or contains(@title, 'Sign in with Google')]"
)
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, xpath)))
driver.find_element(
@ -496,17 +503,17 @@ google_credentials:
# Optional, specify alternative accounts to use with Google login on a per-service basis
- email: you@gmail.com # Credentials without a name use email as key
password: ...
credentials:
notion.so:
alts:
- notion.com
login_with_google_account: default
figma.com:
email: ...
password: ...
airtable.com:
login_with_google_account: you@gmail.com
"""

@ -229,12 +229,14 @@ class Agent:
if len(replacements) > 1:
names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()])
logger.warning(
f"The following tools have been replaced by the ones provided in `additional_tools`:\n{names}."
"The following tools have been replaced by the ones provided in"
f" `additional_tools`:\n{names}."
)
elif len(replacements) == 1:
name = list(replacements.keys())[0]
logger.warning(
f"{name} has been replaced by {replacements[name]} as provided in `additional_tools`."
f"{name} has been replaced by {replacements[name]} as provided in"
" `additional_tools`."
)
self.prepare_for_new_chat()
@ -425,9 +427,9 @@ class HFAgent(Agent):
api_key = os.environ.get("OPENAI_API_KEY", None)
if api_key is None:
raise ValueError(
"You need an openai key to use `OpenAIAgent`. You can get one here: Get one here "
"https://openai.com/api/`. If you have one, set it in your env with `os.environ['OPENAI_API_KEY'] = "
"xxx."
"You need an openai key to use `OpenAIAgent`. You can get one here: Get"
" one here https://openai.com/api/`. If you have one, set it in your"
" env with `os.environ['OPENAI_API_KEY'] = xxx."
)
else:
openai.api_key = api_key
@ -540,8 +542,9 @@ class AzureOpenAI(Agent):
api_key = os.environ.get("AZURE_OPENAI_API_KEY", None)
if api_key is None:
raise ValueError(
"You need an Azure openAI key to use `AzureOpenAIAgent`. If you have one, set it in your env with "
"`os.environ['AZURE_OPENAI_API_KEY'] = xxx."
"You need an Azure openAI key to use `AzureOpenAIAgent`. If you have"
" one, set it in your env with `os.environ['AZURE_OPENAI_API_KEY'] ="
" xxx."
)
else:
openai.api_key = api_key
@ -549,8 +552,9 @@ class AzureOpenAI(Agent):
resource_name = os.environ.get("AZURE_OPENAI_RESOURCE_NAME", None)
if resource_name is None:
raise ValueError(
"You need a resource_name to use `AzureOpenAIAgent`. If you have one, set it in your env with "
"`os.environ['AZURE_OPENAI_RESOURCE_NAME'] = xxx."
"You need a resource_name to use `AzureOpenAIAgent`. If you have one,"
" set it in your env with `os.environ['AZURE_OPENAI_RESOURCE_NAME'] ="
" xxx."
)
else:
openai.api_base = f"https://{resource_name}.openai.azure.com"

@ -270,10 +270,12 @@ class InstructPix2Pix:
@prompts(
name="Instruct Image Using Text",
description="useful when you want to the style of the image to be like the text. "
"like: make it look like a painting. or make it like a robot. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the text. ",
description=(
"useful when you want to the style of the image to be like the text. "
"like: make it look like a painting. or make it like a robot. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the text. "
),
)
def inference(self, inputs):
"""Change style of image."""
@ -286,8 +288,8 @@ class InstructPix2Pix:
updated_image_path = get_new_image_name(image_path, func_name="pix2pix")
image.save(updated_image_path)
print(
f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text: {text}, "
f"Output Image: {updated_image_path}"
f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text:"
f" {text}, Output Image: {updated_image_path}"
)
return updated_image_path
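The @prompts decorator that receives these reflowed name/description strings is not part of this diff. As a rough sketch under that caveat, it presumably just attaches the metadata to the method so a tool loader can register it later:

def prompts(name: str, description: str):
    """Attach tool metadata to an inference method (sketch, not the swarms source)."""
    def decorator(func):
        func.name = name
        func.description = description
        return func
    return decorator

Anything iterating over a class's methods can then read func.name and func.description when building its tool list.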
@ -309,9 +311,12 @@ class Text2Image:
@prompts(
name="Generate Image From User Input Text",
description="useful when you want to generate an image from a user input text and save it to a file. "
"like: generate an image of an object or something, or generate an image that includes some objects. "
"The input to this tool should be a string, representing the text used to generate image. ",
description=(
"useful when you want to generate an image from a user input text and save"
" it to a file. like: generate an image of an object or something, or"
" generate an image that includes some objects. The input to this tool"
" should be a string, representing the text used to generate image. "
),
)
def inference(self, text):
image_filename = os.path.join("image", f"{str(uuid.uuid4())[:8]}.png")
@ -319,7 +324,8 @@ class Text2Image:
image = self.pipe(prompt, negative_prompt=self.n_prompt).images[0]
image.save(image_filename)
print(
f"\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}"
f"\nProcessed Text2Image, Input Text: {text}, Output Image:"
f" {image_filename}"
)
return image_filename
@ -338,8 +344,11 @@ class ImageCaptioning:
@prompts(
name="Get Photo Description",
description="useful when you want to know what is inside the photo. receives image_path as input. "
"The input to this tool should be a string, representing the image_path. ",
description=(
"useful when you want to know what is inside the photo. receives image_path"
" as input. The input to this tool should be a string, representing the"
" image_path. "
),
)
def inference(self, image_path):
inputs = self.processor(Image.open(image_path), return_tensors="pt").to(
@ -348,7 +357,8 @@ class ImageCaptioning:
out = self.model.generate(**inputs)
captions = self.processor.decode(out[0], skip_special_tokens=True)
print(
f"\nProcessed ImageCaptioning, Input Image: {image_path}, Output Text: {captions}"
f"\nProcessed ImageCaptioning, Input Image: {image_path}, Output Text:"
f" {captions}"
)
return captions
@ -361,10 +371,12 @@ class Image2Canny:
@prompts(
name="Edge Detection On Image",
description="useful when you want to detect the edge of the image. "
"like: detect the edges of this image, or canny detection on image, "
"or perform edge detection on this image, or detect the canny image of this image. "
"The input to this tool should be a string, representing the image_path",
description=(
"useful when you want to detect the edge of the image. like: detect the"
" edges of this image, or canny detection on image, or perform edge"
" detection on this image, or detect the canny image of this image. The"
" input to this tool should be a string, representing the image_path"
),
)
def inference(self, inputs):
image = Image.open(inputs)
@ -376,7 +388,8 @@ class Image2Canny:
updated_image_path = get_new_image_name(inputs, func_name="edge")
canny.save(updated_image_path)
print(
f"\nProcessed Image2Canny, Input Image: {inputs}, Output Text: {updated_image_path}"
f"\nProcessed Image2Canny, Input Image: {inputs}, Output Text:"
f" {updated_image_path}"
)
return updated_image_path
@ -410,11 +423,14 @@ class CannyText2Image:
@prompts(
name="Generate Image Condition On Canny Image",
description="useful when you want to generate a new real image from both the user description and a canny image."
" like: generate a real image of a object or something from this canny image,"
" or generate a new real image of a object or something from this edge image. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the user description. ",
description=(
"useful when you want to generate a new real image from both the user"
" description and a canny image. like: generate a real image of a object or"
" something from this canny image, or generate a new real image of a object"
" or something from this edge image. The input to this tool should be a"
" comma separated string of two, representing the image_path and the user"
" description. "
),
)
def inference(self, inputs):
image_path, instruct_text = inputs.split(",")[0], ",".join(
@ -435,8 +451,8 @@ class CannyText2Image:
updated_image_path = get_new_image_name(image_path, func_name="canny2image")
image.save(updated_image_path)
print(
f"\nProcessed CannyText2Image, Input Canny: {image_path}, Input Text: {instruct_text}, "
f"Output Text: {updated_image_path}"
f"\nProcessed CannyText2Image, Input Canny: {image_path}, Input Text:"
f" {instruct_text}, Output Text: {updated_image_path}"
)
return updated_image_path
@ -448,10 +464,13 @@ class Image2Line:
@prompts(
name="Line Detection On Image",
description="useful when you want to detect the straight line of the image. "
"like: detect the straight lines of this image, or straight line detection on image, "
"or perform straight line detection on this image, or detect the straight line image of this image. "
"The input to this tool should be a string, representing the image_path",
description=(
"useful when you want to detect the straight line of the image. like:"
" detect the straight lines of this image, or straight line detection on"
" image, or perform straight line detection on this image, or detect the"
" straight line image of this image. The input to this tool should be a"
" string, representing the image_path"
),
)
def inference(self, inputs):
image = Image.open(inputs)
@ -459,7 +478,8 @@ class Image2Line:
updated_image_path = get_new_image_name(inputs, func_name="line-of")
mlsd.save(updated_image_path)
print(
f"\nProcessed Image2Line, Input Image: {inputs}, Output Line: {updated_image_path}"
f"\nProcessed Image2Line, Input Image: {inputs}, Output Line:"
f" {updated_image_path}"
)
return updated_image_path
@ -492,12 +512,14 @@ class LineText2Image:
@prompts(
name="Generate Image Condition On Line Image",
description="useful when you want to generate a new real image from both the user description "
"and a straight line image. "
"like: generate a real image of a object or something from this straight line image, "
"or generate a new real image of a object or something from this straight lines. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the user description. ",
description=(
"useful when you want to generate a new real image from both the user"
" description and a straight line image. like: generate a real image of a"
" object or something from this straight line image, or generate a new real"
" image of a object or something from this straight lines. The input to"
" this tool should be a comma separated string of two, representing the"
" image_path and the user description. "
),
)
def inference(self, inputs):
image_path, instruct_text = inputs.split(",")[0], ",".join(
@ -518,8 +540,8 @@ class LineText2Image:
updated_image_path = get_new_image_name(image_path, func_name="line2image")
image.save(updated_image_path)
print(
f"\nProcessed LineText2Image, Input Line: {image_path}, Input Text: {instruct_text}, "
f"Output Text: {updated_image_path}"
f"\nProcessed LineText2Image, Input Line: {image_path}, Input Text:"
f" {instruct_text}, Output Text: {updated_image_path}"
)
return updated_image_path
@ -531,10 +553,13 @@ class Image2Hed:
@prompts(
name="Hed Detection On Image",
description="useful when you want to detect the soft hed boundary of the image. "
"like: detect the soft hed boundary of this image, or hed boundary detection on image, "
"or perform hed boundary detection on this image, or detect soft hed boundary image of this image. "
"The input to this tool should be a string, representing the image_path",
description=(
"useful when you want to detect the soft hed boundary of the image. like:"
" detect the soft hed boundary of this image, or hed boundary detection on"
" image, or perform hed boundary detection on this image, or detect soft"
" hed boundary image of this image. The input to this tool should be a"
" string, representing the image_path"
),
)
def inference(self, inputs):
image = Image.open(inputs)
@ -542,7 +567,8 @@ class Image2Hed:
updated_image_path = get_new_image_name(inputs, func_name="hed-boundary")
hed.save(updated_image_path)
print(
f"\nProcessed Image2Hed, Input Image: {inputs}, Output Hed: {updated_image_path}"
f"\nProcessed Image2Hed, Input Image: {inputs}, Output Hed:"
f" {updated_image_path}"
)
return updated_image_path
@ -575,12 +601,14 @@ class HedText2Image:
@prompts(
name="Generate Image Condition On Soft Hed Boundary Image",
description="useful when you want to generate a new real image from both the user description "
"and a soft hed boundary image. "
"like: generate a real image of a object or something from this soft hed boundary image, "
"or generate a new real image of a object or something from this hed boundary. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the user description",
description=(
"useful when you want to generate a new real image from both the user"
" description and a soft hed boundary image. like: generate a real image of"
" a object or something from this soft hed boundary image, or generate a"
" new real image of a object or something from this hed boundary. The input"
" to this tool should be a comma separated string of two, representing the"
" image_path and the user description"
),
)
def inference(self, inputs):
image_path, instruct_text = inputs.split(",")[0], ",".join(
@ -601,8 +629,8 @@ class HedText2Image:
updated_image_path = get_new_image_name(image_path, func_name="hed2image")
image.save(updated_image_path)
print(
f"\nProcessed HedText2Image, Input Hed: {image_path}, Input Text: {instruct_text}, "
f"Output Image: {updated_image_path}"
f"\nProcessed HedText2Image, Input Hed: {image_path}, Input Text:"
f" {instruct_text}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -614,10 +642,12 @@ class Image2Scribble:
@prompts(
name="Sketch Detection On Image",
description="useful when you want to generate a scribble of the image. "
"like: generate a scribble of this image, or generate a sketch from this image, "
"detect the sketch from this image. "
"The input to this tool should be a string, representing the image_path",
description=(
"useful when you want to generate a scribble of the image. like: generate a"
" scribble of this image, or generate a sketch from this image, detect the"
" sketch from this image. The input to this tool should be a string,"
" representing the image_path"
),
)
def inference(self, inputs):
image = Image.open(inputs)
@ -625,7 +655,8 @@ class Image2Scribble:
updated_image_path = get_new_image_name(inputs, func_name="scribble")
scribble.save(updated_image_path)
print(
f"\nProcessed Image2Scribble, Input Image: {inputs}, Output Scribble: {updated_image_path}"
f"\nProcessed Image2Scribble, Input Image: {inputs}, Output Scribble:"
f" {updated_image_path}"
)
return updated_image_path
@ -659,10 +690,12 @@ class ScribbleText2Image:
@prompts(
name="Generate Image Condition On Sketch Image",
description="useful when you want to generate a new real image from both the user description and "
"a scribble image or a sketch image. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the user description",
description=(
"useful when you want to generate a new real image from both the user"
" description and a scribble image or a sketch image. The input to this"
" tool should be a comma separated string of two, representing the"
" image_path and the user description"
),
)
def inference(self, inputs):
image_path, instruct_text = inputs.split(",")[0], ",".join(
@ -683,8 +716,8 @@ class ScribbleText2Image:
updated_image_path = get_new_image_name(image_path, func_name="scribble2image")
image.save(updated_image_path)
print(
f"\nProcessed ScribbleText2Image, Input Scribble: {image_path}, Input Text: {instruct_text}, "
f"Output Image: {updated_image_path}"
f"\nProcessed ScribbleText2Image, Input Scribble: {image_path}, Input Text:"
f" {instruct_text}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -696,9 +729,11 @@ class Image2Pose:
@prompts(
name="Pose Detection On Image",
description="useful when you want to detect the human pose of the image. "
"like: generate human poses of this image, or generate a pose image from this image. "
"The input to this tool should be a string, representing the image_path",
description=(
"useful when you want to detect the human pose of the image. like: generate"
" human poses of this image, or generate a pose image from this image. The"
" input to this tool should be a string, representing the image_path"
),
)
def inference(self, inputs):
image = Image.open(inputs)
@ -706,7 +741,8 @@ class Image2Pose:
updated_image_path = get_new_image_name(inputs, func_name="human-pose")
pose.save(updated_image_path)
print(
f"\nProcessed Image2Pose, Input Image: {inputs}, Output Pose: {updated_image_path}"
f"\nProcessed Image2Pose, Input Image: {inputs}, Output Pose:"
f" {updated_image_path}"
)
return updated_image_path
@ -742,12 +778,13 @@ class PoseText2Image:
@prompts(
name="Generate Image Condition On Pose Image",
description="useful when you want to generate a new real image from both the user description "
"and a human pose image. "
"like: generate a real image of a human from this human pose image, "
"or generate a new real image of a human from this pose. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the user description",
description=(
"useful when you want to generate a new real image from both the user"
" description and a human pose image. like: generate a real image of a"
" human from this human pose image, or generate a new real image of a human"
" from this pose. The input to this tool should be a comma separated string"
" of two, representing the image_path and the user description"
),
)
def inference(self, inputs):
image_path, instruct_text = inputs.split(",")[0], ",".join(
@ -768,8 +805,8 @@ class PoseText2Image:
updated_image_path = get_new_image_name(image_path, func_name="pose2image")
image.save(updated_image_path)
print(
f"\nProcessed PoseText2Image, Input Pose: {image_path}, Input Text: {instruct_text}, "
f"Output Image: {updated_image_path}"
f"\nProcessed PoseText2Image, Input Pose: {image_path}, Input Text:"
f" {instruct_text}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -802,11 +839,14 @@ class SegText2Image:
@prompts(
name="Generate Image Condition On Segmentations",
description="useful when you want to generate a new real image from both the user description and segmentations. "
"like: generate a real image of a object or something from this segmentation image, "
"or generate a new real image of a object or something from these segmentations. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the user description",
description=(
"useful when you want to generate a new real image from both the user"
" description and segmentations. like: generate a real image of a object or"
" something from this segmentation image, or generate a new real image of a"
" object or something from these segmentations. The input to this tool"
" should be a comma separated string of two, representing the image_path"
" and the user description"
),
)
def inference(self, inputs):
image_path, instruct_text = inputs.split(",")[0], ",".join(
@ -827,8 +867,8 @@ class SegText2Image:
updated_image_path = get_new_image_name(image_path, func_name="segment2image")
image.save(updated_image_path)
print(
f"\nProcessed SegText2Image, Input Seg: {image_path}, Input Text: {instruct_text}, "
f"Output Image: {updated_image_path}"
f"\nProcessed SegText2Image, Input Seg: {image_path}, Input Text:"
f" {instruct_text}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -840,9 +880,12 @@ class Image2Depth:
@prompts(
name="Predict Depth On Image",
description="useful when you want to detect depth of the image. like: generate the depth from this image, "
"or detect the depth map on this image, or predict the depth for this image. "
"The input to this tool should be a string, representing the image_path",
description=(
"useful when you want to detect depth of the image. like: generate the"
" depth from this image, or detect the depth map on this image, or predict"
" the depth for this image. The input to this tool should be a string,"
" representing the image_path"
),
)
def inference(self, inputs):
image = Image.open(inputs)
@ -854,7 +897,8 @@ class Image2Depth:
updated_image_path = get_new_image_name(inputs, func_name="depth")
depth.save(updated_image_path)
print(
f"\nProcessed Image2Depth, Input Image: {inputs}, Output Depth: {updated_image_path}"
f"\nProcessed Image2Depth, Input Image: {inputs}, Output Depth:"
f" {updated_image_path}"
)
return updated_image_path
@ -888,11 +932,14 @@ class DepthText2Image:
@prompts(
name="Generate Image Condition On Depth",
description="useful when you want to generate a new real image from both the user description and depth image. "
"like: generate a real image of a object or something from this depth image, "
"or generate a new real image of a object or something from the depth map. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the user description",
description=(
"useful when you want to generate a new real image from both the user"
" description and depth image. like: generate a real image of a object or"
" something from this depth image, or generate a new real image of a object"
" or something from the depth map. The input to this tool should be a comma"
" separated string of two, representing the image_path and the user"
" description"
),
)
def inference(self, inputs):
image_path, instruct_text = inputs.split(",")[0], ",".join(
@ -913,8 +960,8 @@ class DepthText2Image:
updated_image_path = get_new_image_name(image_path, func_name="depth2image")
image.save(updated_image_path)
print(
f"\nProcessed DepthText2Image, Input Depth: {image_path}, Input Text: {instruct_text}, "
f"Output Image: {updated_image_path}"
f"\nProcessed DepthText2Image, Input Depth: {image_path}, Input Text:"
f" {instruct_text}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -929,9 +976,11 @@ class Image2Normal:
@prompts(
name="Predict Normal Map On Image",
description="useful when you want to detect norm map of the image. "
"like: generate normal map from this image, or predict normal map of this image. "
"The input to this tool should be a string, representing the image_path",
description=(
"useful when you want to detect norm map of the image. like: generate"
" normal map from this image, or predict normal map of this image. The"
" input to this tool should be a string, representing the image_path"
),
)
def inference(self, inputs):
image = Image.open(inputs)
@ -954,7 +1003,8 @@ class Image2Normal:
updated_image_path = get_new_image_name(inputs, func_name="normal-map")
image.save(updated_image_path)
print(
f"\nProcessed Image2Normal, Input Image: {inputs}, Output Depth: {updated_image_path}"
f"\nProcessed Image2Normal, Input Image: {inputs}, Output Depth:"
f" {updated_image_path}"
)
return updated_image_path
@ -988,11 +1038,14 @@ class NormalText2Image:
@prompts(
name="Generate Image Condition On Normal Map",
description="useful when you want to generate a new real image from both the user description and normal map. "
"like: generate a real image of a object or something from this normal map, "
"or generate a new real image of a object or something from the normal map. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the user description",
description=(
"useful when you want to generate a new real image from both the user"
" description and normal map. like: generate a real image of a object or"
" something from this normal map, or generate a new real image of a object"
" or something from the normal map. The input to this tool should be a"
" comma separated string of two, representing the image_path and the user"
" description"
),
)
def inference(self, inputs):
image_path, instruct_text = inputs.split(",")[0], ",".join(
@ -1013,8 +1066,8 @@ class NormalText2Image:
updated_image_path = get_new_image_name(image_path, func_name="normal2image")
image.save(updated_image_path)
print(
f"\nProcessed NormalText2Image, Input Normal: {image_path}, Input Text: {instruct_text}, "
f"Output Image: {updated_image_path}"
f"\nProcessed NormalText2Image, Input Normal: {image_path}, Input Text:"
f" {instruct_text}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -1031,9 +1084,12 @@ class VisualQuestionAnswering:
@prompts(
name="Answer Question About The Image",
description="useful when you need an answer for a question based on an image. "
"like: what is the background color of the last image, how many cats in this figure, what is in this figure. "
"The input to this tool should be a comma separated string of two, representing the image_path and the question",
description=(
"useful when you need an answer for a question based on an image. like:"
" what is the background color of the last image, how many cats in this"
" figure, what is in this figure. The input to this tool should be a comma"
" separated string of two, representing the image_path and the question"
),
)
def inference(self, inputs):
image_path, question = inputs.split(",")[0], ",".join(inputs.split(",")[1:])
@ -1044,8 +1100,8 @@ class VisualQuestionAnswering:
out = self.model.generate(**inputs)
answer = self.processor.decode(out[0], skip_special_tokens=True)
print(
f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input Question: {question}, "
f"Output Answer: {answer}"
f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input"
f" Question: {question}, Output Answer: {answer}"
)
return answer
@ -1245,12 +1301,13 @@ class Segmenting:
@prompts(
name="Segment the Image",
description="useful when you want to segment all the part of the image, but not segment a certain object."
"like: segment all the object in this image, or generate segmentations on this image, "
"or segment the image,"
"or perform segmentation on this image, "
"or segment all the object in this image."
"The input to this tool should be a string, representing the image_path",
description=(
"useful when you want to segment all the part of the image, but not segment"
" a certain object.like: segment all the object in this image, or generate"
" segmentations on this image, or segment the image,or perform segmentation"
" on this image, or segment all the object in this image.The input to this"
" tool should be a string, representing the image_path"
),
)
def inference_all(self, image_path):
image = cv2.imread(image_path)
@ -1401,9 +1458,12 @@ class Text2Box:
@prompts(
name="Detect the Give Object",
description="useful when you only want to detect or find out given objects in the picture"
"The input to this tool should be a comma separated string of two, "
"representing the image_path, the text description of the object to be found",
description=(
"useful when you only want to detect or find out given objects in the"
" pictureThe input to this tool should be a comma separated string of two,"
" representing the image_path, the text description of the object to be"
" found"
),
)
def inference(self, inputs):
image_path, det_prompt = inputs.split(",")
@ -1427,8 +1487,8 @@ class Text2Box:
updated_image = image_with_box.resize(size)
updated_image.save(updated_image_path)
print(
f"\nProcessed ObejectDetecting, Input Image: {image_path}, Object to be Detect {det_prompt}, "
f"Output Image: {updated_image_path}"
f"\nProcessed ObejectDetecting, Input Image: {image_path}, Object to be"
f" Detect {det_prompt}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -1483,7 +1543,8 @@ class InfinityOutPainting:
out = self.ImageVQA.model.generate(**inputs)
answer = self.ImageVQA.processor.decode(out[0], skip_special_tokens=True)
print(
f"\nProcessed VisualQuestionAnswering, Input Question: {question}, Output Answer: {answer}"
f"\nProcessed VisualQuestionAnswering, Input Question: {question}, Output"
f" Answer: {answer}"
)
return answer
@ -1499,9 +1560,9 @@ class InfinityOutPainting:
def check_prompt(self, prompt):
check = (
f"Here is a paragraph with adjectives. "
"Here is a paragraph with adjectives. "
f"{prompt} "
f"Please change all plural forms in the adjectives to singular forms. "
"Please change all plural forms in the adjectives to singular forms. "
)
return self.llm(check)
@ -1512,13 +1573,12 @@ class InfinityOutPainting:
)
style = self.get_BLIP_vqa(image, "what is the style of this image")
imagine_prompt = (
f"let's pretend you are an excellent painter and now "
f"there is an incomplete painting with {BLIP_caption} in the center, "
f"please imagine the complete painting and describe it"
f"you should consider the background color is {background_color}, the style is {style}"
f"You should make the painting as vivid and realistic as possible"
f"You can not use words like painting or picture"
f"and you should use no more than 50 words to describe it"
"let's pretend you are an excellent painter and now there is an incomplete"
f" painting with {BLIP_caption} in the center, please imagine the complete"
" painting and describe ityou should consider the background color is"
f" {background_color}, the style is {style}You should make the painting as"
" vivid and realistic as possibleYou can not use words like painting or"
" pictureand you should use no more than 50 words to describe it"
)
caption = self.llm(imagine_prompt) if imagine else BLIP_caption
caption = self.check_prompt(caption)
@ -1580,9 +1640,12 @@ class InfinityOutPainting:
@prompts(
name="Extend An Image",
description="useful when you need to extend an image into a larger image."
"like: extend the image into a resolution of 2048x1024, extend the image into 2048x1024. "
"The input to this tool should be a comma separated string of two, representing the image_path and the resolution of widthxheight",
description=(
"useful when you need to extend an image into a larger image.like: extend"
" the image into a resolution of 2048x1024, extend the image into"
" 2048x1024. The input to this tool should be a comma separated string of"
" two, representing the image_path and the resolution of widthxheight"
),
)
def inference(self, inputs):
image_path, resolution = inputs.split(",")
@ -1594,8 +1657,8 @@ class InfinityOutPainting:
updated_image_path = get_new_image_name(image_path, func_name="outpainting")
out_painted_image.save(updated_image_path)
print(
f"\nProcessed InfinityOutPainting, Input Image: {image_path}, Input Resolution: {resolution}, "
f"Output Image: {updated_image_path}"
f"\nProcessed InfinityOutPainting, Input Image: {image_path}, Input"
f" Resolution: {resolution}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -1610,12 +1673,13 @@ class ObjectSegmenting:
@prompts(
name="Segment the given object",
description="useful when you only want to segment the certain objects in the picture"
"according to the given text"
"like: segment the cat,"
"or can you segment an obeject for me"
"The input to this tool should be a comma separated string of two, "
"representing the image_path, the text description of the object to be found",
description=(
"useful when you only want to segment the certain objects in the"
" pictureaccording to the given textlike: segment the cat,or can you"
" segment an obeject for meThe input to this tool should be a comma"
" separated string of two, representing the image_path, the text"
" description of the object to be found"
),
)
def inference(self, inputs):
image_path, det_prompt = inputs.split(",")
@ -1627,8 +1691,8 @@ class ObjectSegmenting:
image_pil, image_path, boxes_filt, pred_phrases
)
print(
f"\nProcessed ObejectSegmenting, Input Image: {image_path}, Object to be Segment {det_prompt}, "
f"Output Image: {updated_image_path}"
f"\nProcessed ObejectSegmenting, Input Image: {image_path}, Object to be"
f" Segment {det_prompt}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -1710,10 +1774,12 @@ class ImageEditing:
@prompts(
name="Remove Something From The Photo",
description="useful when you want to remove and object or something from the photo "
"from its description or location. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the object need to be removed. ",
description=(
"useful when you want to remove and object or something from the photo "
"from its description or location. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the object need to be removed. "
),
)
def inference_remove(self, inputs):
image_path, to_be_removed_txt = inputs.split(",")[0], ",".join(
@ -1725,10 +1791,12 @@ class ImageEditing:
@prompts(
name="Replace Something From The Photo",
description="useful when you want to replace an object from the object description or "
"location with another object from its description. "
"The input to this tool should be a comma separated string of three, "
"representing the image_path, the object to be replaced, the object to be replaced with ",
description=(
"useful when you want to replace an object from the object description or"
" location with another object from its description. The input to this tool"
" should be a comma separated string of three, representing the image_path,"
" the object to be replaced, the object to be replaced with "
),
)
def inference_replace_sam(self, inputs):
image_path, to_be_replaced_txt, replace_with_txt = inputs.split(",")
@ -1758,8 +1826,9 @@ class ImageEditing:
updated_image = updated_image.resize(image_pil.size)
updated_image.save(updated_image_path)
print(
f"\nProcessed ImageEditing, Input Image: {image_path}, Replace {to_be_replaced_txt} to {replace_with_txt}, "
f"Output Image: {updated_image_path}"
f"\nProcessed ImageEditing, Input Image: {image_path}, Replace"
f" {to_be_replaced_txt} to {replace_with_txt}, Output Image:"
f" {updated_image_path}"
)
return updated_image_path
@ -1782,8 +1851,10 @@ class BackgroundRemoving:
@prompts(
name="Remove the background",
description="useful when you want to extract the object or remove the background,"
"the input should be a string image_path",
description=(
"useful when you want to extract the object or remove the background,"
"the input should be a string image_path"
),
)
def inference(self, image_path):
"""
@ -1833,7 +1904,8 @@ class MultiModalVisualAgent:
if "ImageCaptioning" not in load_dict:
raise ValueError(
"You have to load ImageCaptioning as a basic function for MultiModalVisualAgent"
"You have to load ImageCaptioning as a basic function for"
" MultiModalVisualAgent"
)
self.models = {}
@ -1944,10 +2016,21 @@ class MultiModalVisualAgent:
description = self.models["ImageCaptioning"].inference(image_filename)
if lang == "Chinese":
Human_prompt = f'\nHuman: 提供一张名为 {image_filename}的图片。它的描述是: {description}。 这些信息帮助你理解这个图像,但是你应该使用工具来完成下面的任务,而不是直接从我的描述中想象。 如果你明白了, 说 "收到". \n'
Human_prompt = (
f"\nHuman: 提供一张名为 {image_filename}的图片。它的描述是:"
f" {description}。 这些信息帮助你理解这个图像,"
"但是你应该使用工具来完成下面的任务,而不是直接从我的描述中想象。"
' 如果你明白了, 说 "收到". \n'
)
AI_prompt = "收到。 "
else:
Human_prompt = f'\nHuman: provide a figure named {image_filename}. The description is: {description}. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say "Received". \n'
Human_prompt = (
f"\nHuman: provide a figure named {image_filename}. The description is:"
f" {description}. This information helps you to understand this image,"
" but you should use tools to finish following tasks, rather than"
" directly imagine from my description. If you understand, say"
' "Received". \n'
)
AI_prompt = "Received. "
self.agent.memory.buffer = (

@ -163,7 +163,9 @@ def get_tools(product_catalog):
Tool(
name="ProductSearch",
func=knowledge_base.run,
description="useful for when you need to answer questions about product information",
description=(
"useful for when you need to answer questions about product information"
),
),
# omnimodal agent
]
@ -224,7 +226,10 @@ class SalesConvoOutputParser(AgentOutputParser):
# TODO - this is not entirely reliable, sometimes results in an error.
return AgentFinish(
{
"output": "I apologize, I was unable to find the answer to your question. Is there anything else I can help with?"
"output": (
"I apologize, I was unable to find the answer to your question."
" Is there anything else I can help with?"
)
},
text,
)
@ -250,21 +255,62 @@ class ProfitPilot(Chain, BaseModel):
use_tools: bool = False
conversation_stage_dict: Dict = {
"1": "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are contacting the prospect.",
"2": "Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.",
"3": "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.",
"4": "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.",
"5": "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.",
"6": "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.",
"7": "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.",
"1": (
"Introduction: Start the conversation by introducing yourself and your"
" company. Be polite and respectful while keeping the tone of the"
" conversation professional. Your greeting should be welcoming. Always"
" clarify in your greeting the reason why you are contacting the prospect."
),
"2": (
"Qualification: Qualify the prospect by confirming if they are the right"
" person to talk to regarding your product/service. Ensure that they have"
" the authority to make purchasing decisions."
),
"3": (
"Value proposition: Briefly explain how your product/service can benefit"
" the prospect. Focus on the unique selling points and value proposition of"
" your product/service that sets it apart from competitors."
),
"4": (
"Needs analysis: Ask open-ended questions to uncover the prospect's needs"
" and pain points. Listen carefully to their responses and take notes."
),
"5": (
"Solution presentation: Based on the prospect's needs, present your"
" product/service as the solution that can address their pain points."
),
"6": (
"Objection handling: Address any objections that the prospect may have"
" regarding your product/service. Be prepared to provide evidence or"
" testimonials to support your claims."
),
"7": (
"Close: Ask for the sale by proposing a next step. This could be a demo, a"
" trial or a meeting with decision-makers. Ensure to summarize what has"
" been discussed and reiterate the benefits."
),
}
salesperson_name: str = "Ted Lasso"
salesperson_role: str = "Business Development Representative"
company_name: str = "Sleep Haven"
company_business: str = "Sleep Haven is a premium mattress company that provides customers with the most comfortable and supportive sleeping experience possible. We offer a range of high-quality mattresses, pillows, and bedding accessories that are designed to meet the unique needs of our customers."
company_values: str = "Our mission at Sleep Haven is to help people achieve a better night's sleep by providing them with the best possible sleep solutions. We believe that quality sleep is essential to overall health and well-being, and we are committed to helping our customers achieve optimal sleep by offering exceptional products and customer service."
conversation_purpose: str = "find out whether they are looking to achieve better sleep via buying a premier mattress."
company_business: str = (
"Sleep Haven is a premium mattress company that provides customers with the"
" most comfortable and supportive sleeping experience possible. We offer a"
" range of high-quality mattresses, pillows, and bedding accessories that are"
" designed to meet the unique needs of our customers."
)
company_values: str = (
"Our mission at Sleep Haven is to help people achieve a better night's sleep by"
" providing them with the best possible sleep solutions. We believe that"
" quality sleep is essential to overall health and well-being, and we are"
" committed to helping our customers achieve optimal sleep by offering"
" exceptional products and customer service."
)
conversation_purpose: str = (
"find out whether they are looking to achieve better sleep via buying a premier"
" mattress."
)
conversation_type: str = "call"
def retrieve_conversation_stage(self, key):
@ -412,14 +458,32 @@ config = dict(
salesperson_name="Ted Lasso",
salesperson_role="Business Development Representative",
company_name="Sleep Haven",
company_business="Sleep Haven is a premium mattress company that provides customers with the most comfortable and supportive sleeping experience possible. We offer a range of high-quality mattresses, pillows, and bedding accessories that are designed to meet the unique needs of our customers.",
company_values="Our mission at Sleep Haven is to help people achieve a better night's sleep by providing them with the best possible sleep solutions. We believe that quality sleep is essential to overall health and well-being, and we are committed to helping our customers achieve optimal sleep by offering exceptional products and customer service.",
conversation_purpose="find out whether they are looking to achieve better sleep via buying a premier mattress.",
company_business=(
"Sleep Haven is a premium mattress company that provides customers with the"
" most comfortable and supportive sleeping experience possible. We offer a"
" range of high-quality mattresses, pillows, and bedding accessories that are"
" designed to meet the unique needs of our customers."
),
company_values=(
"Our mission at Sleep Haven is to help people achieve a better night's sleep by"
" providing them with the best possible sleep solutions. We believe that"
" quality sleep is essential to overall health and well-being, and we are"
" committed to helping our customers achieve optimal sleep by offering"
" exceptional products and customer service."
),
conversation_purpose=(
"find out whether they are looking to achieve better sleep via buying a premier"
" mattress."
),
conversation_history=[],
conversation_type="call",
conversation_stage=conversation_stages.get(
"1",
"Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional.",
(
"Introduction: Start the conversation by introducing yourself and your"
" company. Be polite and respectful while keeping the tone of the"
" conversation professional."
),
),
use_tools=True,
product_catalog="sample_product_catalog.txt",

@ -19,7 +19,8 @@ class Registry(BaseModel):
def build(self, type: str, **kwargs):
if type not in self.entries:
raise ValueError(
f'{type} is not registered. Please register with the .register("{type}") method provided in {self.name} registry'
f"{type} is not registered. Please register with the"
f' .register("{type}") method provided in {self.name} registry'
)
return self.entries[type](**kwargs)
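For context on that error message, the registry pairs a .register(type) decorator with the .build(type, **kwargs) factory shown here. A simplified sketch of the pattern (the real class is a pydantic BaseModel; this strips that away):

class Registry:
    def __init__(self, name: str):
        self.name = name
        self.entries: dict = {}

    def register(self, type: str):
        # Decorator: store the class constructor under `type`.
        def decorator(cls):
            self.entries[type] = cls
            return cls
        return decorator

    def build(self, type: str, **kwargs):
        if type not in self.entries:
            raise ValueError(
                f"{type} is not registered. Please register with the"
                f' .register("{type}") method provided in {self.name} registry'
            )
        return self.entries[type](**kwargs)

agent_registry = Registry("agent")

@agent_registry.register("worker")
class Worker:
    def __init__(self, task: str = ""):
        self.task = task

worker = agent_registry.build("worker", task="search")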

@ -460,7 +460,7 @@ class Chroma(VectorStore):
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on" "creation."
"For MMR search, you must specify an embedding function oncreation."
)
embedding = self._embedding_function.embed_query(query)
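This hunk is the implicit-concatenation pitfall in miniature: adjacent Python string literals join with no separator, so a pair that drops the boundary space silently fuses words ("on" + "creation" reads as "oncreation"). A quick illustration:

# Adjacent literals concatenate with nothing in between:
broken = "specify an embedding function on" "creation."
assert broken.endswith("oncreation.")

# Keep the space inside one of the literals:
fixed = "specify an embedding function on " "creation."
assert fixed.endswith("on creation.")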

@ -111,7 +111,10 @@ class Step(StepRequestBody):
output: Optional[str] = Field(
None,
description="Output of the task step.",
example="I am going to use the write_to_file command and write Washington to a file called output.txt <write_to_file('output.txt', 'Washington')",
example=(
"I am going to use the write_to_file command and write Washington to a file"
" called output.txt <write_to_file('output.txt', 'Washington')"
),
)
additional_output: Optional[StepOutput] = None
artifacts: List[Artifact] = Field(

@ -20,7 +20,7 @@ from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA
# from swarms.models.gpt4v import GPT4Vision
# from swarms.models.dalle3 import Dalle3
# from swarms.models.distilled_whisperx import DistilWhisperModel
# from swarms.models.fuyu import Fuyu # Not working, wait until they update
from swarms.models.fuyu import Fuyu
import sys
@ -47,4 +47,5 @@ __all__ = [
"WizardLLMStoryTeller",
# "GPT4Vision",
# "Dalle3",
"Fuyu",
]
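With "Fuyu" restored to __all__, the package-level import used by the old example file works again:

from swarms.models import Fuyu  # re-exported via swarms/models/__init__.py

fuyu = Fuyu()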

@ -191,7 +191,7 @@ def build_extra_kwargs(
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
"Instead they were passed in as part of `model_kwargs` parameter."
)
return extra_kwargs

@ -3,7 +3,7 @@
BiomedCLIP-PubMedBERT_256-vit_base_patch16_224
https://huggingface.co/microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224
BiomedCLIP is a biomedical vision-language foundation model that is pretrained on PMC-15M,
a dataset of 15 million figure-caption pairs extracted from biomedical research articles in PubMed Central, using contrastive learning. It uses PubMedBERT as the text encoder and Vision Transformer as the image encoder, with domain-specific adaptations. It can perform various vision-language processing (VLP) tasks such as cross-modal retrieval, image classification, and visual question answering. BiomedCLIP establishes new state of the art in a wide range of standard datasets, and substantially outperforms prior VLP approaches:

@ -1,18 +1,18 @@
"""
BioGPT
Pre-trained language models have attracted increasing attention in the biomedical domain,
inspired by their great success in the general natural language domain.
Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants),
the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT.
While they have achieved great success on a variety of discriminative downstream biomedical tasks,
the lack of generation ability constrains their application scope.
In this paper, we propose BioGPT, a domain-specific generative Transformer language model
pre-trained on large-scale biomedical literature.
We evaluate BioGPT on six biomedical natural language processing tasks
and demonstrate that our model outperforms previous models on most tasks.
Especially, we get 44.98%, 38.42% and 40.76% F1 score on BC5CDR, KD-DTI and DDI
end-to-end relation extraction tasks, respectively, and 78.2% accuracy on PubMedQA,
creating a new record. Our case study on text generation further demonstrates the
advantage of BioGPT on biomedical literature to generate fluent descriptions for biomedical terms.

@ -124,7 +124,10 @@ class Dalle3:
# Handling exceptions and printing the errors details
print(
colored(
f"Error running Dalle3: {error} try optimizing your api key and or try again",
(
f"Error running Dalle3: {error} try optimizing your api key and"
" or try again"
),
"red",
)
)
@ -163,7 +166,10 @@ class Dalle3:
except (Exception, openai.OpenAIError) as error:
print(
colored(
f"Error running Dalle3: {error} try optimizing your api key and or try again",
(
f"Error running Dalle3: {error} try optimizing your api key and"
" or try again"
),
"red",
)
)

@ -1,11 +1,13 @@
"""Fuyu model by Kye"""
from io import BytesIO
import requests
from PIL import Image
from transformers import (
FuyuProcessor,
FuyuForCausalLM,
AutoTokenizer,
FuyuForCausalLM,
FuyuImageProcessor,
FuyuProcessor,
)
from PIL import Image
class Fuyu:
@ -28,14 +30,15 @@ class Fuyu:
>>> fuyu("Hello, my name is", "path/to/image.png")
"""
def __init__(
self,
pretrained_path: str = "adept/fuyu-8b",
device_map: str = "cuda:0",
max_new_tokens: int = 7,
device_map: str = "auto",
max_new_tokens: int = 500,
*args,
**kwargs,
):
self.pretrained_path = pretrained_path
self.device_map = device_map
@ -44,12 +47,19 @@ class Fuyu:
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_path)
self.image_processor = FuyuImageProcessor()
self.processor = FuyuProcessor(
image_procesor=self.image_processor, tokenizer=self.tokenizer
image_processor=self.image_processor, tokenizer=self.tokenizer, **kwargs
)
self.model = FuyuForCausalLM.from_pretrained(
pretrained_path, device_map=device_map
pretrained_path,
device_map=device_map,
**kwargs,
)
def get_img(self, img: str):
"""Get the image from the path"""
image_pil = Image.open(img)
return image_pil
def __call__(self, text: str, img: str):
"""Call the model with text and img paths"""
image_pil = Image.open(img)
@ -63,3 +73,9 @@ class Fuyu:
output = self.model.generate(**model_inputs, max_new_tokens=self.max_new_tokens)
text = self.processor.batch_decode(output[:, -7:], skip_special_tokens=True)
return print(str(text))
def get_img_from_web(self, img_url: str):
"""Get the image from the web"""
response = requests.get(img_url)
image_pil = Image.open(BytesIO(response.content))
return image_pil
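A short usage sketch of the updated wrapper, reflecting the new defaults and the web helper; the URL and image path are placeholders, not files shipped with this commit.

from swarms.models.fuyu import Fuyu

fuyu = Fuyu()  # new defaults: device_map="auto", max_new_tokens=500
web_image = fuyu.get_img_from_web("https://example.com/sample.png")  # hypothetical URL; returns a PIL image
fuyu("What is in this image?", "path/to/image.png")  # __call__ still takes a local path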

@ -178,7 +178,8 @@ class GPT4Vision:
time.sleep(self.backoff_factor**attempt)
except Exception as error:
self.logger.error(
f"Unexpected Error: {error} try optimizing your api key and try again"
f"Unexpected Error: {error} try optimizing your api key and try"
" again"
)
raise error from None
@ -231,7 +232,10 @@ class GPT4Vision:
except Exception as error:
print(
colored(
f"Error when calling GPT4Vision, Error: {error} Try optimizing your key, and try again",
(
f"Error when calling GPT4Vision, Error: {error} Try optimizing"
" your key, and try again"
),
"red",
)
)
@ -282,7 +286,10 @@ class GPT4Vision:
except Exception as error:
print(
colored(
f"Error when calling GPT4Vision, Error: {error} Try optimizing your key, and try again",
(
f"Error when calling GPT4Vision, Error: {error} Try optimizing"
" your key, and try again"
),
"red",
)
)
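The retry loop above sleeps backoff_factor**attempt between attempts; a standalone sketch of that exponential-backoff pattern, with names invented for illustration:

import time

def call_with_backoff(fn, max_retries: int = 3, backoff_factor: float = 2.0):
    """Retry fn, sleeping backoff_factor**attempt seconds after each failure."""
    for attempt in range(max_retries):
        try:
            return fn()
        except Exception:
            if attempt == max_retries - 1:
                raise  # out of retries; surface the last error
            time.sleep(backoff_factor**attempt)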

@ -166,7 +166,10 @@ class HuggingfaceLLM:
except Exception as e:
print(
colored(
f"HuggingfaceLLM could not generate text because of error: {e}, try optimizing your arguments",
(
f"HuggingfaceLLM could not generate text because of error: {e},"
" try optimizing your arguments"
),
"red",
)
)
@ -299,7 +302,7 @@ class HuggingfaceLLM:
Task Environment:
Task: {task}
""",
"red",
)

@ -106,7 +106,10 @@ class Kosmos:
self.run(prompt, image_url)
def referring_expression_generation(self, phrase, image_url):
prompt = "<grounding><phrase> It</phrase><object><patch_index_0044><patch_index_0863></object> is"
prompt = (
"<grounding><phrase>"
" It</phrase><object><patch_index_0044><patch_index_0863></object> is"
)
self.run(prompt, image_url)
def grounded_vqa(self, question, image_url):

@ -233,7 +233,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra

@ -593,7 +593,8 @@ class BaseOpenAI(BaseLLM):
if context_size is None:
raise ValueError(
f"Unknown model: {modelname}. Please provide a valid OpenAI model name."
"Known models are: " + ", ".join(model_token_mapping.keys())
"Known models are: "
+ ", ".join(model_token_mapping.keys())
)
return context_size

@ -112,18 +112,20 @@ class OpenAITokenizer(BaseTokenizer):
tokens_per_name = -1
elif "gpt-3.5-turbo" in model or "gpt-35-turbo" in model:
logging.info(
"gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613."
"gpt-3.5-turbo may update over time. Returning num tokens assuming"
" gpt-3.5-turbo-0613."
)
return self.count_tokens(text, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
logging.info(
"gpt-4 may update over time. Returning num tokens assuming gpt-4-0613."
"gpt-4 may update over time. Returning num tokens assuming"
" gpt-4-0613."
)
return self.count_tokens(text, model="gpt-4-0613")
else:
raise NotImplementedError(
f"""token_count() is not implemented for model {model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for
f"""token_count() is not implemented for model {model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for
information on how messages are converted to tokens."""
)
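The fallbacks above pin moving aliases such as gpt-3.5-turbo to a dated snapshot before counting; a simplified sketch of the counting step itself, assuming the tiktoken library:

import tiktoken

def count_tokens(text: str, model: str = "gpt-3.5-turbo-0613") -> int:
    # encoding_for_model resolves a model name to its tokenizer;
    # unknown models raise KeyError, analogous to the NotImplementedError above.
    encoding = tiktoken.encoding_for_model(model)
    return len(encoding.encode(text))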

@ -33,6 +33,7 @@ class Zephyr:
temperature: float = 0.5,
top_k: float = 50,
top_p: float = 0.95,
do_sample: bool = True,
*args,
**kwargs,
):
@ -45,6 +46,7 @@ class Zephyr:
self.temperature = temperature
self.top_k = top_k
self.top_p = top_p
self.do_sample = do_sample
self.pipe = pipeline(
"text-generation",
@ -57,10 +59,6 @@ class Zephyr:
"role": "system",
"content": f"{self.system_prompt}\n\nUser:",
},
{
"role": "user",
"content": "How many helicopters can a human eat in one sitting?",
},
]
def __call__(self, task: str):
@ -82,14 +80,16 @@ class Zephyr:
# Apply the chat template to format the messages
prompt = self.pipe.tokenizer.apply_chat_template(
self.messages, tokenize=False, add_generation_prompt=True
self.messages,
tokenize=self.tokenize,
add_generation_prompt=self.add_generation_prompt,
)
# Generate a response
outputs = self.pipe(
prompt,
max_new_tokens=self.max_new_tokens,
do_sample=True,
do_sample=self.do_sample,
temperature=self.temperature,
top_k=self.top_k,
top_p=self.top_p,
@ -101,5 +101,4 @@ class Zephyr:
# Optionally, you could also add the chatbot's response to the messages list
# However, the below line should be adjusted to extract the chatbot's response only
# self.messages.append({"role": "bot", "content": generated_text})
return generated_text
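A usage sketch of the now-configurable sampling flag; the import path is assumed from the package layout:

from swarms.models import Zephyr  # import path assumed

model = Zephyr(do_sample=True, temperature=0.5, top_k=50, top_p=0.95)
print(model("Summarize the swarms project in one sentence."))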

@ -70,9 +70,9 @@ class PromptGenerator:
f"Commands:\n{''.join(self.commands)}\n\n"
f"Resources:\n{''.join(self.resources)}\n\n"
f"Performance Evaluation:\n{''.join(self.performance_evaluation)}\n\n"
f"You should only respond in JSON format as described below "
"You should only respond in JSON format as described below "
f"\nResponse Format: \n{formatted_response_format} "
f"\nEnsure the response can be parsed by Python json.loads"
"\nEnsure the response can be parsed by Python json.loads"
)
return prompt_string

@ -4,10 +4,28 @@ def generate_agent_role_prompt(agent):
Returns: str: The agent role prompt.
"""
prompts = {
"Finance Agent": "You are a seasoned finance analyst AI assistant. Your primary goal is to compose comprehensive, astute, impartial, and methodically arranged financial reports based on provided data and trends.",
"Travel Agent": "You are a world-travelled AI tour guide assistant. Your main purpose is to draft engaging, insightful, unbiased, and well-structured travel reports on given locations, including history, attractions, and cultural insights.",
"Academic Research Agent": "You are an AI academic research assistant. Your primary responsibility is to create thorough, academically rigorous, unbiased, and systematically organized reports on a given research topic, following the standards of scholarly work.",
"Default Agent": "You are an AI critical thinker research assistant. Your sole purpose is to write well written, critically acclaimed, objective and structured reports on given text.",
"Finance Agent": (
"You are a seasoned finance analyst AI assistant. Your primary goal is to"
" compose comprehensive, astute, impartial, and methodically arranged"
" financial reports based on provided data and trends."
),
"Travel Agent": (
"You are a world-travelled AI tour guide assistant. Your main purpose is to"
" draft engaging, insightful, unbiased, and well-structured travel reports"
" on given locations, including history, attractions, and cultural"
" insights."
),
"Academic Research Agent": (
"You are an AI academic research assistant. Your primary responsibility is"
" to create thorough, academically rigorous, unbiased, and systematically"
" organized reports on a given research topic, following the standards of"
" scholarly work."
),
"Default Agent": (
"You are an AI critical thinker research assistant. Your sole purpose is to"
" write well written, critically acclaimed, objective and structured"
" reports on given text."
),
}
return prompts.get(agent, "No such agent")
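A quick sketch of the lookup-with-fallback behavior above:

print(generate_agent_role_prompt("Finance Agent"))  # seasoned finance analyst prompt
print(generate_agent_role_prompt("Unknown Agent"))  # falls back to "No such agent"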
@ -22,10 +40,11 @@ def generate_report_prompt(question, research_summary):
return (
f'"""{research_summary}""" Using the above information, answer the following'
f' question or topic: "{question}" in a detailed report --'
" The report should focus on the answer to the question, should be well structured, informative,"
" in depth, with facts and numbers if available, a minimum of 1,200 words and with markdown syntax and apa format. "
"Write all source urls at the end of the report in apa format"
f' question or topic: "{question}" in a detailed report -- The report should'
" focus on the answer to the question, should be well structured, informative,"
" in depth, with facts and numbers if available, a minimum of 1,200 words and"
" with markdown syntax and apa format. Write all source urls at the end of the"
" report in apa format"
)
@ -36,8 +55,9 @@ def generate_search_queries_prompt(question):
"""
return (
f'Write 4 google search queries to search online that form an objective opinion from the following: "{question}"'
f'You must respond with a list of strings in the following format: ["query 1", "query 2", "query 3", "query 4"]'
"Write 4 google search queries to search online that form an objective opinion"
f' from the following: "{question}"You must respond with a list of strings in'
' the following format: ["query 1", "query 2", "query 3", "query 4"]'
)
@ -52,13 +72,15 @@ def generate_resource_report_prompt(question, research_summary):
str: The resource report prompt for the given question and research summary.
"""
return (
f'"""{research_summary}""" Based on the above information, generate a bibliography recommendation report for the following'
f' question or topic: "{question}". The report should provide a detailed analysis of each recommended resource,'
" explaining how each source can contribute to finding answers to the research question."
" Focus on the relevance, reliability, and significance of each source."
" Ensure that the report is well-structured, informative, in-depth, and follows Markdown syntax."
" Include relevant facts, figures, and numbers whenever available."
" The report should have a minimum length of 1,200 words."
f'"""{research_summary}""" Based on the above information, generate a'
" bibliography recommendation report for the following question or topic:"
f' "{question}". The report should provide a detailed analysis of each'
" recommended resource, explaining how each source can contribute to finding"
" answers to the research question. Focus on the relevance, reliability, and"
" significance of each source. Ensure that the report is well-structured,"
" informative, in-depth, and follows Markdown syntax. Include relevant facts,"
" figures, and numbers whenever available. The report should have a minimum"
" length of 1,200 words."
)
@ -70,11 +92,13 @@ def generate_outline_report_prompt(question, research_summary):
"""
return (
f'"""{research_summary}""" Using the above information, generate an outline for a research report in Markdown syntax'
f' for the following question or topic: "{question}". The outline should provide a well-structured framework'
" for the research report, including the main sections, subsections, and key points to be covered."
" The research report should be detailed, informative, in-depth, and a minimum of 1,200 words."
" Use appropriate Markdown syntax to format the outline and ensure readability."
f'"""{research_summary}""" Using the above information, generate an outline for'
" a research report in Markdown syntax for the following question or topic:"
f' "{question}". The outline should provide a well-structured framework for the'
" research report, including the main sections, subsections, and key points to"
" be covered. The research report should be detailed, informative, in-depth,"
" and a minimum of 1,200 words. Use appropriate Markdown syntax to format the"
" outline and ensure readability."
)
@ -86,9 +110,11 @@ def generate_concepts_prompt(question, research_summary):
"""
return (
f'"""{research_summary}""" Using the above information, generate a list of 5 main concepts to learn for a research report'
f' on the following question or topic: "{question}". The outline should provide a well-structured framework. '
'You must respond with a list of strings in the following format: ["concepts 1", "concepts 2", "concepts 3", "concepts 4", "concepts 5"]'
f'"""{research_summary}""" Using the above information, generate a list of 5'
" main concepts to learn for a research report on the following question or"
f' topic: "{question}". The outline should provide a well-structured'
" framework. You must respond with a list of strings in the following format:"
' ["concepts 1", "concepts 2", "concepts 3", "concepts 4", "concepts 5"]'
)
@ -102,9 +128,10 @@ def generate_lesson_prompt(concept):
"""
prompt = (
f"generate a comprehensive lesson about {concept} in Markdown syntax. This should include the definition"
f"of {concept}, its historical background and development, its applications or uses in different"
f"fields, and notable events or facts related to {concept}."
f"generate a comprehensive lesson about {concept} in Markdown syntax. This"
f" should include the definitionof {concept}, its historical background and"
" development, its applications or uses in differentfields, and notable events"
f" or facts related to {concept}."
)
return prompt

@ -46,47 +46,47 @@ Growth Agent is a dynamic fusion of digital marketing, content creation, and cus
- **3.1 Data Assimilation and Interpretation**
- *3.1.1* Efficiently process vast volumes of data using state-of-the-art algorithms.
- *3.1.2* Identify key patterns, trends, and anomalies to derive actionable insights.
- *3.1.3* Use these insights to predict future trends and user behaviors.
- **3.2 Ad Generation**
- *3.2.1* Leverage Generative Adversarial Networks (GANs) to craft engaging ads.
- *3.2.2* Implement A/B testing mechanisms to select high-performing ads.
- *3.2.3* Continuously refine ad generation based on user feedback and interactions.
- **3.3 Website Creation and Optimization**
- *3.3.1* Use responsive design principles for accessibility across devices.
- *3.3.2* Integrate user tracking tools to gain insights into navigation patterns.
- *3.3.3* Leverage AI-driven chatbots and interactive elements to improve user engagement and retention.
- **3.4 Messaging Sequences**
- *3.4.1* Craft sequences tailored to individual user behaviors and interactions.
- *3.4.2* Harness advanced Natural Language Processing (NLP) tools for optimal communication.
- *3.4.3* Periodically update sequences based on user feedback and evolving market trends.
- **3.5 Systematic Growth and Enhancement**
- *3.5.1* Implement reinforcement learning for real-time adaptation and strategy refinement.
- *3.5.2* Engage in regular feedback loops with users to understand needs and pain points.
- *3.5.3* Benchmark performance against industry leaders to identify areas of improvement.
- **3.6 Integration and Collaboration**
- *3.6.1* Seamlessly integrate with other digital platforms and tools.
- *3.6.2* Collaborate with other AI models or systems to harness collective intelligence.
---
@ -96,9 +96,9 @@ Growth Agent is a dynamic fusion of digital marketing, content creation, and cus
Achieving world-class expertise is a journey, not a destination. Ensure:
- **4.1** Regular system diagnostics and optimization checks.
- **4.2** Inclusion of emerging platforms and technologies into the learning paradigm.
- **4.3** Frequent benchmarking against top industry standards.
---

@ -1,4 +1,7 @@
ERROR_PROMPT = "An error has occurred for the following text: \n{promptedQuery} Please explain this error.\n {e}"
ERROR_PROMPT = (
"An error has occurred for the following text: \n{promptedQuery} Please explain"
" this error.\n {e}"
)
IMAGE_PROMPT = """
provide a figure named {filename}. The description is: {description}.

@ -1,12 +1,43 @@
PY_SIMPLE_COMPLETION_INSTRUCTION = "# Write the body of this function only."
PY_REFLEXION_COMPLETION_INSTRUCTION = "You are a Python writing assistant. You will be given your past function implementation, a series of unit tests, and a hint to change the implementation appropriately. Write your full implementation (restate the function signature).\n\n-----"
PY_SELF_REFLECTION_COMPLETION_INSTRUCTION = "You are a Python writing assistant. You will be given a function implementation and a series of unit tests. Your goal is to write a few sentences to explain why your implementation is wrong as indicated by the tests. You will need this as a hint when you try again later. Only provide the few sentence description in your answer, not the implementation.\n\n-----"
USE_PYTHON_CODEBLOCK_INSTRUCTION = "Use a Python code block to write your response. For example:\n```python\nprint('Hello world!')\n```"
PY_SIMPLE_CHAT_INSTRUCTION = "You are an AI that only responds with python code, NOT ENGLISH. You will be given a function signature and its docstring by the user. Write your full implementation (restate the function signature)."
PY_SIMPLE_CHAT_INSTRUCTION_V2 = "You are an AI that only responds with only python code. You will be given a function signature and its docstring by the user. Write your full implementation (restate the function signature)."
PY_REFLEXION_CHAT_INSTRUCTION = "You are an AI Python assistant. You will be given your past function implementation, a series of unit tests, and a hint to change the implementation appropriately. Write your full implementation (restate the function signature)."
PY_REFLEXION_CHAT_INSTRUCTION_V2 = "You are an AI Python assistant. You will be given your previous implementation of a function, a series of unit tests results, and your self-reflection on your previous implementation. Write your full implementation (restate the function signature)."
PY_REFLEXION_COMPLETION_INSTRUCTION = (
"You are a Python writing assistant. You will be given your past function"
" implementation, a series of unit tests, and a hint to change the implementation"
" appropriately. Write your full implementation (restate the function"
" signature).\n\n-----"
)
PY_SELF_REFLECTION_COMPLETION_INSTRUCTION = (
"You are a Python writing assistant. You will be given a function implementation"
" and a series of unit tests. Your goal is to write a few sentences to explain why"
" your implementation is wrong as indicated by the tests. You will need this as a"
" hint when you try again later. Only provide the few sentence description in your"
" answer, not the implementation.\n\n-----"
)
USE_PYTHON_CODEBLOCK_INSTRUCTION = (
"Use a Python code block to write your response. For"
" example:\n```python\nprint('Hello world!')\n```"
)
PY_SIMPLE_CHAT_INSTRUCTION = (
"You are an AI that only responds with python code, NOT ENGLISH. You will be given"
" a function signature and its docstring by the user. Write your full"
" implementation (restate the function signature)."
)
PY_SIMPLE_CHAT_INSTRUCTION_V2 = (
"You are an AI that only responds with only python code. You will be given a"
" function signature and its docstring by the user. Write your full implementation"
" (restate the function signature)."
)
PY_REFLEXION_CHAT_INSTRUCTION = (
"You are an AI Python assistant. You will be given your past function"
" implementation, a series of unit tests, and a hint to change the implementation"
" appropriately. Write your full implementation (restate the function signature)."
)
PY_REFLEXION_CHAT_INSTRUCTION_V2 = (
"You are an AI Python assistant. You will be given your previous implementation of"
" a function, a series of unit tests results, and your self-reflection on your"
" previous implementation. Write your full implementation (restate the function"
" signature)."
)
PY_REFLEXION_FEW_SHOT_ADD = '''Example 1:
[previous impl]:
```python
@ -139,8 +170,21 @@ def fullJustify(words: List[str], maxWidth: int) -> List[str]:
END EXAMPLES
'''
PY_SELF_REFLECTION_CHAT_INSTRUCTION = "You are a Python programming assistant. You will be given a function implementation and a series of unit tests. Your goal is to write a few sentences to explain why your implementation is wrong as indicated by the tests. You will need this as a hint when you try again later. Only provide the few sentence description in your answer, not the implementation."
PY_SELF_REFLECTION_CHAT_INSTRUCTION_V2 = "You are a Python programming assistant. You will be given a function implementation and a series of unit test results. Your goal is to write a few sentences to explain why your implementation is wrong as indicated by the tests. You will need this as guidance when you try again later. Only provide the few sentence description in your answer, not the implementation. You will be given a few examples by the user."
PY_SELF_REFLECTION_CHAT_INSTRUCTION = (
"You are a Python programming assistant. You will be given a function"
" implementation and a series of unit tests. Your goal is to write a few sentences"
" to explain why your implementation is wrong as indicated by the tests. You will"
" need this as a hint when you try again later. Only provide the few sentence"
" description in your answer, not the implementation."
)
PY_SELF_REFLECTION_CHAT_INSTRUCTION_V2 = (
"You are a Python programming assistant. You will be given a function"
" implementation and a series of unit test results. Your goal is to write a few"
" sentences to explain why your implementation is wrong as indicated by the tests."
" You will need this as guidance when you try again later. Only provide the few"
" sentence description in your answer, not the implementation. You will be given a"
" few examples by the user."
)
PY_SELF_REFLECTION_FEW_SHOT = """Example 1:
[function impl]:
```python

@ -1,11 +1,38 @@
conversation_stages = {
"1": "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are contacting the prospect.",
"2": "Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.",
"3": "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.",
"4": "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.",
"5": "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.",
"6": "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.",
"7": "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.",
"1": (
"Introduction: Start the conversation by introducing yourself and your company."
" Be polite and respectful while keeping the tone of the conversation"
" professional. Your greeting should be welcoming. Always clarify in your"
" greeting the reason why you are contacting the prospect."
),
"2": (
"Qualification: Qualify the prospect by confirming if they are the right person"
" to talk to regarding your product/service. Ensure that they have the"
" authority to make purchasing decisions."
),
"3": (
"Value proposition: Briefly explain how your product/service can benefit the"
" prospect. Focus on the unique selling points and value proposition of your"
" product/service that sets it apart from competitors."
),
"4": (
"Needs analysis: Ask open-ended questions to uncover the prospect's needs and"
" pain points. Listen carefully to their responses and take notes."
),
"5": (
"Solution presentation: Based on the prospect's needs, present your"
" product/service as the solution that can address their pain points."
),
"6": (
"Objection handling: Address any objections that the prospect may have"
" regarding your product/service. Be prepared to provide evidence or"
" testimonials to support your claims."
),
"7": (
"Close: Ask for the sale by proposing a next step. This could be a demo, a"
" trial or a meeting with decision-makers. Ensure to summarize what has been"
" discussed and reiterate the benefits."
),
}

@ -46,11 +46,38 @@ Conversation history:
"""
conversation_stages = {
"1": "Introduction: Start the conversation by introducing yourself and your company. Be polite and respectful while keeping the tone of the conversation professional. Your greeting should be welcoming. Always clarify in your greeting the reason why you are contacting the prospect.",
"2": "Qualification: Qualify the prospect by confirming if they are the right person to talk to regarding your product/service. Ensure that they have the authority to make purchasing decisions.",
"3": "Value proposition: Briefly explain how your product/service can benefit the prospect. Focus on the unique selling points and value proposition of your product/service that sets it apart from competitors.",
"4": "Needs analysis: Ask open-ended questions to uncover the prospect's needs and pain points. Listen carefully to their responses and take notes.",
"5": "Solution presentation: Based on the prospect's needs, present your product/service as the solution that can address their pain points.",
"6": "Objection handling: Address any objections that the prospect may have regarding your product/service. Be prepared to provide evidence or testimonials to support your claims.",
"7": "Close: Ask for the sale by proposing a next step. This could be a demo, a trial or a meeting with decision-makers. Ensure to summarize what has been discussed and reiterate the benefits.",
"1": (
"Introduction: Start the conversation by introducing yourself and your company."
" Be polite and respectful while keeping the tone of the conversation"
" professional. Your greeting should be welcoming. Always clarify in your"
" greeting the reason why you are contacting the prospect."
),
"2": (
"Qualification: Qualify the prospect by confirming if they are the right person"
" to talk to regarding your product/service. Ensure that they have the"
" authority to make purchasing decisions."
),
"3": (
"Value proposition: Briefly explain how your product/service can benefit the"
" prospect. Focus on the unique selling points and value proposition of your"
" product/service that sets it apart from competitors."
),
"4": (
"Needs analysis: Ask open-ended questions to uncover the prospect's needs and"
" pain points. Listen carefully to their responses and take notes."
),
"5": (
"Solution presentation: Based on the prospect's needs, present your"
" product/service as the solution that can address their pain points."
),
"6": (
"Objection handling: Address any objections that the prospect may have"
" regarding your product/service. Be prepared to provide evidence or"
" testimonials to support your claims."
),
"7": (
"Close: Ask for the sale by proposing a next step. This could be a demo, a"
" trial or a meeting with decision-makers. Ensure to summarize what has been"
" discussed and reiterate the benefits."
),
}

@ -1,10 +1,10 @@
def task_planner_prompt(objective):
return f"""
You are a planner who is an expert at coming up with a todo list for a given objective.
useful for when you need to come up with todo lists.
You are a planner who is an expert at coming up with a todo list for a given objective.
useful for when you need to come up with todo lists.
Input: an objective to create a todo list for. Output: a todo list for that objective. For the main objective
Input: an objective to create a todo list for. Output: a todo list for that objective. For the main objective
lay out each important subtask that needs to be accomplished and provide all subtasks with a ranking system prioritizing the
most important subtasks first that are likely to accomplish the main objective. Use the following ranking system:
0.0 -> 1.0, 1.0 being the most important subtask.

@ -20,7 +20,10 @@ class ChatbotError(Exception):
def __init__(self, *args: object) -> None:
if SUPPORT_ADD_NOTES:
super().add_note(
"Please check that the input is correct, or you can resolve this issue by filing an issue",
(
"Please check that the input is correct, or you can resolve this"
" issue by filing an issue"
),
)
super().add_note("Project URL: https://github.com/acheong08/ChatGPT")
super().__init__(*args)
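The SUPPORT_ADD_NOTES guard exists because exception notes (PEP 678) are only available on Python 3.11+; a minimal self-contained sketch of the same pattern:

import sys

SUPPORT_ADD_NOTES = sys.version_info >= (3, 11)  # Exception.add_note() is Python 3.11+

try:
    try:
        raise ValueError("bad input")
    except ValueError as e:
        if SUPPORT_ADD_NOTES:
            e.add_note("Please check that the input is correct")
        raise
except ValueError as err:
    print(err, getattr(err, "__notes__", []))  # notes travel with the exception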

@ -1,5 +1,5 @@
"""
Base Structure for all Swarm Structures
"""

@ -8,7 +8,7 @@ TODO:
- add async processing for run and batch run
- add plan module
- concurrent
-
-
"""
import json
@ -30,9 +30,9 @@ This will enable you to leave the autonomous loop.
# Constants
FLOW_SYSTEM_PROMPT = f"""
You are an autonomous agent granted autonomy from a Flow structure.
Your role is to engage in multi-step conversations with yourself or the user,
generate long-form content like blogs, screenplays, or SOPs,
and accomplish tasks. You can have internal dialogues with yourself or can interact with the user
Your role is to engage in multi-step conversations with yourself or the user,
generate long-form content like blogs, screenplays, or SOPs,
and accomplish tasks. You can have internal dialogues with yourself or can interact with the user
to aid in these complex tasks. Your responses should be coherent, contextually relevant, and tailored to the task at hand.
@ -239,7 +239,7 @@ class Flow:
Dynamic Temperature: {self.dynamic_temperature}
Autosave: {self.autosave}
Saved State: {self.saved_state_path}
----------------------------------------
""",
"green",
@ -259,7 +259,10 @@ class Flow:
except Exception as error:
print(
colored(
"Error activating autonomous agent. Try optimizing your parameters...",
(
"Error activating autonomous agent. Try optimizing your"
" parameters..."
),
"red",
)
)

@ -257,9 +257,9 @@ class SequentialWorkflow:
Metadata:
kwargs: {kwargs}
""",
"cyan",
attrs=["bold", "underline"],
@ -348,7 +348,8 @@ class SequentialWorkflow:
# Ensure that 'task' is provided in the kwargs
if "task" not in task.kwargs:
raise ValueError(
f"The 'task' argument is required for the Flow flow execution in '{task.description}'"
"The 'task' argument is required for the Flow flow"
f" execution in '{task.description}'"
)
# Separate the 'task' argument from other kwargs
flow_task_arg = task.kwargs.pop("task")
@ -376,7 +377,11 @@ class SequentialWorkflow:
except Exception as e:
print(
colored(
f"Error initializing the Sequential workflow: {e} try optimizing your inputs like the flow class and task description",
(
f"Error initializing the Sequential workflow: {e} try"
" optimizing your inputs like the flow class and task"
" description"
),
"red",
attrs=["bold", "underline"],
)
@ -399,7 +404,8 @@ class SequentialWorkflow:
# Ensure that 'task' is provided in the kwargs
if "task" not in task.kwargs:
raise ValueError(
f"The 'task' argument is required for the Flow flow execution in '{task.description}'"
"The 'task' argument is required for the Flow flow"
f" execution in '{task.description}'"
)
# Separate the 'task' argument from other kwargs
flow_task_arg = task.kwargs.pop("task")

@ -54,7 +54,8 @@ class GroupChat:
n_agents = len(self.agent_names)
if n_agents < 3:
logger.warning(
f"GroupChat is underpopulated with {n_agents} agents. Direct communication would be more efficient."
f"GroupChat is underpopulated with {n_agents} agents. Direct"
" communication would be more efficient."
)
name = selector.generate_reply(
@ -63,7 +64,11 @@ class GroupChat:
+ [
{
"role": "system",
"content": f"Read the above conversation. Then select the next most suitable role from {self.agent_names} to play. Only return the role.",
"content": (
"Read the above conversation. Then select the next most"
f" suitable role from {self.agent_names} to play. Only"
" return the role."
),
}
]
)

@ -6,7 +6,10 @@ from langchain.output_parsers import RegexParser
# utils
class BidOutputParser(RegexParser):
def get_format_instructions(self) -> str:
return "Your response should be an integrater delimited by angled brackets like this: <int>"
return (
"Your response should be an integrater delimited by angled brackets like"
" this: <int>"
)
bid_parser = BidOutputParser(

@ -153,7 +153,8 @@ class Orchestrator:
except Exception as error:
logging.error(
f"Failed to process task {id(task)} by agent {id(agent)}. Error: {error}"
f"Failed to process task {id(task)} by agent {id(agent)}. Error:"
f" {error}"
)
finally:
with self.condition:

@ -181,8 +181,8 @@ def VQAinference(self, inputs):
answer = processor.decode(out[0], skip_special_tokens=True)
logger.debug(
f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input Question: {question}, "
f"Output Answer: {answer}"
f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input"
f" Question: {question}, Output Answer: {answer}"
)
return answer

@ -75,10 +75,12 @@ class ImageEditing:
@tool(
name="Remove Something From The Photo",
description="useful when you want to remove and object or something from the photo "
"from its description or location. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the object need to be removed. ",
description=(
"useful when you want to remove and object or something from the photo "
"from its description or location. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the object need to be removed. "
),
)
def inference_remove(self, inputs):
image_path, to_be_removed_txt = inputs.split(",")
@ -86,10 +88,12 @@ class ImageEditing:
@tool(
name="Replace Something From The Photo",
description="useful when you want to replace an object from the object description or "
"location with another object from its description. "
"The input to this tool should be a comma separated string of three, "
"representing the image_path, the object to be replaced, the object to be replaced with ",
description=(
"useful when you want to replace an object from the object description or"
" location with another object from its description. The input to this tool"
" should be a comma separated string of three, representing the image_path,"
" the object to be replaced, the object to be replaced with "
),
)
def inference_replace(self, inputs):
image_path, to_be_replaced_txt, replace_with_txt = inputs.split(",")
@ -108,8 +112,9 @@ class ImageEditing:
updated_image.save(updated_image_path)
logger.debug(
f"\nProcessed ImageEditing, Input Image: {image_path}, Replace {to_be_replaced_txt} to {replace_with_txt}, "
f"Output Image: {updated_image_path}"
f"\nProcessed ImageEditing, Input Image: {image_path}, Replace"
f" {to_be_replaced_txt} to {replace_with_txt}, Output Image:"
f" {updated_image_path}"
)
return updated_image_path
@ -131,10 +136,12 @@ class InstructPix2Pix:
@tool(
name="Instruct Image Using Text",
description="useful when you want to the style of the image to be like the text. "
"like: make it look like a painting. or make it like a robot. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the text. ",
description=(
"useful when you want to the style of the image to be like the text. "
"like: make it look like a painting. or make it like a robot. "
"The input to this tool should be a comma separated string of two, "
"representing the image_path and the text. "
),
)
def inference(self, inputs):
"""Change style of image."""
@ -148,8 +155,8 @@ class InstructPix2Pix:
image.save(updated_image_path)
logger.debug(
f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text: {text}, "
f"Output Image: {updated_image_path}"
f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text:"
f" {text}, Output Image: {updated_image_path}"
)
return updated_image_path
@ -172,9 +179,12 @@ class Text2Image:
@tool(
name="Generate Image From User Input Text",
description="useful when you want to generate an image from a user input text and save it to a file. "
"like: generate an image of an object or something, or generate an image that includes some objects. "
"The input to this tool should be a string, representing the text used to generate image. ",
description=(
"useful when you want to generate an image from a user input text and save"
" it to a file. like: generate an image of an object or something, or"
" generate an image that includes some objects. The input to this tool"
" should be a string, representing the text used to generate image. "
),
)
def inference(self, text):
image_filename = os.path.join("image", str(uuid.uuid4())[0:8] + ".png")
@ -183,7 +193,8 @@ class Text2Image:
image.save(image_filename)
logger.debug(
f"\nProcessed Text2Image, Input Text: {text}, Output Image: {image_filename}"
f"\nProcessed Text2Image, Input Text: {text}, Output Image:"
f" {image_filename}"
)
return image_filename
@ -201,9 +212,12 @@ class VisualQuestionAnswering:
@tool(
name="Answer Question About The Image",
description="useful when you need an answer for a question based on an image. "
"like: what is the background color of the last image, how many cats in this figure, what is in this figure. "
"The input to this tool should be a comma separated string of two, representing the image_path and the question",
description=(
"useful when you need an answer for a question based on an image. like:"
" what is the background color of the last image, how many cats in this"
" figure, what is in this figure. The input to this tool should be a comma"
" separated string of two, representing the image_path and the question"
),
)
def inference(self, inputs):
image_path, question = inputs.split(",")
@ -215,8 +229,8 @@ class VisualQuestionAnswering:
answer = self.processor.decode(out[0], skip_special_tokens=True)
logger.debug(
f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input Question: {question}, "
f"Output Answer: {answer}"
f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input"
f" Question: {question}, Output Answer: {answer}"
)
return answer
@ -250,7 +264,8 @@ class ImageCaptioning(BaseHandler):
out = self.model.generate(**inputs)
description = self.processor.decode(out[0], skip_special_tokens=True)
print(
f"\nProcessed ImageCaptioning, Input Image: {filename}, Output Text: {description}"
f"\nProcessed ImageCaptioning, Input Image: {filename}, Output Text:"
f" {description}"
)
return IMAGE_PROMPT.format(filename=filename, description=description)

@ -121,10 +121,10 @@ class ChildTool(BaseTool):
name = cls.__name__
raise SchemaAnnotationError(
f"Tool definition for {name} must include valid type annotations"
f" for argument 'args_schema' to behave as expected.\n"
f"Expected annotation of 'Type[BaseModel]'"
" for argument 'args_schema' to behave as expected.\n"
"Expected annotation of 'Type[BaseModel]'"
f" but got '{args_schema_type}'.\n"
f"Expected class looks like:\n"
"Expected class looks like:\n"
f"{typehint_mandate}"
)
@ -353,7 +353,7 @@ class ChildTool(BaseTool):
observation = self.handle_tool_error(e)
else:
raise ValueError(
f"Got unexpected type of `handle_tool_error`. Expected bool, str "
"Got unexpected type of `handle_tool_error`. Expected bool, str "
f"or callable. Received: {self.handle_tool_error}"
)
run_manager.on_tool_end(
@ -428,7 +428,7 @@ class ChildTool(BaseTool):
observation = self.handle_tool_error(e)
else:
raise ValueError(
f"Got unexpected type of `handle_tool_error`. Expected bool, str "
"Got unexpected type of `handle_tool_error`. Expected bool, str "
f"or callable. Received: {self.handle_tool_error}"
)
await run_manager.on_tool_end(
@ -492,8 +492,7 @@ class Tool(BaseTool):
all_args = list(args) + list(kwargs.values())
if len(all_args) != 1:
raise ToolException(
f"Too many arguments to single-input tool {self.name}."
f" Args: {all_args}"
f"Too many arguments to single-input tool {self.name}. Args: {all_args}"
)
return tuple(all_args), {}
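For reference, a sketch of the dispatch that the type check above guards; the function name and bool semantics are illustrative, not the library's exact code:

def resolve_tool_error(handle_tool_error, error: Exception) -> str:
    """Sketch: handle_tool_error may be a bool, a str, or a callable."""
    if isinstance(handle_tool_error, bool):
        # True -> surface the error message as the tool's observation
        return str(error) if handle_tool_error else ""
    if isinstance(handle_tool_error, str):
        return handle_tool_error  # fixed message used as the observation
    if callable(handle_tool_error):
        return handle_tool_error(error)  # custom formatter receives the exception
    raise ValueError(
        "Got unexpected type of `handle_tool_error`. Expected bool, str"
        f" or callable. Received: {handle_tool_error}"
    )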

@ -98,7 +98,7 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter):
code = self.preprocess_code(code)
if not self.process:
self.start_process()
except:
except BaseException:
yield {"output": traceback.format_exc()}
return
@ -112,7 +112,7 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter):
self.process.stdin.write(code + "\n")
self.process.stdin.flush()
break
except:
except BaseException:
if retry_count != 0:
# For UX, I like to hide this if it happens once. Obviously feels better to not see errors
# Most of the time it doesn't matter, but we should figure out why it happens frequently with:
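The switch from a bare except: to except BaseException: is behavior-preserving (both catch everything, including KeyboardInterrupt and SystemExit) but satisfies linters such as ruff's E722; a tiny sketch:

import traceback

def run_guarded(fn):
    try:
        return fn()
    except BaseException:  # explicit spelling of a bare `except:`
        return traceback.format_exc()  # mirrors the yield {"output": ...} above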

@ -383,7 +383,7 @@ class FileHandler:
if FileType.from_url(url) == FileType.IMAGE:
raise Exception(
f"No handler for {FileType.from_url(url)}. "
f"Please set USE_GPU to True in env/settings.py"
"Please set USE_GPU to True in env/settings.py"
)
else:
raise Exception(f"No handler for {FileType.from_url(url)}")
@ -408,7 +408,8 @@ class CsvToDataframe(BaseHandler):
)
print(
f"\nProcessed CsvToDataframe, Input CSV: {filename}, Output Description: {description}"
f"\nProcessed CsvToDataframe, Input CSV: {filename}, Output Description:"
f" {description}"
)
return DATAFRAME_PROMPT.format(filename=filename, description=description)

@ -163,7 +163,8 @@ class Worker:
except Exception as error:
raise RuntimeError(
f"Error setting up memory perhaps try try tuning the embedding size: {error}"
"Error setting up memory perhaps try try tuning the embedding size:"
f" {error}"
)
def setup_agent(self):
