code quality fixes: line length = 80

pull/180/head
Kye 1 year ago
parent d97de1c009
commit 49c7b97c09

@@ -9,6 +9,9 @@ text = node.run_text("What is your name? Generate a picture of yourself")
img = node.run_img("/image1", "What is this image about?")
chat = node.chat(
    (
        "What is your name? Generate a picture of yourself. What is this image"
        " about?"
    ),
    streaming=True,
)

@@ -10,13 +10,19 @@ config = {
    "plugin_ids": [os.getenv("REVGPT_PLUGIN_IDS")],
    "disable_history": os.getenv("REVGPT_DISABLE_HISTORY") == "True",
    "PUID": os.getenv("REVGPT_PUID"),
    "unverified_plugin_domains": [
        os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")
    ],
}
llm = RevChatGPTModel(access_token=os.getenv("ACCESS_TOKEN"), **config)
worker = Worker(ai_name="Optimus Prime", llm=llm)
task = (
    "What were the winning boston marathon times for the past 5 years (ending"
    " in 2022)? Generate a table of the year, name, country of origin, and"
    " times."
)
response = worker.run(task)
print(response)

@@ -103,7 +103,8 @@ class AccountantSwarms:
        # Provide decision making support to the accountant
        decision_making_support_agent_output = decision_making_support_agent.run(
            f"{self.decision_making_support_agent_instructions}:"
            f" {summary_agent_output}"
        )
        return decision_making_support_agent_output
@@ -113,5 +114,7 @@ swarm = AccountantSwarms(
    pdf_path="tesla.pdf",
    fraud_detection_instructions="Detect fraud in the document",
    summary_agent_instructions="Generate an actionable summary of the document",
    decision_making_support_agent_instructions=(
        "Provide decision making support to the business owner:"
    ),
)

@@ -48,6 +48,7 @@ paper_implementor_agent = Flow(
paper = pdf_to_text(PDF_PATH)
algorithmic_psuedocode_agent = paper_summarizer_agent.run(
    "Focus on creating the algorithmic pseudocode for the novel method in this"
    f" paper: {paper}"
)
pytorch_code = paper_implementor_agent.run(algorithmic_psuedocode_agent)

@@ -9,11 +9,18 @@ class AutoTemp:
    """
    def __init__(
        self,
        api_key,
        default_temp=0.0,
        alt_temps=None,
        auto_select=True,
        max_workers=6,
    ):
        self.api_key = api_key
        self.default_temp = default_temp
        self.alt_temps = (
            alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
        )
        self.auto_select = auto_select
        self.max_workers = max_workers
        self.llm = OpenAIChat(
@@ -62,12 +69,15 @@ class AutoTemp:
        if not scores:
            return "No valid outputs generated.", None
        sorted_scores = sorted(
            scores.items(), key=lambda item: item[1], reverse=True
        )
        best_temp, best_score = sorted_scores[0]
        best_output = outputs[best_temp]
        return (
            f"Best AutoTemp Output (Temp {best_temp} | Score:"
            f" {best_score}):\n{best_output}"
            if self.auto_select
            else "\n".join(
                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"

@@ -7,7 +7,10 @@ from swarms.structs import SequentialWorkflow
class BlogGen:
    def __init__(
        self,
        api_key,
        blog_topic,
        temperature_range: str = "0.4,0.6,0.8,1.0,1.2",
    ):  # Add blog_topic as an argument
        self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8)
        self.auto_temp = AutoTemp(api_key)
@@ -40,7 +43,10 @@ class BlogGen:
        topic_output = topic_result.generations[0][0].text
        print(
            colored(
                (
                    "\nTopic Selection Task"
                    f" Output:\n----------------------------\n{topic_output}\n"
                ),
                "white",
            )
        )
@@ -58,7 +64,10 @@ class BlogGen:
        initial_draft_output = auto_temp_output  # Assuming AutoTemp.run returns the best output directly
        print(
            colored(
                (
                    "\nInitial Draft"
                    f" Output:\n----------------------------\n{initial_draft_output}\n"
                ),
                "white",
            )
        )
@@ -71,7 +80,10 @@ class BlogGen:
        review_output = review_result.generations[0][0].text
        print(
            colored(
                (
                    "\nReview"
                    f" Output:\n----------------------------\n{review_output}\n"
                ),
                "white",
            )
        )
@@ -80,22 +92,28 @@ class BlogGen:
        distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace(
            "{{ARTICLE_TOPIC}}", chosen_topic
        )
        distribution_result = self.openai_chat.generate(
            [distribution_prompt]
        )
        distribution_output = distribution_result.generations[0][0].text
        print(
            colored(
                (
                    "\nDistribution"
                    f" Output:\n----------------------------\n{distribution_output}\n"
                ),
                "white",
            )
        )
        # Final compilation of the blog
        final_blog_content = f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}"
        print(
            colored(
                (
                    "\nFinal Blog"
                    f" Content:\n----------------------------\n{final_blog_content}\n"
                ),
                "green",
            )
        )

@@ -4,7 +4,10 @@ from swarms.models import Idefics
# Multi Modality Auto Agent
llm = Idefics(max_length=2000)
task = (
    "User: What is in this image?"
    " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
)
## Initialize the workflow
flow = Flow(

@@ -10,9 +10,16 @@ load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
# Define prompts for various tasks
MEAL_PLAN_PROMPT = (
    "Based on the following user preferences: dietary restrictions as"
    " vegetarian, preferred cuisines as Italian and Indian, a total caloric"
    " intake of around 2000 calories per day, and an exclusion of legumes,"
    " create a detailed weekly meal plan. Include a variety of meals for"
    " breakfast, lunch, dinner, and optional snacks."
)
IMAGE_ANALYSIS_PROMPT = (
    "Identify the items in this fridge, including their quantities and"
    " condition."
)
@@ -45,7 +52,9 @@ def create_vision_agent(image_path):
                    {"type": "text", "text": IMAGE_ANALYSIS_PROMPT},
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        },
                    },
                ],
            }
@@ -53,7 +62,9 @@ def create_vision_agent(image_path):
        "max_tokens": 300,
    }
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        headers=headers,
        json=payload,
    )
    return response.json()
@@ -65,10 +76,11 @@ def generate_integrated_shopping_list(
    # Prepare the prompt for the LLM
    fridge_contents = image_analysis["choices"][0]["message"]["content"]
    prompt = (
        f"Based on this meal plan: {meal_plan_output}, and the following items"
        f" in the fridge: {fridge_contents}, considering dietary preferences as"
        " vegetarian with a preference for Italian and Indian cuisines,"
        " generate a comprehensive shopping list that includes only the items"
        " needed."
    )
    # Send the prompt to the LLM and return the response
@@ -94,7 +106,9 @@ user_preferences = {
}
# Generate Meal Plan
meal_plan_output = meal_plan_agent.run(
    f"Generate a meal plan: {user_preferences}"
)
# Vision Agent - Analyze an Image
image_analysis_output = create_vision_agent("full_fridge.jpg")

@@ -39,9 +39,9 @@ def get_review_prompt(article):
def social_media_prompt(article: str, goal: str = "Clicks and engagement"):
    prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace(
        "{{ARTICLE}}", article
    ).replace("{{GOAL}}", goal)
    return prompt
@@ -50,7 +50,8 @@ topic_selection_task = (
    "Generate 10 topics on gaining mental clarity using ancient practices"
)
topics = llm(
    f"Your System Instructions: {TOPIC_GENERATOR}, Your current task:"
    f" {topic_selection_task}"
)
dashboard = print(
@@ -109,7 +110,9 @@ reviewed_draft = print(
# Agent that publishes on social media
distribution_agent = llm(
    social_media_prompt(draft_blog, goal="Clicks and engagement")
)
distribution_agent_out = print(
    colored(
        f"""

@@ -1,6 +1,8 @@
from swarms.models.bioclip import BioClip
clip = BioClip(
    "hf-hub:microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224"
)
labels = [
    "adenocarcinoma histopathology",

@@ -2,11 +2,17 @@ from swarms.models import idefics
model = idefics()
user_input = (
    "User: What is in this image?"
    " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
)
response = model.chat(user_input)
print(response)
user_input = (
    "User: And who is that?"
    " https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
)
response = model.chat(user_input)
print(response)

@@ -28,7 +28,9 @@ llama_caller.add_func(
)
# Call the function
result = llama_caller.call_function(
    "get_weather", location="Paris", format="Celsius"
)
print(result)
# Stream a user prompt

@@ -3,5 +3,6 @@ from swarms.models.vilt import Vilt
model = Vilt()
output = model(
    "What is this image",
    "http://images.cocodataset.org/val2017/000000039769.jpg",
)

@@ -30,7 +30,9 @@ async def async_load_playwright(url: str) -> str:
        text = soup.get_text()
        lines = (line.strip() for line in text.splitlines())
        chunks = (
            phrase.strip() for line in lines for phrase in line.split(" ")
        )
        results = "\n".join(chunk for chunk in chunks if chunk)
    except Exception as e:
        results = f"Error: {e}"
@@ -58,5 +60,6 @@ flow = Flow(
)
out = flow.run(
    "Generate a 10,000 word blog on mental clarity and the benefits of"
    " meditation."
)

@@ -36,7 +36,9 @@ class DialogueAgent:
        message = self.model(
            [
                self.system_message,
                HumanMessage(
                    content="\n".join(self.message_history + [self.prefix])
                ),
            ]
        )
        return message.content
@@ -124,19 +126,19 @@ game_description = f"""Here is the topic for the presidential debate: {topic}.
The presidential candidates are: {', '.join(character_names)}."""
player_descriptor_system_message = SystemMessage(
    content=(
        "You can add detail to the description of each presidential candidate."
    )
)
def generate_character_description(character_name):
    character_specifier_prompt = [
        player_descriptor_system_message,
        HumanMessage(content=f"""{game_description}
Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities.
Speak directly to {character_name}.
Do not add anything else."""),
    ]
    character_description = ChatOpenAI(temperature=1.0)(
        character_specifier_prompt
@@ -155,9 +157,7 @@ Your goal is to be as creative as possible and make the voters think you are the
def generate_character_system_message(character_name, character_header):
    return SystemMessage(content=f"""{character_header}
You will speak in the style of {character_name}, and exaggerate their personality.
You will come up with creative ideas related to {topic}.
Do not say the same things over and over again.
@@ -169,13 +169,12 @@ Speak only from the perspective of {character_name}.
Stop speaking the moment you finish speaking from your perspective.
Never forget to keep your response to {word_limit} words!
Do not add anything else.
""")
character_descriptions = [
    generate_character_description(character_name)
    for character_name in character_names
]
character_headers = [
    generate_character_header(character_name, character_description)
@@ -185,7 +184,9 @@ character_headers = [
]
character_system_messages = [
    generate_character_system_message(character_name, character_headers)
    for character_name, character_headers in zip(
        character_names, character_headers
    )
]
for (
@@ -207,7 +208,10 @@ for (
class BidOutputParser(RegexParser):
    def get_format_instructions(self) -> str:
        return (
            "Your response should be an integer delimited by angled brackets,"
            " like this: <int>."
        )
bid_parser = BidOutputParser(
@@ -248,8 +252,7 @@ for character_name, bidding_template in zip(
topic_specifier_prompt = [
    SystemMessage(content="You can make a task more specific."),
    HumanMessage(content=f"""{game_description}
You are the debate moderator.
Please make the debate topic more specific.
@@ -257,8 +260,7 @@ topic_specifier_prompt = [
Be creative and imaginative.
Please reply with the specified topic in {word_limit} words or less.
Speak directly to the presidential candidates: {*character_names,}.
Do not add anything else."""),
]
specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content
@@ -321,7 +323,9 @@ for character_name, character_system_message, bidding_template in zip(
max_iters = 10
n = 0
simulator = DialogueSimulator(
    agents=characters, selection_function=select_next_speaker
)
simulator.reset()
simulator.inject("Debate Moderator", specified_topic)
print(f"(Debate Moderator): {specified_topic}")

@@ -36,7 +36,11 @@ agents = [worker1, worker2, worker3]
debate = MultiAgentDebate(agents, select_speaker)
# Run task
task = (
    "What were the winning boston marathon times for the past 5 years (ending"
    " in 2022)? Generate a table of the year, name, country of origin, and"
    " times."
)
results = debate.run(task, max_iters=4)
# Print results

@@ -10,4 +10,6 @@ node = Worker(
orchestrator = Orchestrator(node, agent_list=[node] * 10, task_queue=[])
# Agent 7 sends a message to Agent 9
orchestrator.chat(
    sender_id=7, receiver_id=9, message="Can you help me with this task?"
)

@@ -10,4 +10,6 @@ node = Worker(
orchestrator = Orchestrator(node, agent_list=[node] * 10, task_queue=[])
# Agent 7 sends a message to Agent 9
orchestrator.chat(
    sender_id=7, receiver_id=9, message="Can you help me with this task?"
)

@@ -7,7 +7,10 @@ api_key = ""
swarm = HierarchicalSwarm(api_key)
# Define an objective
objective = (
    "Find 20 potential customers for a HierarchicalSwarm based AI Agent"
    " automation infrastructure"
)
# Run HierarchicalSwarm
swarm.run(objective)

@@ -85,3 +85,10 @@ aggressive = 3
[tool.ruff]
line-length = 80
[tool.black]
line-length = 80
target-version = ['py38']
preview = true

@@ -18,7 +18,12 @@ from swarms.agents.message import Message
class Step:
    def __init__(
        self,
        task: str,
        id: int,
        dep: List[int],
        args: Dict[str, str],
        tool: BaseTool,
    ):
        self.task = task
        self.id = id

@@ -37,7 +37,7 @@ class BaseVectorStore(ABC):
        self,
        artifacts: dict[str, list[TextArtifact]],
        meta: Optional[dict] = None,
        **kwargs,
    ) -> None:
        execute_futures_dict(
            {
@@ -54,7 +54,7 @@ class BaseVectorStore(ABC):
        artifact: TextArtifact,
        namespace: Optional[str] = None,
        meta: Optional[dict] = None,
        **kwargs,
    ) -> str:
        if not meta:
            meta = {}
@@ -67,7 +67,11 @@ class BaseVectorStore(ABC):
        vector = artifact.generate_embedding(self.embedding_driver)
        return self.upsert_vector(
            vector,
            vector_id=artifact.id,
            namespace=namespace,
            meta=meta,
            **kwargs,
        )
    def upsert_text(
@@ -76,14 +80,14 @@ class BaseVectorStore(ABC):
        vector_id: Optional[str] = None,
        namespace: Optional[str] = None,
        meta: Optional[dict] = None,
        **kwargs,
    ) -> str:
        return self.upsert_vector(
            self.embedding_driver.embed_string(string),
            vector_id=vector_id,
            namespace=namespace,
            meta=meta if meta else {},
            **kwargs,
        )
    @abstractmethod
@@ -93,12 +97,14 @@ class BaseVectorStore(ABC):
        vector_id: Optional[str] = None,
        namespace: Optional[str] = None,
        meta: Optional[dict] = None,
        **kwargs,
    ) -> str:
        ...
    @abstractmethod
    def load_entry(
        self, vector_id: str, namespace: Optional[str] = None
    ) -> Entry:
        ...
    @abstractmethod
@@ -112,6 +118,6 @@ class BaseVectorStore(ABC):
        count: Optional[int] = None,
        namespace: Optional[str] = None,
        include_vectors: bool = False,
        **kwargs,
    ) -> list[QueryResult]:
        ...

@@ -111,7 +111,9 @@ class Chroma(VectorStore):
                    chroma_db_impl="duckdb+parquet",
                )
            else:
                _client_settings = chromadb.config.Settings(
                    is_persistent=True
                )
            _client_settings.persist_directory = persist_directory
        else:
            _client_settings = chromadb.config.Settings()
@@ -124,9 +126,11 @@ class Chroma(VectorStore):
        self._embedding_function = embedding_function
        self._collection = self._client.get_or_create_collection(
            name=collection_name,
            embedding_function=(
                self._embedding_function.embed_documents
                if self._embedding_function is not None
                else None
            ),
            metadata=collection_metadata,
        )
        self.override_relevance_score_fn = relevance_score_fn
@@ -203,7 +207,9 @@ class Chroma(VectorStore):
            metadatas = [metadatas[idx] for idx in non_empty_ids]
            texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
            embeddings_with_metadatas = (
                [embeddings[idx] for idx in non_empty_ids]
                if embeddings
                else None
            )
            ids_with_metadata = [ids[idx] for idx in non_empty_ids]
            try:
@@ -216,7 +222,8 @@ class Chroma(VectorStore):
            except ValueError as e:
                if "Expected metadata value to be" in str(e):
                    msg = (
                        "Try filtering complex metadata from the document"
                        " using "
                        "langchain.vectorstores.utils.filter_complex_metadata."
                    )
                    raise ValueError(e.args[0] + "\n\n" + msg)
@@ -258,7 +265,9 @@ class Chroma(VectorStore):
        Returns:
            List[Document]: List of documents most similar to the query text.
        """
        docs_and_scores = self.similarity_search_with_score(
            query, k, filter=filter
        )
        return [doc for doc, _ in docs_and_scores]
    def similarity_search_by_vector(
@@ -428,7 +437,9 @@ class Chroma(VectorStore):
        candidates = _results_to_docs(results)
        selected_results = [
            r for i, r in enumerate(candidates) if i in mmr_selected
        ]
        return selected_results
    def max_marginal_relevance_search(
@@ -460,7 +471,8 @@ class Chroma(VectorStore):
        """
        if self._embedding_function is None:
            raise ValueError(
                "For MMR search, you must specify an embedding function"
                " oncreation."
            )
        embedding = self._embedding_function.embed_query(query)
@@ -543,7 +555,9 @@ class Chroma(VectorStore):
        """
        return self.update_documents([document_id], [document])
    def update_documents(
        self, ids: List[str], documents: List[Document]
    ) -> None:
        """Update a document in the collection.
        Args:
@@ -554,7 +568,8 @@ class Chroma(VectorStore):
        metadata = [document.metadata for document in documents]
        if self._embedding_function is None:
            raise ValueError(
                "For update, you must specify an embedding function on"
                " creation."
            )
        embeddings = self._embedding_function.embed_documents(text)
@@ -645,7 +660,9 @@ class Chroma(VectorStore):
                    ids=batch[0],
                )
        else:
            chroma_collection.add_texts(
                texts=texts, metadatas=metadatas, ids=ids
            )
        return chroma_collection
    @classmethod

@@ -18,8 +18,8 @@ def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
    Y = np.array(Y)
    if X.shape[1] != Y.shape[1]:
        raise ValueError(
            "Number of columns in X and Y must be the same. X has shape"
            f" {X.shape} and Y has shape {Y.shape}."
        )
    try:
        import simsimd as simd
@@ -32,8 +32,9 @@ def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray:
        return Z
    except ImportError:
        logger.info(
            "Unable to import simsimd, defaulting to NumPy implementation. If"
            " you want to use simsimd please install with `pip install"
            " simsimd`."
        )
        X_norm = np.linalg.norm(X, axis=1)
        Y_norm = np.linalg.norm(Y, axis=1)

@@ -151,7 +151,9 @@ class InMemoryTaskDB(TaskDB):
    ) -> Artifact:
        artifact_id = str(uuid.uuid4())
        artifact = Artifact(
            artifact_id=artifact_id,
            file_name=file_name,
            relative_path=relative_path,
        )
        task = await self.get_task(task_id)
        task.artifacts.append(artifact)

@@ -91,7 +91,9 @@ class OceanDB:
        try:
            return collection.add(documents=[document], ids=[id])
        except Exception as e:
            logging.error(
                f"Failed to append document to the collection. Error {e}"
            )
            raise
    def add_documents(self, collection, documents: List[str], ids: List[str]):
@@ -137,7 +139,9 @@ class OceanDB:
            the results of the query
        """
        try:
            results = collection.query(
                query_texts=query_texts, n_results=n_results
            )
            return results
        except Exception as e:
            logging.error(f"Failed to query the collection. Error {e}")

@@ -89,11 +89,15 @@ class PgVectorVectorStore(BaseVectorStore):
    engine: Optional[Engine] = field(default=None, kw_only=True)
    table_name: str = field(kw_only=True)
    _model: any = field(
        default=Factory(
            lambda self: self.default_vector_model(), takes_self=True
        )
    )
    @connection_string.validator
    def validate_connection_string(
        self, _, connection_string: Optional[str]
    ) -> None:
        # If an engine is provided, the connection string is not used.
        if self.engine is not None:
            return
@@ -104,7 +108,8 @@ class PgVectorVectorStore(BaseVectorStore):
        if not connection_string.startswith("postgresql://"):
            raise ValueError(
                "The connection string must describe a Postgres database"
                " connection"
            )
    @engine.validator
@@ -148,7 +153,7 @@ class PgVectorVectorStore(BaseVectorStore):
        vector_id: Optional[str] = None,
        namespace: Optional[str] = None,
        meta: Optional[dict] = None,
        **kwargs,
    ) -> str:
        """Inserts or updates a vector in the collection."""
        with Session(self.engine) as session:
@@ -208,7 +213,7 @@ class PgVectorVectorStore(BaseVectorStore):
        namespace: Optional[str] = None,
        include_vectors: bool = False,
        distance_metric: str = "cosine_distance",
        **kwargs,
    ) -> list[BaseVectorStore.QueryResult]:
        """Performs a search on the collection to find vectors similar to the provided input vector,
        optionally filtering to only those that match the provided namespace.

@@ -108,7 +108,7 @@ class PineconeVectorStoreStore(BaseVector):
        vector_id: Optional[str] = None,
        namespace: Optional[str] = None,
        meta: Optional[dict] = None,
        **kwargs,
    ) -> str:
        """Upsert vector"""
        vector_id = vector_id if vector_id else str_to_hash(str(vector))
@@ -123,7 +123,9 @@ class PineconeVectorStoreStore(BaseVector):
        self, vector_id: str, namespace: Optional[str] = None
    ) -> Optional[BaseVector.Entry]:
        """Load entry"""
        result = self.index.fetch(
            ids=[vector_id], namespace=namespace
        ).to_dict()
        vectors = list(result["vectors"].values())
        if len(vectors) > 0:
@@ -138,7 +140,9 @@ class PineconeVectorStoreStore(BaseVector):
        else:
            return None
    def load_entries(
        self, namespace: Optional[str] = None
    ) -> list[BaseVector.Entry]:
        """Load entries"""
        # This is a hacky way to query up to 10,000 values from Pinecone. Waiting on an official API for fetching
        # all values from a namespace:
@@ -169,7 +173,7 @@ class PineconeVectorStoreStore(BaseVector):
        include_vectors: bool = False,
        # PineconeVectorStoreStorageDriver-specific params:
        include_metadata=True,
        **kwargs,
    ) -> list[BaseVector.QueryResult]:
        """Query vectors"""
        vector = self.embedding_driver.embed_string(query)
@@ -196,6 +200,9 @@ class PineconeVectorStoreStore(BaseVector):
    def create_index(self, name: str, **kwargs) -> None:
        """Create index"""
        params = {
            "name": name,
            "dimension": self.embedding_driver.dimensions,
        } | kwargs
        pinecone.create_index(**params)

@@ -50,7 +50,9 @@ class StepInput(BaseModel):
class StepOutput(BaseModel):
    __root__: Any = Field(
        ...,
        description=(
            "Output that the task step has produced. Any value is allowed."
        ),
        example='{\n"tokens": 7894,\n"estimated_cost": "0,24$"\n}',
    )
@@ -112,8 +114,9 @@ class Step(StepRequestBody):
        None,
        description="Output of the task step.",
        example=(
            "I am going to use the write_to_file command and write Washington"
            " to a file called output.txt <write_to_file('output.txt',"
            " 'Washington')"
        ),
    )
    additional_output: Optional[StepOutput] = None

@@ -57,7 +57,7 @@ def maximal_marginal_relevance(
def filter_complex_metadata(
    documents: List[Document],
    *,
    allowed_types: Tuple[Type, ...] = (str, bool, int, float),
) -> List[Document]:
    """Filter out metadata types that are not supported for a vector store."""
    updated_documents = []

@@ -7,7 +7,11 @@ sys.stderr = log_file
from swarms.models.anthropic import Anthropic  # noqa: E402
from swarms.models.petals import Petals  # noqa: E402
from swarms.models.mistral import Mistral  # noqa: E402
from swarms.models.openai_models import (
    OpenAI,
    AzureOpenAI,
    OpenAIChat,
)  # noqa: E402
from swarms.models.zephyr import Zephyr  # noqa: E402
from swarms.models.biogpt import BioGPT  # noqa: E402
from swarms.models.huggingface import HuggingfaceLLM  # noqa: E402

@@ -50,7 +50,9 @@ def xor_args(*arg_groups: Tuple[str, ...]) -> Callable:
            ]
            invalid_groups = [i for i, count in enumerate(counts) if count != 1]
            if invalid_groups:
                invalid_group_names = [
                    ", ".join(arg_groups[i]) for i in invalid_groups
                ]
                raise ValueError(
                    "Exactly one argument in each of the following"
                    " groups must be defined:"
@@ -106,7 +108,10 @@ def mock_now(dt_value): # type: ignore
def guard_import(
    module_name: str,
    *,
    pip_name: Optional[str] = None,
    package: Optional[str] = None,
) -> Any:
    """Dynamically imports a module and raises a helpful exception if the module is not
    installed."""
@@ -180,18 +185,18 @@ def build_extra_kwargs(
        if field_name in extra_kwargs:
            raise ValueError(f"Found {field_name} supplied twice.")
        if field_name not in all_required_field_names:
            warnings.warn(f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended.""")
            extra_kwargs[field_name] = values.pop(field_name)
    invalid_model_kwargs = all_required_field_names.intersection(
        extra_kwargs.keys()
    )
    if invalid_model_kwargs:
        raise ValueError(
            f"Parameters {invalid_model_kwargs} should be specified explicitly."
            " Instead they were passed in as part of `model_kwargs` parameter."
        )
    return extra_kwargs
@@ -250,7 +255,9 @@ class _AnthropicCommon(BaseLanguageModel):
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        values["anthropic_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(
                values, "anthropic_api_key", "ANTHROPIC_API_KEY"
            )
        )
        # Get custom api url from environment.
        values["anthropic_api_url"] = get_from_dict_or_env(
@@ -305,7 +312,9 @@ class _AnthropicCommon(BaseLanguageModel):
        """Get the identifying parameters."""
        return {**{}, **self._default_params}
    def _get_anthropic_stop(
        self, stop: Optional[List[str]] = None
    ) -> List[str]:
        if not self.HUMAN_PROMPT or not self.AI_PROMPT:
            raise NameError("Please ensure the anthropic package is loaded")
@@ -354,8 +363,8 @@ class Anthropic(LLM, _AnthropicCommon):
    def raise_warning(cls, values: Dict) -> Dict:
        """Raise warning that this class is deprecated."""
        warnings.warn(
            "This Anthropic LLM is deprecated. Please use `from"
            " langchain.chat_models import ChatAnthropic` instead"
        )
        return values
@@ -372,12 +381,16 @@ class Anthropic(LLM, _AnthropicCommon):
            return prompt  # Already wrapped.
        # Guard against common errors in specifying wrong number of newlines.
        corrected_prompt, n_subs = re.subn(
            r"^\n*Human:", self.HUMAN_PROMPT, prompt
        )
        if n_subs == 1:
            return corrected_prompt
        # As a last resort, wrap the prompt ourselves to emulate instruct-style.
        return (
            f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n"
        )
    def _call(
        self,
@@ -476,7 +489,10 @@ class Anthropic(LLM, _AnthropicCommon):
        params = {**self._default_params, **kwargs}
        for token in self.client.completions.create(
            prompt=self._wrap_prompt(prompt),
            stop_sequences=stop,
            stream=True,
            **params,
        ):
            chunk = GenerationChunk(text=token.completion)
            yield chunk

@@ -98,7 +98,9 @@ class BioClip:
        ) = open_clip.create_model_and_transforms(model_path)
        self.tokenizer = open_clip.get_tokenizer(model_path)
        self.device = (
            torch.device("cuda")
            if torch.cuda.is_available()
            else torch.device("cpu")
        )
        self.model.to(self.device)
        self.model.eval()
@@ -110,13 +112,17 @@ class BioClip:
        template: str = "this is a photo of ",
        context_length: int = 256,
    ):
        image = torch.stack([self.preprocess_val(Image.open(img_path))]).to(
            self.device
        )
        texts = self.tokenizer(
            [template + l for l in labels], context_length=context_length
        ).to(self.device)
        with torch.no_grad():
            image_features, text_features, logit_scale = self.model(
                image, texts
            )
            logits = (
                (logit_scale * image_features @ text_features.t())
                .detach()
@@ -142,7 +148,9 @@ class BioClip:
        title = (
            metadata["filename"]
            + "\n"
            + "\n".join(
                [f"{k}: {v*100:.1f}" for k, v in metadata["top_probs"].items()]
            )
        )
        ax.set_title(title, fontsize=14)
        plt.tight_layout()

@@ -154,7 +154,7 @@ class BioGPT:
            min_length=self.min_length,
            max_length=self.max_length,
            num_beams=num_beams,
            early_stopping=early_stopping,
        )
        return self.tokenizer.decode(beam_output[0], skip_special_tokens=True)

@@ -96,7 +96,9 @@ class BaseCohere(Serializable):
            values, "cohere_api_key", "COHERE_API_KEY"
        )
        client_name = values["user_agent"]
        values["client"] = cohere.Client(
            cohere_api_key, client_name=client_name
        )
        values["async_client"] = cohere.AsyncClient(
            cohere_api_key, client_name=client_name
        )
@@ -172,17 +174,23 @@ class Cohere(LLM, BaseCohere):
        """Return type of llm."""
        return "cohere"
    def _invocation_params(
        self, stop: Optional[List[str]], **kwargs: Any
    ) -> dict:
        params = self._default_params
        if self.stop is not None and stop is not None:
            raise ValueError(
                "`stop` found in both the input and default params."
            )
        elif self.stop is not None:
            params["stop_sequences"] = self.stop
        else:
            params["stop_sequences"] = stop
        return {**params, **kwargs}
    def _process_response(
        self, response: Any, stop: Optional[List[str]]
    ) -> str:
        text = response.generations[0].text
        # If stop tokens are provided, Cohere's endpoint returns them.
        # In order to make this consistent with other endpoints, we strip them.

@@ -169,8 +169,8 @@ class Dalle3:
            print(
                colored(
                    (
                        f"Error running Dalle3: {error} try optimizing your api"
                        " key and or try again"
                    ),
                    "red",
                )
@@ -234,8 +234,8 @@ class Dalle3:
            print(
                colored(
                    (
                        f"Error running Dalle3: {error} try optimizing your api"
                        " key and or try again"
                    ),
                    "red",
                )
@@ -248,8 +248,7 @@ class Dalle3:
        """Print the Dalle3 dashboard"""
        print(
            colored(
                f"""Dalle3 Dashboard:
--------------------
Model: {self.model}
@@ -265,13 +264,14 @@ class Dalle3:
--------------------
""",
                "green",
            )
        )
    def process_batch_concurrently(
        self, tasks: List[str], max_workers: int = 5
    ):
        """
        Process a batch of tasks concurrently
@@ -293,8 +293,12 @@ class Dalle3:
        ['https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png',
        """
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=max_workers
        ) as executor:
            future_to_task = {
                executor.submit(self, task): task for task in tasks
            }
            results = []
            for future in concurrent.futures.as_completed(future_to_task):
                task = future_to_task[future]
@@ -307,14 +311,20 @@ class Dalle3:
                    print(
                        colored(
                            (
                                f"Error running Dalle3: {error} try optimizing"
                                " your api key and or try again"
                            ),
                            "red",
                        )
                    )
                    print(
                        colored(
                            f"Error running Dalle3: {error.http_status}", "red"
                        )
                    )
                    print(
                        colored(f"Error running Dalle3: {error.error}", "red")
                    )
                    raise error
    def _generate_uuid(self):

@@ -28,7 +28,10 @@ def async_retry(max_retries=3, exceptions=(Exception,), delay=1):
                    retries -= 1
                    if retries <= 0:
                        raise
                    print(
                        f"Retry after exception: {e}, Attempts remaining:"
                        f" {retries}"
                    )
                    await asyncio.sleep(delay)
        return wrapper
@@ -62,7 +65,9 @@ class DistilWhisperModel:
    def __init__(self, model_id="distil-whisper/distil-large-v2"):
        self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
        self.torch_dtype = (
            torch.float16 if torch.cuda.is_available() else torch.float32
        )
        self.model_id = model_id
        self.model = AutoModelForSpeechSeq2Seq.from_pretrained(
            model_id,
@@ -119,7 +124,9 @@ class DistilWhisperModel:
        try:
            with torch.no_grad():
                # Load the whole audio file, but process and transcribe it in chunks
                audio_input = self.processor.audio_file_to_array(
                    audio_file_path
                )
                sample_rate = audio_input.sampling_rate
                len(audio_input.array) / sample_rate
                chunks = [
@@ -139,7 +146,9 @@ class DistilWhisperModel:
                        return_tensors="pt",
                        padding=True,
                    )
                    processed_inputs = processed_inputs.input_values.to(
                        self.device
                    )
                    # Generate transcription for the chunk
                    logits = self.model.generate(processed_inputs)
@@ -157,4 +166,6 @@ class DistilWhisperModel:
                    time.sleep(chunk_duration)
        except Exception as e:
            print(
                colored(f"An error occurred during transcription: {e}", "red")
            )

@@ -79,7 +79,9 @@ class ElevenLabsText2SpeechTool(BaseTool):
                f.write(speech)
                return f.name
        except Exception as e:
            raise RuntimeError(
                f"Error while running ElevenLabsText2SpeechTool: {e}"
            )
    def play(self, speech_file: str) -> None:
        """Play the text as speech."""
@@ -93,7 +95,9 @@ class ElevenLabsText2SpeechTool(BaseTool):
        """Stream the text as speech as it is generated.
        Play the text in your speakers."""
        elevenlabs = _import_elevenlabs()
        speech_stream = elevenlabs.generate(
            text=query, model=self.model, stream=True
        )
        elevenlabs.stream(speech_stream)
    def save(self, speech_file: str, path: str) -> None:

@@ -10,7 +10,9 @@ from pydantic import BaseModel, StrictFloat, StrictInt, validator
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the classes for image classification
with open(
    os.path.join(os.path.dirname(__file__), "fast_vit_classes.json")
) as f:
    FASTVIT_IMAGENET_1K_CLASSES = json.load(f)
@@ -20,7 +22,9 @@ class ClassificationResult(BaseModel):
    @validator("class_id", "confidence", pre=True, each_item=True)
    def check_list_contents(cls, v):
        assert isinstance(v, int) or isinstance(
            v, float
        ), "must be integer or float"
        return v
@@ -50,7 +54,9 @@ class FastViT:
            "hf_hub:timm/fastvit_s12.apple_in1k", pretrained=True
        ).to(DEVICE)
        data_config = timm.data.resolve_model_data_config(self.model)
        self.transforms = timm.data.create_transform(
            **data_config, is_training=False
        )
        self.model.eval()
    def __call__(

@ -46,7 +46,9 @@ class Fuyu:
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_path) self.tokenizer = AutoTokenizer.from_pretrained(pretrained_path)
self.image_processor = FuyuImageProcessor() self.image_processor = FuyuImageProcessor()
self.processor = FuyuProcessor( self.processor = FuyuProcessor(
image_processor=self.image_processor, tokenizer=self.tokenizer, **kwargs image_processor=self.image_processor,
tokenizer=self.tokenizer,
**kwargs,
) )
self.model = FuyuForCausalLM.from_pretrained( self.model = FuyuForCausalLM.from_pretrained(
pretrained_path, pretrained_path,
@ -69,8 +71,12 @@ class Fuyu:
for k, v in model_inputs.items(): for k, v in model_inputs.items():
model_inputs[k] = v.to(self.device_map) model_inputs[k] = v.to(self.device_map)
output = self.model.generate(**model_inputs, max_new_tokens=self.max_new_tokens) output = self.model.generate(
text = self.processor.batch_decode(output[:, -7:], skip_special_tokens=True) **model_inputs, max_new_tokens=self.max_new_tokens
)
text = self.processor.batch_decode(
output[:, -7:], skip_special_tokens=True
)
return print(str(text)) return print(str(text))
def get_img_from_web(self, img_url: str): def get_img_from_web(self, img_url: str):

@ -190,12 +190,15 @@ class GPT4Vision:
"""Process a batch of tasks and images""" """Process a batch of tasks and images"""
with concurrent.futures.ThreadPoolExecutor() as executor: with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [ futures = [
executor.submit(self.run, task, img) for task, img in tasks_images executor.submit(self.run, task, img)
for task, img in tasks_images
] ]
results = [future.result() for future in futures] results = [future.result() for future in futures]
return results return results
async def run_batch_async(self, tasks_images: List[Tuple[str, str]]) -> List[str]: async def run_batch_async(
self, tasks_images: List[Tuple[str, str]]
) -> List[str]:
"""Process a batch of tasks and images asynchronously""" """Process a batch of tasks and images asynchronously"""
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()
futures = [ futures = [

@ -133,7 +133,9 @@ class HuggingfaceLLM:
): ):
self.logger = logging.getLogger(__name__) self.logger = logging.getLogger(__name__)
self.device = ( self.device = (
device if device else ("cuda" if torch.cuda.is_available() else "cpu") device
if device
else ("cuda" if torch.cuda.is_available() else "cpu")
) )
self.model_id = model_id self.model_id = model_id
self.max_length = max_length self.max_length = max_length
@ -178,7 +180,11 @@ class HuggingfaceLLM:
except Exception as e: except Exception as e:
# self.logger.error(f"Failed to load the model or the tokenizer: {e}") # self.logger.error(f"Failed to load the model or the tokenizer: {e}")
# raise # raise
print(colored(f"Failed to load the model and or the tokenizer: {e}", "red")) print(
colored(
f"Failed to load the model and or the tokenizer: {e}", "red"
)
)
def print_error(self, error: str): def print_error(self, error: str):
"""Print error""" """Print error"""
@ -207,12 +213,16 @@ class HuggingfaceLLM:
if self.distributed: if self.distributed:
self.model = DDP(self.model) self.model = DDP(self.model)
except Exception as error: except Exception as error:
self.logger.error(f"Failed to load the model or the tokenizer: {error}") self.logger.error(
f"Failed to load the model or the tokenizer: {error}"
)
raise raise
def concurrent_run(self, tasks: List[str], max_workers: int = 5): def concurrent_run(self, tasks: List[str], max_workers: int = 5):
"""Concurrently generate text for a list of prompts.""" """Concurrently generate text for a list of prompts."""
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: with concurrent.futures.ThreadPoolExecutor(
max_workers=max_workers
) as executor:
results = list(executor.map(self.run, tasks)) results = list(executor.map(self.run, tasks))
return results return results
@ -220,7 +230,8 @@ class HuggingfaceLLM:
"""Process a batch of tasks and images""" """Process a batch of tasks and images"""
with concurrent.futures.ThreadPoolExecutor() as executor: with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [ futures = [
executor.submit(self.run, task, img) for task, img in tasks_images executor.submit(self.run, task, img)
for task, img in tasks_images
] ]
results = [future.result() for future in futures] results = [future.result() for future in futures]
return results return results
@ -243,7 +254,9 @@ class HuggingfaceLLM:
self.print_dashboard(task) self.print_dashboard(task)
try: try:
inputs = self.tokenizer.encode(task, return_tensors="pt").to(self.device) inputs = self.tokenizer.encode(task, return_tensors="pt").to(
self.device
)
# self.log.start() # self.log.start()
@ -279,8 +292,8 @@ class HuggingfaceLLM:
print( print(
colored( colored(
( (
f"HuggingfaceLLM could not generate text because of error: {e}," "HuggingfaceLLM could not generate text because of"
" try optimizing your arguments" f" error: {e}, try optimizing your arguments"
), ),
"red", "red",
) )
@ -305,7 +318,9 @@ class HuggingfaceLLM:
self.print_dashboard(task) self.print_dashboard(task)
try: try:
inputs = self.tokenizer.encode(task, return_tensors="pt").to(self.device) inputs = self.tokenizer.encode(task, return_tensors="pt").to(
self.device
)
# self.log.start() # self.log.start()

@ -66,7 +66,9 @@ class Idefics:
max_length=100, max_length=100,
): ):
self.device = ( self.device = (
device if device else ("cuda" if torch.cuda.is_available() else "cpu") device
if device
else ("cuda" if torch.cuda.is_available() else "cpu")
) )
self.model = IdeficsForVisionText2Text.from_pretrained( self.model = IdeficsForVisionText2Text.from_pretrained(
checkpoint, checkpoint,

@ -54,7 +54,9 @@ class JinaEmbeddings:
): ):
self.logger = logging.getLogger(__name__) self.logger = logging.getLogger(__name__)
self.device = ( self.device = (
device if device else ("cuda" if torch.cuda.is_available() else "cpu") device
if device
else ("cuda" if torch.cuda.is_available() else "cpu")
) )
self.model_id = model_id self.model_id = model_id
self.max_length = max_length self.max_length = max_length
@ -83,7 +85,9 @@ class JinaEmbeddings:
try: try:
self.model = AutoModelForCausalLM.from_pretrained( self.model = AutoModelForCausalLM.from_pretrained(
self.model_id, quantization_config=bnb_config, trust_remote_code=True self.model_id,
quantization_config=bnb_config,
trust_remote_code=True,
) )
self.model # .to(self.device) self.model # .to(self.device)
@ -112,7 +116,9 @@ class JinaEmbeddings:
if self.distributed: if self.distributed:
self.model = DDP(self.model) self.model = DDP(self.model)
except Exception as error: except Exception as error:
self.logger.error(f"Failed to load the model or the tokenizer: {error}") self.logger.error(
f"Failed to load the model or the tokenizer: {error}"
)
raise raise
def run(self, task: str): def run(self, task: str):

@ -70,11 +70,13 @@ class Kosmos2(BaseModel):
prompt = "<grounding>An image of" prompt = "<grounding>An image of"
inputs = self.processor(text=prompt, images=image, return_tensors="pt") inputs = self.processor(text=prompt, images=image, return_tensors="pt")
outputs = self.model.generate(**inputs, use_cache=True, max_new_tokens=64) outputs = self.model.generate(
**inputs, use_cache=True, max_new_tokens=64
)
generated_text = self.processor.batch_decode(outputs, skip_special_tokens=True)[ generated_text = self.processor.batch_decode(
0 outputs, skip_special_tokens=True
] )[0]
# The actual processing of generated_text to entities would go here # The actual processing of generated_text to entities would go here
# For the purpose of this example, assume a mock function 'extract_entities' exists: # For the purpose of this example, assume a mock function 'extract_entities' exists:
@ -99,7 +101,9 @@ class Kosmos2(BaseModel):
if not entities: if not entities:
return Detections.empty() return Detections.empty()
class_ids = [0] * len(entities) # Replace with actual class ID extraction logic class_ids = [0] * len(
entities
) # Replace with actual class ID extraction logic
xyxys = [ xyxys = [
( (
e[1][0] * image.width, e[1][0] * image.width,
@ -111,7 +115,9 @@ class Kosmos2(BaseModel):
] ]
confidences = [1.0] * len(entities) # Placeholder confidence confidences = [1.0] * len(entities) # Placeholder confidence
return Detections(xyxy=xyxys, class_id=class_ids, confidence=confidences) return Detections(
xyxy=xyxys, class_id=class_ids, confidence=confidences
)
# Usage: # Usage:

@ -145,12 +145,12 @@ class Kosmos:
elif isinstance(image, torch.Tensor): elif isinstance(image, torch.Tensor):
# pdb.set_trace() # pdb.set_trace()
image_tensor = image.cpu() image_tensor = image.cpu()
reverse_norm_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073])[ reverse_norm_mean = torch.tensor(
:, None, None [0.48145466, 0.4578275, 0.40821073]
] )[:, None, None]
reverse_norm_std = torch.tensor([0.26862954, 0.26130258, 0.27577711])[ reverse_norm_std = torch.tensor(
:, None, None [0.26862954, 0.26130258, 0.27577711]
] )[:, None, None]
image_tensor = image_tensor * reverse_norm_std + reverse_norm_mean image_tensor = image_tensor * reverse_norm_std + reverse_norm_mean
pil_img = T.ToPILImage()(image_tensor) pil_img = T.ToPILImage()(image_tensor)
image_h = pil_img.height image_h = pil_img.height
@ -188,7 +188,11 @@ class Kosmos:
# random color # random color
color = tuple(np.random.randint(0, 255, size=3).tolist()) color = tuple(np.random.randint(0, 255, size=3).tolist())
new_image = cv2.rectangle( new_image = cv2.rectangle(
new_image, (orig_x1, orig_y1), (orig_x2, orig_y2), color, box_line new_image,
(orig_x1, orig_y1),
(orig_x2, orig_y2),
color,
box_line,
) )
l_o, r_o = ( l_o, r_o = (
@ -211,7 +215,10 @@ class Kosmos:
# add text background # add text background
(text_width, text_height), _ = cv2.getTextSize( (text_width, text_height), _ = cv2.getTextSize(
f" {entity_name}", cv2.FONT_HERSHEY_COMPLEX, text_size, text_line f" {entity_name}",
cv2.FONT_HERSHEY_COMPLEX,
text_size,
text_line,
) )
text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2 = ( text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2 = (
x1, x1,
@ -222,7 +229,8 @@ class Kosmos:
for prev_bbox in previous_bboxes: for prev_bbox in previous_bboxes:
while is_overlapping( while is_overlapping(
(text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2), prev_bbox (text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2),
prev_bbox,
): ):
text_bg_y1 += ( text_bg_y1 += (
text_height + text_offset_original + 2 * text_spaces text_height + text_offset_original + 2 * text_spaces
@ -230,14 +238,18 @@ class Kosmos:
text_bg_y2 += ( text_bg_y2 += (
text_height + text_offset_original + 2 * text_spaces text_height + text_offset_original + 2 * text_spaces
) )
y1 += text_height + text_offset_original + 2 * text_spaces y1 += (
text_height + text_offset_original + 2 * text_spaces
)
if text_bg_y2 >= image_h: if text_bg_y2 >= image_h:
text_bg_y1 = max( text_bg_y1 = max(
0, 0,
image_h image_h
- ( - (
text_height + text_offset_original + 2 * text_spaces text_height
+ text_offset_original
+ 2 * text_spaces
), ),
) )
text_bg_y2 = image_h text_bg_y2 = image_h
@ -270,7 +282,9 @@ class Kosmos:
cv2.LINE_AA, cv2.LINE_AA,
) )
# previous_locations.append((x1, y1)) # previous_locations.append((x1, y1))
previous_bboxes.append((text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2)) previous_bboxes.append(
(text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2)
)
pil_image = Image.fromarray(new_image[:, :, [2, 1, 0]]) pil_image = Image.fromarray(new_image[:, :, [2, 1, 0]])
if save_path: if save_path:

@ -121,7 +121,11 @@ class LlamaFunctionCaller:
) )
def add_func( def add_func(
self, name: str, function: Callable, description: str, arguments: List[Dict] self,
name: str,
function: Callable,
description: str,
arguments: List[Dict],
): ):
""" """
Adds a new function to the LlamaFunctionCaller. Adds a new function to the LlamaFunctionCaller.
@ -172,12 +176,17 @@ class LlamaFunctionCaller:
if self.streaming: if self.streaming:
out = self.model.generate( out = self.model.generate(
**inputs, streamer=streamer, max_new_tokens=self.max_tokens, **kwargs **inputs,
streamer=streamer,
max_new_tokens=self.max_tokens,
**kwargs,
) )
return out return out
else: else:
out = self.model.generate(**inputs, max_length=self.max_tokens, **kwargs) out = self.model.generate(
**inputs, max_length=self.max_tokens, **kwargs
)
# return self.tokenizer.decode(out[0], skip_special_tokens=True) # return self.tokenizer.decode(out[0], skip_special_tokens=True)
return out return out

@ -49,7 +49,9 @@ class Mistral:
# Check if the specified device is available # Check if the specified device is available
if not torch.cuda.is_available() and device == "cuda": if not torch.cuda.is_available() and device == "cuda":
raise ValueError("CUDA is not available. Please choose a different device.") raise ValueError(
"CUDA is not available. Please choose a different device."
)
# Load the model and tokenizer # Load the model and tokenizer
self.model = None self.model = None
@ -70,7 +72,9 @@ class Mistral:
"""Run the model on a given task.""" """Run the model on a given task."""
try: try:
model_inputs = self.tokenizer([task], return_tensors="pt").to(self.device) model_inputs = self.tokenizer([task], return_tensors="pt").to(
self.device
)
generated_ids = self.model.generate( generated_ids = self.model.generate(
**model_inputs, **model_inputs,
max_length=self.max_length, max_length=self.max_length,
@ -87,7 +91,9 @@ class Mistral:
"""Run the model on a given task.""" """Run the model on a given task."""
try: try:
model_inputs = self.tokenizer([task], return_tensors="pt").to(self.device) model_inputs = self.tokenizer([task], return_tensors="pt").to(
self.device
)
generated_ids = self.model.generate( generated_ids = self.model.generate(
**model_inputs, **model_inputs,
max_length=self.max_length, max_length=self.max_length,

@ -29,7 +29,9 @@ class MPT7B:
""" """
def __init__(self, model_name: str, tokenizer_name: str, max_tokens: int = 100): def __init__(
self, model_name: str, tokenizer_name: str, max_tokens: int = 100
):
# Loading model and tokenizer details # Loading model and tokenizer details
self.model_name = model_name self.model_name = model_name
self.tokenizer_name = tokenizer_name self.tokenizer_name = tokenizer_name
@ -118,7 +120,10 @@ class MPT7B:
""" """
with torch.autocast("cuda", dtype=torch.bfloat16): with torch.autocast("cuda", dtype=torch.bfloat16):
return self.pipe( return self.pipe(
prompt, max_new_tokens=self.max_tokens, do_sample=True, use_cache=True prompt,
max_new_tokens=self.max_tokens,
do_sample=True,
use_cache=True,
)[0]["generated_text"] )[0]["generated_text"]
async def generate_async(self, prompt: str) -> str: async def generate_async(self, prompt: str) -> str:

@ -41,8 +41,12 @@ class Nougat:
self.min_length = min_length self.min_length = min_length
self.max_new_tokens = max_new_tokens self.max_new_tokens = max_new_tokens
self.processor = NougatProcessor.from_pretrained(self.model_name_or_path) self.processor = NougatProcessor.from_pretrained(
self.model = VisionEncoderDecoderModel.from_pretrained(self.model_name_or_path) self.model_name_or_path
)
self.model = VisionEncoderDecoderModel.from_pretrained(
self.model_name_or_path
)
self.device = "cuda" if torch.cuda.is_available() else "cpu" self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.model.to(self.device) self.model.to(self.device)
@ -63,8 +67,12 @@ class Nougat:
max_new_tokens=self.max_new_tokens, max_new_tokens=self.max_new_tokens,
) )
sequence = self.processor.batch_decode(outputs, skip_special_tokens=True)[0] sequence = self.processor.batch_decode(
sequence = self.processor.post_process_generation(sequence, fix_markdown=False) outputs, skip_special_tokens=True
)[0]
sequence = self.processor.post_process_generation(
sequence, fix_markdown=False
)
out = print(sequence) out = print(sequence)
return out return out

@ -43,7 +43,9 @@ def get_pydantic_field_names(cls: Any) -> Set[str]:
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]: def _create_retry_decorator(
embeddings: OpenAIEmbeddings,
) -> Callable[[Any], Any]:
import llm import llm
min_seconds = 4 min_seconds = 4
@ -118,7 +120,9 @@ def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
return _embed_with_retry(**kwargs) return _embed_with_retry(**kwargs)
async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: async def async_embed_with_retry(
embeddings: OpenAIEmbeddings, **kwargs: Any
) -> Any:
"""Use tenacity to retry the embedding call.""" """Use tenacity to retry the embedding call."""
@_async_retry_decorator(embeddings) @_async_retry_decorator(embeddings)
@ -172,7 +176,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
client: Any #: :meta private: client: Any #: :meta private:
model: str = "text-embedding-ada-002" model: str = "text-embedding-ada-002"
deployment: str = model # to support Azure OpenAI Service custom deployment names deployment: str = (
model # to support Azure OpenAI Service custom deployment names
)
openai_api_version: Optional[str] = None openai_api_version: Optional[str] = None
# to support Azure OpenAI Service custom endpoints # to support Azure OpenAI Service custom endpoints
openai_api_base: Optional[str] = None openai_api_base: Optional[str] = None
@ -229,11 +235,14 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
) )
extra[field_name] = values.pop(field_name) extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) invalid_model_kwargs = all_required_field_names.intersection(
extra.keys()
)
if invalid_model_kwargs: if invalid_model_kwargs:
raise ValueError( raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Parameters {invalid_model_kwargs} should be specified"
"Instead they were passed in as part of `model_kwargs` parameter." " explicitly. Instead they were passed in as part of"
" `model_kwargs` parameter."
) )
values["model_kwargs"] = extra values["model_kwargs"] = extra
@ -333,7 +342,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
try: try:
encoding = tiktoken.encoding_for_model(model_name) encoding = tiktoken.encoding_for_model(model_name)
except KeyError: except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.") logger.warning(
"Warning: model not found. Using cl100k_base encoding."
)
model = "cl100k_base" model = "cl100k_base"
encoding = tiktoken.get_encoding(model) encoding = tiktoken.get_encoding(model)
for i, text in enumerate(texts): for i, text in enumerate(texts):
@ -384,11 +395,11 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
self, self,
input="", input="",
**self._invocation_params, **self._invocation_params,
)[ )["data"][0]["embedding"]
"data"
][0]["embedding"]
else: else:
average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) average = np.average(
_result, axis=0, weights=num_tokens_in_batch[i]
)
embeddings[i] = (average / np.linalg.norm(average)).tolist() embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings return embeddings
@ -414,7 +425,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
try: try:
encoding = tiktoken.encoding_for_model(model_name) encoding = tiktoken.encoding_for_model(model_name)
except KeyError: except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.") logger.warning(
"Warning: model not found. Using cl100k_base encoding."
)
model = "cl100k_base" model = "cl100k_base"
encoding = tiktoken.get_encoding(model) encoding = tiktoken.get_encoding(model)
for i, text in enumerate(texts): for i, text in enumerate(texts):
@ -458,7 +471,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
) )
)["data"][0]["embedding"] )["data"][0]["embedding"]
else: else:
average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) average = np.average(
_result, axis=0, weights=num_tokens_in_batch[i]
)
embeddings[i] = (average / np.linalg.norm(average)).tolist() embeddings[i] = (average / np.linalg.norm(average)).tolist()
return embeddings return embeddings
@ -495,7 +510,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
""" """
# NOTE: to keep things simple, we assume the list may contain texts longer # NOTE: to keep things simple, we assume the list may contain texts longer
# than the maximum context and use length-safe embedding function. # than the maximum context and use length-safe embedding function.
return await self._aget_len_safe_embeddings(texts, engine=self.deployment) return await self._aget_len_safe_embeddings(
texts, engine=self.deployment
)
def embed_query(self, text: str) -> List[float]: def embed_query(self, text: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint for embedding query text. """Call out to OpenAI's embedding endpoint for embedding query text.

@ -146,7 +146,8 @@ class OpenAIFunctionCaller:
self.messages.append({"role": role, "content": content}) self.messages.append({"role": role, "content": content})
@retry( @retry(
wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3) wait=wait_random_exponential(multiplier=1, max=40),
stop=stop_after_attempt(3),
) )
def chat_completion_request( def chat_completion_request(
self, self,
@ -194,17 +195,22 @@ class OpenAIFunctionCaller:
elif message["role"] == "user": elif message["role"] == "user":
print( print(
colored( colored(
f"user: {message['content']}\n", role_to_color[message["role"]] f"user: {message['content']}\n",
role_to_color[message["role"]],
) )
) )
elif message["role"] == "assistant" and message.get("function_call"): elif message["role"] == "assistant" and message.get(
"function_call"
):
print( print(
colored( colored(
f"assistant: {message['function_call']}\n", f"assistant: {message['function_call']}\n",
role_to_color[message["role"]], role_to_color[message["role"]],
) )
) )
elif message["role"] == "assistant" and not message.get("function_call"): elif message["role"] == "assistant" and not message.get(
"function_call"
):
print( print(
colored( colored(
f"assistant: {message['content']}\n", f"assistant: {message['content']}\n",

@ -62,19 +62,25 @@ def _stream_response_to_generation_chunk(
return GenerationChunk( return GenerationChunk(
text=stream_response["choices"][0]["text"], text=stream_response["choices"][0]["text"],
generation_info=dict( generation_info=dict(
finish_reason=stream_response["choices"][0].get("finish_reason", None), finish_reason=stream_response["choices"][0].get(
"finish_reason", None
),
logprobs=stream_response["choices"][0].get("logprobs", None), logprobs=stream_response["choices"][0].get("logprobs", None),
), ),
) )
def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None: def _update_response(
response: Dict[str, Any], stream_response: Dict[str, Any]
) -> None:
"""Update response from the stream response.""" """Update response from the stream response."""
response["choices"][0]["text"] += stream_response["choices"][0]["text"] response["choices"][0]["text"] += stream_response["choices"][0]["text"]
response["choices"][0]["finish_reason"] = stream_response["choices"][0].get( response["choices"][0]["finish_reason"] = stream_response["choices"][0].get(
"finish_reason", None "finish_reason", None
) )
response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"] response["choices"][0]["logprobs"] = stream_response["choices"][0][
"logprobs"
]
def _streaming_response_template() -> Dict[str, Any]: def _streaming_response_template() -> Dict[str, Any]:
@ -315,9 +321,11 @@ class BaseOpenAI(BaseLLM):
chunk.text, chunk.text,
chunk=chunk, chunk=chunk,
verbose=self.verbose, verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"] logprobs=(
if chunk.generation_info chunk.generation_info["logprobs"]
else None, if chunk.generation_info
else None
),
) )
async def _astream( async def _astream(
@ -339,9 +347,11 @@ class BaseOpenAI(BaseLLM):
chunk.text, chunk.text,
chunk=chunk, chunk=chunk,
verbose=self.verbose, verbose=self.verbose,
logprobs=chunk.generation_info["logprobs"] logprobs=(
if chunk.generation_info chunk.generation_info["logprobs"]
else None, if chunk.generation_info
else None
),
) )
def _generate( def _generate(
@ -377,10 +387,14 @@ class BaseOpenAI(BaseLLM):
for _prompts in sub_prompts: for _prompts in sub_prompts:
if self.streaming: if self.streaming:
if len(_prompts) > 1: if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.") raise ValueError(
"Cannot stream results with multiple prompts."
)
generation: Optional[GenerationChunk] = None generation: Optional[GenerationChunk] = None
for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs): for chunk in self._stream(
_prompts[0], stop, run_manager, **kwargs
):
if generation is None: if generation is None:
generation = chunk generation = chunk
else: else:
@ -389,12 +403,16 @@ class BaseOpenAI(BaseLLM):
choices.append( choices.append(
{ {
"text": generation.text, "text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason") "finish_reason": (
if generation.generation_info generation.generation_info.get("finish_reason")
else None, if generation.generation_info
"logprobs": generation.generation_info.get("logprobs") else None
if generation.generation_info ),
else None, "logprobs": (
generation.generation_info.get("logprobs")
if generation.generation_info
else None
),
} }
) )
else: else:
@ -424,7 +442,9 @@ class BaseOpenAI(BaseLLM):
for _prompts in sub_prompts: for _prompts in sub_prompts:
if self.streaming: if self.streaming:
if len(_prompts) > 1: if len(_prompts) > 1:
raise ValueError("Cannot stream results with multiple prompts.") raise ValueError(
"Cannot stream results with multiple prompts."
)
generation: Optional[GenerationChunk] = None generation: Optional[GenerationChunk] = None
async for chunk in self._astream( async for chunk in self._astream(
@ -438,12 +458,16 @@ class BaseOpenAI(BaseLLM):
choices.append( choices.append(
{ {
"text": generation.text, "text": generation.text,
"finish_reason": generation.generation_info.get("finish_reason") "finish_reason": (
if generation.generation_info generation.generation_info.get("finish_reason")
else None, if generation.generation_info
"logprobs": generation.generation_info.get("logprobs") else None
if generation.generation_info ),
else None, "logprobs": (
generation.generation_info.get("logprobs")
if generation.generation_info
else None
),
} }
) )
else: else:
@ -463,7 +487,9 @@ class BaseOpenAI(BaseLLM):
"""Get the sub prompts for llm call.""" """Get the sub prompts for llm call."""
if stop is not None: if stop is not None:
if "stop" in params: if "stop" in params:
raise ValueError("`stop` found in both the input and default params.") raise ValueError(
"`stop` found in both the input and default params."
)
params["stop"] = stop params["stop"] = stop
if params["max_tokens"] == -1: if params["max_tokens"] == -1:
if len(prompts) != 1: if len(prompts) != 1:
@ -541,7 +567,9 @@ class BaseOpenAI(BaseLLM):
try: try:
enc = tiktoken.encoding_for_model(model_name) enc = tiktoken.encoding_for_model(model_name)
except KeyError: except KeyError:
logger.warning("Warning: model not found. Using cl100k_base encoding.") logger.warning(
"Warning: model not found. Using cl100k_base encoding."
)
model = "cl100k_base" model = "cl100k_base"
enc = tiktoken.get_encoding(model) enc = tiktoken.get_encoding(model)
@ -602,8 +630,9 @@ class BaseOpenAI(BaseLLM):
if context_size is None: if context_size is None:
raise ValueError( raise ValueError(
f"Unknown model: {modelname}. Please provide a valid OpenAI model name." f"Unknown model: {modelname}. Please provide a valid OpenAI"
"Known models are: " + ", ".join(model_token_mapping.keys()) " model name.Known models are: "
+ ", ".join(model_token_mapping.keys())
) )
return context_size return context_size
@ -753,7 +782,9 @@ class OpenAIChat(BaseLLM):
@root_validator(pre=True) @root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in.""" """Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()} all_required_field_names = {
field.alias for field in cls.__fields__.values()
}
extra = values.get("model_kwargs", {}) extra = values.get("model_kwargs", {})
for field_name in list(values): for field_name in list(values):
@ -820,13 +851,21 @@ class OpenAIChat(BaseLLM):
) -> Tuple: ) -> Tuple:
if len(prompts) > 1: if len(prompts) > 1:
raise ValueError( raise ValueError(
f"OpenAIChat currently only supports single prompt, got {prompts}" "OpenAIChat currently only supports single prompt, got"
f" {prompts}"
) )
messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}] messages = self.prefix_messages + [
params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params} {"role": "user", "content": prompts[0]}
]
params: Dict[str, Any] = {
**{"model": self.model_name},
**self._default_params,
}
if stop is not None: if stop is not None:
if "stop" in params: if "stop" in params:
raise ValueError("`stop` found in both the input and default params.") raise ValueError(
"`stop` found in both the input and default params."
)
params["stop"] = stop params["stop"] = stop
if params.get("max_tokens") == -1: if params.get("max_tokens") == -1:
# for ChatGPT api, omitting max_tokens is equivalent to having no limit # for ChatGPT api, omitting max_tokens is equivalent to having no limit
@ -897,7 +936,11 @@ class OpenAIChat(BaseLLM):
} }
return LLMResult( return LLMResult(
generations=[ generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])] [
Generation(
text=full_response["choices"][0]["message"]["content"]
)
]
], ],
llm_output=llm_output, llm_output=llm_output,
) )
@ -911,7 +954,9 @@ class OpenAIChat(BaseLLM):
) -> LLMResult: ) -> LLMResult:
if self.streaming: if self.streaming:
generation: Optional[GenerationChunk] = None generation: Optional[GenerationChunk] = None
async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs): async for chunk in self._astream(
prompts[0], stop, run_manager, **kwargs
):
if generation is None: if generation is None:
generation = chunk generation = chunk
else: else:
@ -930,7 +975,11 @@ class OpenAIChat(BaseLLM):
} }
return LLMResult( return LLMResult(
generations=[ generations=[
[Generation(text=full_response["choices"][0]["message"]["content"])] [
Generation(
text=full_response["choices"][0]["message"]["content"]
)
]
], ],
llm_output=llm_output, llm_output=llm_output,
) )

@ -37,10 +37,16 @@ def _create_retry_decorator() -> Callable[[Any], Any]:
return retry( return retry(
reraise=True, reraise=True,
stop=stop_after_attempt(max_retries), stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds), wait=wait_exponential(
multiplier=multiplier, min=min_seconds, max=max_seconds
),
retry=( retry=(
retry_if_exception_type(google.api_core.exceptions.ResourceExhausted) retry_if_exception_type(
| retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable) google.api_core.exceptions.ResourceExhausted
)
| retry_if_exception_type(
google.api_core.exceptions.ServiceUnavailable
)
| retry_if_exception_type(google.api_core.exceptions.GoogleAPIError) | retry_if_exception_type(google.api_core.exceptions.GoogleAPIError)
), ),
before_sleep=before_sleep_log(logger, logging.WARNING), before_sleep=before_sleep_log(logger, logging.WARNING),
@ -64,7 +70,9 @@ def _strip_erroneous_leading_spaces(text: str) -> str:
The PaLM API will sometimes erroneously return a single leading space in all The PaLM API will sometimes erroneously return a single leading space in all
lines > 1. This function strips that space. lines > 1. This function strips that space.
""" """
has_leading_space = all(not line or line[0] == " " for line in text.split("\n")[1:]) has_leading_space = all(
not line or line[0] == " " for line in text.split("\n")[1:]
)
if has_leading_space: if has_leading_space:
return text.replace("\n ", "\n") return text.replace("\n ", "\n")
else: else:
@ -112,7 +120,10 @@ class GooglePalm(BaseLLM, BaseModel):
values["client"] = genai values["client"] = genai
if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: if (
values["temperature"] is not None
and not 0 <= values["temperature"] <= 1
):
raise ValueError("temperature must be in the range [0.0, 1.0]") raise ValueError("temperature must be in the range [0.0, 1.0]")
if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: if values["top_p"] is not None and not 0 <= values["top_p"] <= 1:
@ -121,7 +132,10 @@ class GooglePalm(BaseLLM, BaseModel):
if values["top_k"] is not None and values["top_k"] <= 0: if values["top_k"] is not None and values["top_k"] <= 0:
raise ValueError("top_k must be positive") raise ValueError("top_k must be positive")
if values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0: if (
values["max_output_tokens"] is not None
and values["max_output_tokens"] <= 0
):
raise ValueError("max_output_tokens must be greater than zero") raise ValueError("max_output_tokens must be greater than zero")
return values return values

@ -16,4 +16,6 @@ def get_ada_embeddings(text: str, model: str = "text-embedding-ada-002"):
text = text.replace("\n", " ") text = text.replace("\n", " ")
return client.embeddings.create(input=[text], model=model)["data"][0]["embedding"] return client.embeddings.create(input=[text], model=model)["data"][0][
"embedding"
]

@ -90,7 +90,9 @@ class SpeechT5:
self.processor = SpeechT5Processor.from_pretrained(self.model_name) self.processor = SpeechT5Processor.from_pretrained(self.model_name)
self.model = SpeechT5ForTextToSpeech.from_pretrained(self.model_name) self.model = SpeechT5ForTextToSpeech.from_pretrained(self.model_name)
self.vocoder = SpeechT5HifiGan.from_pretrained(self.vocoder_name) self.vocoder = SpeechT5HifiGan.from_pretrained(self.vocoder_name)
self.embeddings_dataset = load_dataset(self.dataset_name, split="validation") self.embeddings_dataset = load_dataset(
self.dataset_name, split="validation"
)
def __call__(self, text: str, speaker_id: float = 7306): def __call__(self, text: str, speaker_id: float = 7306):
"""Call the model on some text and return the speech.""" """Call the model on some text and return the speech."""
@ -121,7 +123,9 @@ class SpeechT5:
def set_embeddings_dataset(self, dataset_name): def set_embeddings_dataset(self, dataset_name):
"""Set the embeddings dataset to a new dataset.""" """Set the embeddings dataset to a new dataset."""
self.dataset_name = dataset_name self.dataset_name = dataset_name
self.embeddings_dataset = load_dataset(self.dataset_name, split="validation") self.embeddings_dataset = load_dataset(
self.dataset_name, split="validation"
)
# Feature 1: Get sampling rate # Feature 1: Get sampling rate
def get_sampling_rate(self): def get_sampling_rate(self):

@ -141,8 +141,8 @@ class SSD1B:
print( print(
colored( colored(
( (
f"Error running SSD1B: {error} try optimizing your api key and" f"Error running SSD1B: {error} try optimizing your api"
" or try again" " key and or try again"
), ),
"red", "red",
) )
@ -167,8 +167,7 @@ class SSD1B:
"""Print the SSD1B dashboard""" """Print the SSD1B dashboard"""
print( print(
colored( colored(
( f"""SSD1B Dashboard:
f"""SSD1B Dashboard:
-------------------- --------------------
Model: {self.model} Model: {self.model}
@ -184,13 +183,14 @@ class SSD1B:
-------------------- --------------------
""" """,
),
"green", "green",
) )
) )
def process_batch_concurrently(self, tasks: List[str], max_workers: int = 5): def process_batch_concurrently(
self, tasks: List[str], max_workers: int = 5
):
""" """
Process a batch of tasks concurrently Process a batch of tasks concurrently
@ -211,8 +211,12 @@ class SSD1B:
>>> print(results) >>> print(results)
""" """
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: with concurrent.futures.ThreadPoolExecutor(
future_to_task = {executor.submit(self, task): task for task in tasks} max_workers=max_workers
) as executor:
future_to_task = {
executor.submit(self, task): task for task in tasks
}
results = [] results = []
for future in concurrent.futures.as_completed(future_to_task): for future in concurrent.futures.as_completed(future_to_task):
task = future_to_task[future] task = future_to_task[future]
@ -225,13 +229,17 @@ class SSD1B:
print( print(
colored( colored(
( (
f"Error running SSD1B: {error} try optimizing your api key and" f"Error running SSD1B: {error} try optimizing"
" or try again" " your api key and or try again"
), ),
"red", "red",
) )
) )
print(colored(f"Error running SSD1B: {error.http_status}", "red")) print(
colored(
f"Error running SSD1B: {error.http_status}", "red"
)
)
print(colored(f"Error running SSD1B: {error.error}", "red")) print(colored(f"Error running SSD1B: {error.error}", "red"))
raise error raise error

@ -66,7 +66,9 @@ class WhisperX:
compute_type = "float16" compute_type = "float16"
# 1. Transcribe with original Whisper (batched) 🗣️ # 1. Transcribe with original Whisper (batched) 🗣️
model = whisperx.load_model("large-v2", device, compute_type=compute_type) model = whisperx.load_model(
"large-v2", device, compute_type=compute_type
)
audio = whisperx.load_audio(audio_file) audio = whisperx.load_audio(audio_file)
result = model.transcribe(audio, batch_size=batch_size) result = model.transcribe(audio, batch_size=batch_size)

@ -45,7 +45,9 @@ class WizardLLMStoryTeller:
): ):
self.logger = logging.getLogger(__name__) self.logger = logging.getLogger(__name__)
self.device = ( self.device = (
device if device else ("cuda" if torch.cuda.is_available() else "cpu") device
if device
else ("cuda" if torch.cuda.is_available() else "cpu")
) )
self.model_id = model_id self.model_id = model_id
self.max_length = max_length self.max_length = max_length
@ -101,7 +103,9 @@ class WizardLLMStoryTeller:
if self.distributed: if self.distributed:
self.model = DDP(self.model) self.model = DDP(self.model)
except Exception as error: except Exception as error:
self.logger.error(f"Failed to load the model or the tokenizer: {error}") self.logger.error(
f"Failed to load the model or the tokenizer: {error}"
)
raise raise
def run(self, prompt_text: str): def run(self, prompt_text: str):

@ -45,7 +45,9 @@ class YarnMistral128:
): ):
self.logger = logging.getLogger(__name__) self.logger = logging.getLogger(__name__)
self.device = ( self.device = (
device if device else ("cuda" if torch.cuda.is_available() else "cpu") device
if device
else ("cuda" if torch.cuda.is_available() else "cpu")
) )
self.model_id = model_id self.model_id = model_id
self.max_length = max_length self.max_length = max_length
@ -106,7 +108,9 @@ class YarnMistral128:
if self.distributed: if self.distributed:
self.model = DDP(self.model) self.model = DDP(self.model)
except Exception as error: except Exception as error:
self.logger.error(f"Failed to load the model or the tokenizer: {error}") self.logger.error(
f"Failed to load the model or the tokenizer: {error}"
)
raise raise
def run(self, prompt_text: str): def run(self, prompt_text: str):

@ -15,7 +15,9 @@ class PromptGenerator:
"thoughts": { "thoughts": {
"text": "thought", "text": "thought",
"reasoning": "reasoning", "reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan", "plan": (
"- short bulleted\n- list that conveys\n- long-term plan"
),
"criticism": "constructive self-criticism", "criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user", "speak": "thoughts summary to say to user",
}, },
@ -66,13 +68,11 @@ class PromptGenerator:
""" """
formatted_response_format = json.dumps(self.response_format, indent=4) formatted_response_format = json.dumps(self.response_format, indent=4)
prompt_string = ( prompt_string = (
f"Constraints:\n{''.join(self.constraints)}\n\n" f"Constraints:\n{''.join(self.constraints)}\n\nCommands:\n{''.join(self.commands)}\n\nResources:\n{''.join(self.resources)}\n\nPerformance"
f"Commands:\n{''.join(self.commands)}\n\n" f" Evaluation:\n{''.join(self.performance_evaluation)}\n\nYou"
f"Resources:\n{''.join(self.resources)}\n\n" " should only respond in JSON format as described below \nResponse"
f"Performance Evaluation:\n{''.join(self.performance_evaluation)}\n\n" f" Format: \n{formatted_response_format} \nEnsure the response can"
"You should only respond in JSON format as described below " " be parsed by Python json.loads"
f"\nResponse Format: \n{formatted_response_format} "
"\nEnsure the response can be parsed by Python json.loads"
) )
return prompt_string return prompt_string

@ -5,26 +5,26 @@ def generate_agent_role_prompt(agent):
""" """
prompts = { prompts = {
"Finance Agent": ( "Finance Agent": (
"You are a seasoned finance analyst AI assistant. Your primary goal is to" "You are a seasoned finance analyst AI assistant. Your primary goal"
" compose comprehensive, astute, impartial, and methodically arranged" " is to compose comprehensive, astute, impartial, and methodically"
" financial reports based on provided data and trends." " arranged financial reports based on provided data and trends."
), ),
"Travel Agent": ( "Travel Agent": (
"You are a world-travelled AI tour guide assistant. Your main purpose is to" "You are a world-travelled AI tour guide assistant. Your main"
" draft engaging, insightful, unbiased, and well-structured travel reports" " purpose is to draft engaging, insightful, unbiased, and"
" on given locations, including history, attractions, and cultural" " well-structured travel reports on given locations, including"
" insights." " history, attractions, and cultural insights."
), ),
"Academic Research Agent": ( "Academic Research Agent": (
"You are an AI academic research assistant. Your primary responsibility is" "You are an AI academic research assistant. Your primary"
" to create thorough, academically rigorous, unbiased, and systematically" " responsibility is to create thorough, academically rigorous,"
" organized reports on a given research topic, following the standards of" " unbiased, and systematically organized reports on a given"
" scholarly work." " research topic, following the standards of scholarly work."
), ),
"Default Agent": ( "Default Agent": (
"You are an AI critical thinker research assistant. Your sole purpose is to" "You are an AI critical thinker research assistant. Your sole"
" write well written, critically acclaimed, objective and structured" " purpose is to write well written, critically acclaimed, objective"
" reports on given text." " and structured reports on given text."
), ),
} }
@ -39,12 +39,12 @@ def generate_report_prompt(question, research_summary):
""" """
return ( return (
f'"""{research_summary}""" Using the above information, answer the following' f'"""{research_summary}""" Using the above information, answer the'
f' question or topic: "{question}" in a detailed report -- The report should' f' following question or topic: "{question}" in a detailed report --'
" focus on the answer to the question, should be well structured, informative," " The report should focus on the answer to the question, should be"
" in depth, with facts and numbers if available, a minimum of 1,200 words and" " well structured, informative, in depth, with facts and numbers if"
" with markdown syntax and apa format. Write all source urls at the end of the" " available, a minimum of 1,200 words and with markdown syntax and apa"
" report in apa format" " format. Write all source urls at the end of the report in apa format"
) )
@ -55,9 +55,10 @@ def generate_search_queries_prompt(question):
""" """
return ( return (
"Write 4 google search queries to search online that form an objective opinion" "Write 4 google search queries to search online that form an objective"
f' from the following: "{question}"You must respond with a list of strings in' f' opinion from the following: "{question}"You must respond with a list'
' the following format: ["query 1", "query 2", "query 3", "query 4"]' ' of strings in the following format: ["query 1", "query 2", "query'
' 3", "query 4"]'
) )
@ -73,14 +74,15 @@ def generate_resource_report_prompt(question, research_summary):
""" """
return ( return (
f'"""{research_summary}""" Based on the above information, generate a' f'"""{research_summary}""" Based on the above information, generate a'
" bibliography recommendation report for the following question or topic:" " bibliography recommendation report for the following question or"
f' "{question}". The report should provide a detailed analysis of each' f' topic: "{question}". The report should provide a detailed analysis'
" recommended resource, explaining how each source can contribute to finding" " of each recommended resource, explaining how each source can"
" answers to the research question. Focus on the relevance, reliability, and" " contribute to finding answers to the research question. Focus on the"
" significance of each source. Ensure that the report is well-structured," " relevance, reliability, and significance of each source. Ensure that"
" informative, in-depth, and follows Markdown syntax. Include relevant facts," " the report is well-structured, informative, in-depth, and follows"
" figures, and numbers whenever available. The report should have a minimum" " Markdown syntax. Include relevant facts, figures, and numbers"
" length of 1,200 words." " whenever available. The report should have a minimum length of 1,200"
" words."
) )
@ -92,13 +94,14 @@ def generate_outline_report_prompt(question, research_summary):
""" """
return ( return (
f'"""{research_summary}""" Using the above information, generate an outline for' f'"""{research_summary}""" Using the above information, generate an'
" a research report in Markdown syntax for the following question or topic:" " outline for a research report in Markdown syntax for the following"
f' "{question}". The outline should provide a well-structured framework for the' f' question or topic: "{question}". The outline should provide a'
" research report, including the main sections, subsections, and key points to" " well-structured framework for the research report, including the"
" be covered. The research report should be detailed, informative, in-depth," " main sections, subsections, and key points to be covered. The"
" and a minimum of 1,200 words. Use appropriate Markdown syntax to format the" " research report should be detailed, informative, in-depth, and a"
" outline and ensure readability." " minimum of 1,200 words. Use appropriate Markdown syntax to format"
" the outline and ensure readability."
) )
@ -110,11 +113,12 @@ def generate_concepts_prompt(question, research_summary):
""" """
return ( return (
f'"""{research_summary}""" Using the above information, generate a list of 5' f'"""{research_summary}""" Using the above information, generate a list'
" main concepts to learn for a research report on the following question or" " of 5 main concepts to learn for a research report on the following"
f' topic: "{question}". The outline should provide a well-structured' f' question or topic: "{question}". The outline should provide a'
" frameworkYou must respond with a list of strings in the following format:" " well-structured frameworkYou must respond with a list of strings in"
' ["concepts 1", "concepts 2", "concepts 3", "concepts 4, concepts 5"]' ' the following format: ["concepts 1", "concepts 2", "concepts 3",'
' "concepts 4, concepts 5"]'
) )
@ -128,10 +132,10 @@ def generate_lesson_prompt(concept):
""" """
prompt = ( prompt = (
f"generate a comprehensive lesson about {concept} in Markdown syntax. This" f"generate a comprehensive lesson about {concept} in Markdown syntax."
f" should include the definitionof {concept}, its historical background and" f" This should include the definitionof {concept}, its historical"
" development, its applications or uses in differentfields, and notable events" " background and development, its applications or uses in"
f" or facts related to {concept}." f" differentfields, and notable events or facts related to {concept}."
) )
return prompt return prompt

@ -12,7 +12,9 @@ if TYPE_CHECKING:
def get_buffer_string( def get_buffer_string(
messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI" messages: Sequence[BaseMessage],
human_prefix: str = "Human",
ai_prefix: str = "AI",
) -> str: ) -> str:
"""Convert sequence of Messages to strings and concatenate them into one string. """Convert sequence of Messages to strings and concatenate them into one string.

@ -105,7 +105,9 @@ class ChatMessage(Message):
def get_buffer_string( def get_buffer_string(
messages: Sequence[Message], human_prefix: str = "Human", ai_prefix: str = "AI" messages: Sequence[Message],
human_prefix: str = "Human",
ai_prefix: str = "AI",
) -> str: ) -> str:
string_messages = [] string_messages = []
for m in messages: for m in messages:

@ -1,6 +1,6 @@
ERROR_PROMPT = ( ERROR_PROMPT = (
"An error has occurred for the following text: \n{promptedQuery} Please explain" "An error has occurred for the following text: \n{promptedQuery} Please"
" this error.\n {e}" " explain this error.\n {e}"
) )
IMAGE_PROMPT = """ IMAGE_PROMPT = """

@ -1,16 +1,17 @@
PY_SIMPLE_COMPLETION_INSTRUCTION = "# Write the body of this function only." PY_SIMPLE_COMPLETION_INSTRUCTION = "# Write the body of this function only."
PY_REFLEXION_COMPLETION_INSTRUCTION = ( PY_REFLEXION_COMPLETION_INSTRUCTION = (
"You are a Python writing assistant. You will be given your past function" "You are a Python writing assistant. You will be given your past function"
" implementation, a series of unit tests, and a hint to change the implementation" " implementation, a series of unit tests, and a hint to change the"
" appropriately. Write your full implementation (restate the function" " implementation appropriately. Write your full implementation (restate the"
" signature).\n\n-----" " function signature).\n\n-----"
) )
PY_SELF_REFLECTION_COMPLETION_INSTRUCTION = ( PY_SELF_REFLECTION_COMPLETION_INSTRUCTION = (
"You are a Python writing assistant. You will be given a function implementation" "You are a Python writing assistant. You will be given a function"
" and a series of unit tests. Your goal is to write a few sentences to explain why" " implementation and a series of unit tests. Your goal is to write a few"
" your implementation is wrong as indicated by the tests. You will need this as a" " sentences to explain why your implementation is wrong as indicated by the"
" hint when you try again later. Only provide the few sentence description in your" " tests. You will need this as a hint when you try again later. Only"
" answer, not the implementation.\n\n-----" " provide the few sentence description in your answer, not the"
" implementation.\n\n-----"
) )
USE_PYTHON_CODEBLOCK_INSTRUCTION = ( USE_PYTHON_CODEBLOCK_INSTRUCTION = (
"Use a Python code block to write your response. For" "Use a Python code block to write your response. For"
@ -18,25 +19,26 @@ USE_PYTHON_CODEBLOCK_INSTRUCTION = (
) )
PY_SIMPLE_CHAT_INSTRUCTION = ( PY_SIMPLE_CHAT_INSTRUCTION = (
"You are an AI that only responds with python code, NOT ENGLISH. You will be given" "You are an AI that only responds with python code, NOT ENGLISH. You will"
" a function signature and its docstring by the user. Write your full" " be given a function signature and its docstring by the user. Write your"
" implementation (restate the function signature)." " full implementation (restate the function signature)."
) )
PY_SIMPLE_CHAT_INSTRUCTION_V2 = ( PY_SIMPLE_CHAT_INSTRUCTION_V2 = (
"You are an AI that only responds with only python code. You will be given a" "You are an AI that only responds with only python code. You will be given"
" function signature and its docstring by the user. Write your full implementation" " a function signature and its docstring by the user. Write your full"
" (restate the function signature)." " implementation (restate the function signature)."
) )
PY_REFLEXION_CHAT_INSTRUCTION = ( PY_REFLEXION_CHAT_INSTRUCTION = (
"You are an AI Python assistant. You will be given your past function" "You are an AI Python assistant. You will be given your past function"
" implementation, a series of unit tests, and a hint to change the implementation" " implementation, a series of unit tests, and a hint to change the"
" appropriately. Write your full implementation (restate the function signature)." " implementation appropriately. Write your full implementation (restate the"
" function signature)."
) )
PY_REFLEXION_CHAT_INSTRUCTION_V2 = ( PY_REFLEXION_CHAT_INSTRUCTION_V2 = (
"You are an AI Python assistant. You will be given your previous implementation of" "You are an AI Python assistant. You will be given your previous"
" a function, a series of unit tests results, and your self-reflection on your" " implementation of a function, a series of unit tests results, and your"
" previous implementation. Write your full implementation (restate the function" " self-reflection on your previous implementation. Write your full"
" signature)." " implementation (restate the function signature)."
) )
PY_REFLEXION_FEW_SHOT_ADD = '''Example 1: PY_REFLEXION_FEW_SHOT_ADD = '''Example 1:
[previous impl]: [previous impl]:
@ -172,18 +174,19 @@ END EXAMPLES
''' '''
PY_SELF_REFLECTION_CHAT_INSTRUCTION = ( PY_SELF_REFLECTION_CHAT_INSTRUCTION = (
"You are a Python programming assistant. You will be given a function" "You are a Python programming assistant. You will be given a function"
" implementation and a series of unit tests. Your goal is to write a few sentences" " implementation and a series of unit tests. Your goal is to write a few"
" to explain why your implementation is wrong as indicated by the tests. You will" " sentences to explain why your implementation is wrong as indicated by the"
" need this as a hint when you try again later. Only provide the few sentence" " tests. You will need this as a hint when you try again later. Only"
" description in your answer, not the implementation." " provide the few sentence description in your answer, not the"
" implementation."
) )
PY_SELF_REFLECTION_CHAT_INSTRUCTION_V2 = ( PY_SELF_REFLECTION_CHAT_INSTRUCTION_V2 = (
"You are a Python programming assistant. You will be given a function" "You are a Python programming assistant. You will be given a function"
" implementation and a series of unit test results. Your goal is to write a few" " implementation and a series of unit test results. Your goal is to write a"
" sentences to explain why your implementation is wrong as indicated by the tests." " few sentences to explain why your implementation is wrong as indicated by"
" You will need this as guidance when you try again later. Only provide the few" " the tests. You will need this as guidance when you try again later. Only"
" sentence description in your answer, not the implementation. You will be given a" " provide the few sentence description in your answer, not the"
" few examples by the user." " implementation. You will be given a few examples by the user."
) )
PY_SELF_REFLECTION_FEW_SHOT = """Example 1: PY_SELF_REFLECTION_FEW_SHOT = """Example 1:
[function impl]: [function impl]:

@ -1,23 +1,26 @@
conversation_stages = { conversation_stages = {
"1": ( "1": (
"Introduction: Start the conversation by introducing yourself and your company." "Introduction: Start the conversation by introducing yourself and your"
" Be polite and respectful while keeping the tone of the conversation" " company. Be polite and respectful while keeping the tone of the"
" professional. Your greeting should be welcoming. Always clarify in your" " conversation professional. Your greeting should be welcoming. Always"
" greeting the reason why you are contacting the prospect." " clarify in your greeting the reason why you are contacting the"
" prospect."
), ),
"2": ( "2": (
"Qualification: Qualify the prospect by confirming if they are the right person" "Qualification: Qualify the prospect by confirming if they are the"
" to talk to regarding your product/service. Ensure that they have the" " right person to talk to regarding your product/service. Ensure that"
" authority to make purchasing decisions." " they have the authority to make purchasing decisions."
), ),
"3": ( "3": (
"Value proposition: Briefly explain how your product/service can benefit the" "Value proposition: Briefly explain how your product/service can"
" prospect. Focus on the unique selling points and value proposition of your" " benefit the prospect. Focus on the unique selling points and value"
" product/service that sets it apart from competitors." " proposition of your product/service that sets it apart from"
" competitors."
), ),
"4": ( "4": (
"Needs analysis: Ask open-ended questions to uncover the prospect's needs and" "Needs analysis: Ask open-ended questions to uncover the prospect's"
" pain points. Listen carefully to their responses and take notes." " needs and pain points. Listen carefully to their responses and take"
" notes."
), ),
"5": ( "5": (
"Solution presentation: Based on the prospect's needs, present your" "Solution presentation: Based on the prospect's needs, present your"
@ -29,9 +32,9 @@ conversation_stages = {
" testimonials to support your claims." " testimonials to support your claims."
), ),
"7": ( "7": (
"Close: Ask for the sale by proposing a next step. This could be a demo, a" "Close: Ask for the sale by proposing a next step. This could be a"
" trial or a meeting with decision-makers. Ensure to summarize what has been" " demo, a trial or a meeting with decision-makers. Ensure to summarize"
" discussed and reiterate the benefits." " what has been discussed and reiterate the benefits."
), ),
} }
@ -46,24 +46,27 @@ Conversation history:
conversation_stages = { conversation_stages = {
"1": ( "1": (
"Introduction: Start the conversation by introducing yourself and your company." "Introduction: Start the conversation by introducing yourself and your"
" Be polite and respectful while keeping the tone of the conversation" " company. Be polite and respectful while keeping the tone of the"
" professional. Your greeting should be welcoming. Always clarify in your" " conversation professional. Your greeting should be welcoming. Always"
" greeting the reason why you are contacting the prospect." " clarify in your greeting the reason why you are contacting the"
" prospect."
), ),
"2": ( "2": (
"Qualification: Qualify the prospect by confirming if they are the right person" "Qualification: Qualify the prospect by confirming if they are the"
" to talk to regarding your product/service. Ensure that they have the" " right person to talk to regarding your product/service. Ensure that"
" authority to make purchasing decisions." " they have the authority to make purchasing decisions."
), ),
"3": ( "3": (
"Value proposition: Briefly explain how your product/service can benefit the" "Value proposition: Briefly explain how your product/service can"
" prospect. Focus on the unique selling points and value proposition of your" " benefit the prospect. Focus on the unique selling points and value"
" product/service that sets it apart from competitors." " proposition of your product/service that sets it apart from"
" competitors."
), ),
"4": ( "4": (
"Needs analysis: Ask open-ended questions to uncover the prospect's needs and" "Needs analysis: Ask open-ended questions to uncover the prospect's"
" pain points. Listen carefully to their responses and take notes." " needs and pain points. Listen carefully to their responses and take"
" notes."
), ),
"5": ( "5": (
"Solution presentation: Based on the prospect's needs, present your" "Solution presentation: Based on the prospect's needs, present your"
@ -75,8 +78,8 @@ conversation_stages = {
" testimonials to support your claims." " testimonials to support your claims."
), ),
"7": ( "7": (
"Close: Ask for the sale by proposing a next step. This could be a demo, a" "Close: Ask for the sale by proposing a next step. This could be a"
" trial or a meeting with decision-makers. Ensure to summarize what has been" " demo, a trial or a meeting with decision-makers. Ensure to summarize"
" discussed and reiterate the benefits." " what has been discussed and reiterate the benefits."
), ),
} }
@ -7,7 +7,11 @@ from typing import Callable, Dict, List
from termcolor import colored from termcolor import colored
from swarms.structs.flow import Flow from swarms.structs.flow import Flow
from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator from swarms.utils.decorators import (
error_decorator,
log_decorator,
timing_decorator,
)
class AutoScaler: class AutoScaler:
@ -69,7 +73,9 @@ class AutoScaler:
try: try:
self.tasks_queue.put(task) self.tasks_queue.put(task)
except Exception as error: except Exception as error:
print(f"Error adding task to queue: {error} try again with a new task") print(
f"Error adding task to queue: {error} try again with a new task"
)
@log_decorator @log_decorator
@error_decorator @error_decorator
@ -108,10 +114,15 @@ class AutoScaler:
if pending_tasks / len(self.agents_pool) > self.busy_threshold: if pending_tasks / len(self.agents_pool) > self.busy_threshold:
self.scale_up() self.scale_up()
elif active_agents / len(self.agents_pool) < self.idle_threshold: elif (
active_agents / len(self.agents_pool) < self.idle_threshold
):
self.scale_down() self.scale_down()
except Exception as error: except Exception as error:
print(f"Error monitoring and scaling: {error} try again with a new task") print(
f"Error monitoring and scaling: {error} try again with a new"
" task"
)
@log_decorator @log_decorator
@error_decorator @error_decorator
@ -125,7 +136,9 @@ class AutoScaler:
while True: while True:
task = self.task_queue.get() task = self.task_queue.get()
if task: if task:
available_agent = next((agent for agent in self.agents_pool)) available_agent = next(
(agent for agent in self.agents_pool)
)
if available_agent: if available_agent:
available_agent.run(task) available_agent.run(task)
except Exception as error: except Exception as error:
@ -348,7 +348,8 @@ class Flow:
return "\n".join(tool_descriptions) return "\n".join(tool_descriptions)
except Exception as error: except Exception as error:
print( print(
f"Error getting tool description: {error} try adding a description to the tool or removing the tool" f"Error getting tool description: {error} try adding a"
" description to the tool or removing the tool"
) )
else: else:
return "No tools available" return "No tools available"
@ -479,8 +480,12 @@ class Flow:
print(colored("Initializing Autonomous Agent...", "yellow")) print(colored("Initializing Autonomous Agent...", "yellow"))
# print(colored("Loading modules...", "yellow")) # print(colored("Loading modules...", "yellow"))
# print(colored("Modules loaded successfully.", "green")) # print(colored("Modules loaded successfully.", "green"))
print(colored("Autonomous Agent Activated.", "cyan", attrs=["bold"])) print(
print(colored("All systems operational. Executing task...", "green")) colored("Autonomous Agent Activated.", "cyan", attrs=["bold"])
)
print(
colored("All systems operational. Executing task...", "green")
)
except Exception as error: except Exception as error:
print( print(
colored( colored(
@ -525,14 +530,16 @@ class Flow:
loop_count = 0 loop_count = 0
while self.max_loops == "auto" or loop_count < self.max_loops: while self.max_loops == "auto" or loop_count < self.max_loops:
loop_count += 1 loop_count += 1
print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue")) print(
colored(f"\nLoop {loop_count} of {self.max_loops}", "blue")
)
print("\n") print("\n")
# Check to see if stopping token is in the output to stop the loop # Check to see if stopping token is in the output to stop the loop
if self.stopping_token: if self.stopping_token:
if self._check_stopping_condition(response) or parse_done_token( if self._check_stopping_condition(
response response
): ) or parse_done_token(response):
break break
# Adjust temperature, comment if no work # Adjust temperature, comment if no work
@ -629,7 +636,9 @@ class Flow:
print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue")) print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue"))
print("\n") print("\n")
if self._check_stopping_condition(response) or parse_done_token(response): if self._check_stopping_condition(response) or parse_done_token(
response
):
break break
# Adjust temperature, comment if no work # Adjust temperature, comment if no work
@ -949,7 +958,8 @@ class Flow:
if hasattr(self.llm, name): if hasattr(self.llm, name):
value = getattr(self.llm, name) value = getattr(self.llm, name)
if isinstance( if isinstance(
value, (str, int, float, bool, list, dict, tuple, type(None)) value,
(str, int, float, bool, list, dict, tuple, type(None)),
): ):
llm_params[name] = value llm_params[name] = value
else: else:
@ -1010,7 +1020,9 @@ class Flow:
print(f"Flow state loaded from {file_path}") print(f"Flow state loaded from {file_path}")
def retry_on_failure(self, function, retries: int = 3, retry_delay: int = 1): def retry_on_failure(
self, function, retries: int = 3, retry_delay: int = 1
):
"""Retry wrapper for LLM calls.""" """Retry wrapper for LLM calls."""
attempt = 0 attempt = 0
while attempt < retries: while attempt < retries:
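The Flow hunks above and the Task/Workflow hunks that follow rewrap long def signatures the same way: when a signature exceeds 80 columns, every parameter moves onto its own line with a trailing comma, and the closing parenthesis plus return annotation get their own line. A small self-contained sketch of the pattern (the class and its body are hypothetical, not from this diff):

from typing import Any, Callable, Union


class WorkflowSketch:
    def add(
        self,
        task: str,
        flow: Union[Callable, "WorkflowSketch"],
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """Record a task (sketch only; no real scheduling logic)."""
        self.tasks = getattr(self, "tasks", [])
        self.tasks.append((task, flow, args, kwargs))


wf = WorkflowSketch()
wf.add("summarize", print)
print(wf.tasks)  # [('summarize', <built-in function print>, (), {})]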
@ -7,7 +7,11 @@ from typing import Callable, List, Dict, Any, Sequence
class Task: class Task:
def __init__( def __init__(
self, id: str, task: str, flows: Sequence[Flow], dependencies: List[str] = [] self,
id: str,
task: str,
flows: Sequence[Flow],
dependencies: List[str] = [],
): ):
self.id = id self.id = id
self.task = task self.task = task
@ -20,7 +24,9 @@ class Task:
for flow in self.flows: for flow in self.flows:
result = flow.run(self.task, *args) result = flow.run(self.task, *args)
self.results.append(result) self.results.append(result)
args = [result] # The output of one flow becomes the input to the next args = [
result
] # The output of one flow becomes the input to the next
class Workflow: class Workflow:
@ -41,7 +47,10 @@ class Workflow:
): ):
future = self.executor.submit( future = self.executor.submit(
task.execute, task.execute,
{dep: self.tasks[dep].results for dep in task.dependencies}, {
dep: self.tasks[dep].results
for dep in task.dependencies
},
) )
futures.append((future, task.id)) futures.append((future, task.id))
@ -113,7 +113,9 @@ class SequentialWorkflow:
restore_state_filepath: Optional[str] = None restore_state_filepath: Optional[str] = None
dashboard: bool = False dashboard: bool = False
def add(self, task: str, flow: Union[Callable, Flow], *args, **kwargs) -> None: def add(
self, task: str, flow: Union[Callable, Flow], *args, **kwargs
) -> None:
""" """
Add a task to the workflow. Add a task to the workflow.
@ -182,7 +184,9 @@ class SequentialWorkflow:
raise ValueError(f"Task {task_description} not found in workflow.") raise ValueError(f"Task {task_description} not found in workflow.")
def save_workflow_state( def save_workflow_state(
self, filepath: Optional[str] = "sequential_workflow_state.json", **kwargs self,
filepath: Optional[str] = "sequential_workflow_state.json",
**kwargs,
) -> None: ) -> None:
""" """
Saves the workflow state to a json file. Saves the workflow state to a json file.
@ -348,8 +352,9 @@ class SequentialWorkflow:
# Ensure that 'task' is provided in the kwargs # Ensure that 'task' is provided in the kwargs
if "task" not in task.kwargs: if "task" not in task.kwargs:
raise ValueError( raise ValueError(
"The 'task' argument is required for the Flow flow" "The 'task' argument is required for the"
f" execution in '{task.description}'" " Flow flow execution in"
f" '{task.description}'"
) )
# Separate the 'task' argument from other kwargs # Separate the 'task' argument from other kwargs
flow_task_arg = task.kwargs.pop("task") flow_task_arg = task.kwargs.pop("task")
@ -373,7 +378,9 @@ class SequentialWorkflow:
# Autosave the workflow state # Autosave the workflow state
if self.autosave: if self.autosave:
self.save_workflow_state("sequential_workflow_state.json") self.save_workflow_state(
"sequential_workflow_state.json"
)
except Exception as e: except Exception as e:
print( print(
colored( colored(
@ -404,8 +411,8 @@ class SequentialWorkflow:
# Ensure that 'task' is provided in the kwargs # Ensure that 'task' is provided in the kwargs
if "task" not in task.kwargs: if "task" not in task.kwargs:
raise ValueError( raise ValueError(
"The 'task' argument is required for the Flow flow" "The 'task' argument is required for the Flow"
f" execution in '{task.description}'" f" flow execution in '{task.description}'"
) )
# Separate the 'task' argument from other kwargs # Separate the 'task' argument from other kwargs
flow_task_arg = task.kwargs.pop("task") flow_task_arg = task.kwargs.pop("task")
@ -429,4 +436,6 @@ class SequentialWorkflow:
# Autosave the workflow state # Autosave the workflow state
if self.autosave: if self.autosave:
self.save_workflow_state("sequential_workflow_state.json") self.save_workflow_state(
"sequential_workflow_state.json"
)
@ -103,7 +103,9 @@ class AutoBlogGenSwarm:
review_agent = self.print_beautifully("Review Agent", review_agent) review_agent = self.print_beautifully("Review Agent", review_agent)
# Agent that publishes on social media # Agent that publishes on social media
distribution_agent = self.llm(self.social_media_prompt(article=review_agent)) distribution_agent = self.llm(
self.social_media_prompt(article=review_agent)
)
distribution_agent = self.print_beautifully( distribution_agent = self.print_beautifully(
"Distribution Agent", distribution_agent "Distribution Agent", distribution_agent
) )
@ -115,7 +117,11 @@ class AutoBlogGenSwarm:
for i in range(self.iterations): for i in range(self.iterations):
self.step() self.step()
except Exception as error: except Exception as error:
print(colored(f"Error while running AutoBlogGenSwarm {error}", "red")) print(
colored(
f"Error while running AutoBlogGenSwarm {error}", "red"
)
)
if attempt == self.retry_attempts - 1: if attempt == self.retry_attempts - 1:
raise raise
@ -117,7 +117,9 @@ class AbstractSwarm(ABC):
pass pass
@abstractmethod @abstractmethod
def broadcast(self, message: str, sender: Optional["AbstractWorker"] = None): def broadcast(
self, message: str, sender: Optional["AbstractWorker"] = None
):
"""Broadcast a message to all workers""" """Broadcast a message to all workers"""
pass pass
@ -23,7 +23,9 @@ class DialogueSimulator:
>>> model.run("test") >>> model.run("test")
""" """
def __init__(self, agents: List[Callable], max_iters: int = 10, name: str = None): def __init__(
self, agents: List[Callable], max_iters: int = 10, name: str = None
):
self.agents = agents self.agents = agents
self.max_iters = max_iters self.max_iters = max_iters
self.name = name self.name = name
@ -45,7 +47,8 @@ class DialogueSimulator:
for receiver in self.agents: for receiver in self.agents:
message_history = ( message_history = (
f"Speaker Name: {speaker.name} and message: {speaker_message}" f"Speaker Name: {speaker.name} and message:"
f" {speaker_message}"
) )
receiver.run(message_history) receiver.run(message_history)
@ -56,7 +59,9 @@ class DialogueSimulator:
print(f"Error running dialogue simulator: {error}") print(f"Error running dialogue simulator: {error}")
def __repr__(self): def __repr__(self):
return f"DialogueSimulator({self.agents}, {self.max_iters}, {self.name})" return (
f"DialogueSimulator({self.agents}, {self.max_iters}, {self.name})"
)
def save_state(self): def save_state(self):
"""Save the state of the dialogue simulator""" """Save the state of the dialogue simulator"""
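The DialogueSimulator changes above wrap long f-strings in the same fashion: the literal is split into adjacent f-string pieces inside parentheses, which Python concatenates back together. A tiny runnable sketch (class and field names are illustrative only, not from the repo):

class SimulatorSketch:
    def __init__(self, agents, max_iters: int, name: str) -> None:
        self.agents = agents
        self.max_iters = max_iters
        self.name = name

    def __repr__(self) -> str:
        # Adjacent f-string literals concatenate exactly like plain strings.
        return (
            f"SimulatorSketch({self.agents}, {self.max_iters},"
            f" {self.name})"
        )


print(repr(SimulatorSketch([], 10, "demo")))
# SimulatorSketch([], 10, demo)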
@ -64,7 +64,8 @@ class GodMode:
table.append([f"LLM {i+1}", response]) table.append([f"LLM {i+1}", response])
print( print(
colored( colored(
tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"), "cyan" tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"),
"cyan",
) )
) )
@ -83,7 +84,8 @@ class GodMode:
table.append([f"LLM {i+1}", response]) table.append([f"LLM {i+1}", response])
print( print(
colored( colored(
tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"), "cyan" tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"),
"cyan",
) )
) )
@ -115,11 +117,13 @@ class GodMode:
print(f"{i + 1}. {task}") print(f"{i + 1}. {task}")
print("\nLast Responses:") print("\nLast Responses:")
table = [ table = [
[f"LLM {i+1}", response] for i, response in enumerate(self.last_responses) [f"LLM {i+1}", response]
for i, response in enumerate(self.last_responses)
] ]
print( print(
colored( colored(
tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"), "cyan" tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"),
"cyan",
) )
) )
@ -137,7 +141,8 @@ class GodMode:
"""Asynchronous run the task string""" """Asynchronous run the task string"""
loop = asyncio.get_event_loop() loop = asyncio.get_event_loop()
futures = [ futures = [
loop.run_in_executor(None, lambda llm: llm(task), llm) for llm in self.llms loop.run_in_executor(None, lambda llm: llm(task), llm)
for llm in self.llms
] ]
for response in await asyncio.gather(*futures): for response in await asyncio.gather(*futures):
print(response) print(response)
@ -145,13 +150,18 @@ class GodMode:
def concurrent_run(self, task: str) -> List[str]: def concurrent_run(self, task: str) -> List[str]:
"""Synchronously run the task on all llms and collect responses""" """Synchronously run the task on all llms and collect responses"""
with ThreadPoolExecutor() as executor: with ThreadPoolExecutor() as executor:
future_to_llm = {executor.submit(llm, task): llm for llm in self.llms} future_to_llm = {
executor.submit(llm, task): llm for llm in self.llms
}
responses = [] responses = []
for future in as_completed(future_to_llm): for future in as_completed(future_to_llm):
try: try:
responses.append(future.result()) responses.append(future.result())
except Exception as error: except Exception as error:
print(f"{future_to_llm[future]} generated an exception: {error}") print(
f"{future_to_llm[future]} generated an exception:"
f" {error}"
)
self.last_responses = responses self.last_responses = responses
self.task_history.append(task) self.task_history.append(task)
return responses return responses
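GodMode.concurrent_run above wraps a dict comprehension over executor.submit so the line fits in 80 columns; the behaviour is unchanged. The same pattern reduced to a self-contained sketch, with placeholder callables standing in for LLMs:

from concurrent.futures import ThreadPoolExecutor, as_completed


def concurrent_run(task: str, fns) -> list:
    """Run every callable on the same task concurrently."""
    with ThreadPoolExecutor() as executor:
        # Wrapped dict comprehension: one future per callable.
        future_to_fn = {
            executor.submit(fn, task): fn for fn in fns
        }
        results = []
        for future in as_completed(future_to_fn):
            results.append(future.result())
    return results


print(concurrent_run("hello", [str.upper, str.title]))
# e.g. ['HELLO', 'Hello'] (completion order may vary)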
@ -47,7 +47,9 @@ class GroupChat:
def next_agent(self, agent: Flow) -> Flow: def next_agent(self, agent: Flow) -> Flow:
"""Return the next agent in the list.""" """Return the next agent in the list."""
return self.agents[(self.agent_names.index(agent.name) + 1) % len(self.agents)] return self.agents[
(self.agent_names.index(agent.name) + 1) % len(self.agents)
]
def select_speaker_msg(self): def select_speaker_msg(self):
"""Return the message for selecting the next speaker.""" """Return the message for selecting the next speaker."""
@ -78,9 +80,9 @@ class GroupChat:
{ {
"role": "system", "role": "system",
"content": ( "content": (
"Read the above conversation. Then select the next most" "Read the above conversation. Then select the next"
f" suitable role from {self.agent_names} to play. Only" f" most suitable role from {self.agent_names} to"
" return the role." " play. Only return the role."
), ),
} }
] ]
@ -126,7 +128,9 @@ class GroupChatManager:
self.selector = selector self.selector = selector
def __call__(self, task: str): def __call__(self, task: str):
self.groupchat.messages.append({"role": self.selector.name, "content": task}) self.groupchat.messages.append(
{"role": self.selector.name, "content": task}
)
for i in range(self.groupchat.max_round): for i in range(self.groupchat.max_round):
speaker = self.groupchat.select_speaker( speaker = self.groupchat.select_speaker(
last_speaker=self.selector, selector=self.selector last_speaker=self.selector, selector=self.selector
@ -13,8 +13,8 @@ from swarms.utils.logger import logger
class BidOutputParser(RegexParser): class BidOutputParser(RegexParser):
def get_format_instructions(self) -> str: def get_format_instructions(self) -> str:
return ( return (
"Your response should be an integrater delimited by angled brackets like" "Your response should be an integrater delimited by angled brackets"
" this: <int>" " like this: <int>"
) )
@ -194,11 +194,15 @@ class MultiAgentCollaboration:
print("\n") print("\n")
n += 1 n += 1
def select_next_speaker_roundtable(self, step: int, agents: List[Flow]) -> int: def select_next_speaker_roundtable(
self, step: int, agents: List[Flow]
) -> int:
"""Selects the next speaker.""" """Selects the next speaker."""
return step % len(agents) return step % len(agents)
def select_next_speaker_director(step: int, agents: List[Flow], director) -> int: def select_next_speaker_director(
step: int, agents: List[Flow], director
) -> int:
        # if the step is even => director         # if the step is even => director
# => director selects next speaker # => director selects next speaker
if step % 2 == 1: if step % 2 == 1:
@ -265,7 +269,10 @@ class MultiAgentCollaboration:
def format_results(self, results): def format_results(self, results):
"""Formats the results of the run method""" """Formats the results of the run method"""
formatted_results = "\n".join( formatted_results = "\n".join(
[f"{result['agent']} responded: {result['response']}" for result in results] [
f"{result['agent']} responded: {result['response']}"
for result in results
]
) )
return formatted_results return formatted_results
@ -291,7 +298,12 @@ class MultiAgentCollaboration:
return state return state
def __repr__(self): def __repr__(self):
return f"MultiAgentCollaboration(agents={self.agents}, selection_function={self.select_next_speaker}, max_iters={self.max_iters}, autosave={self.autosave}, saved_file_path_name={self.saved_file_path_name})" return (
f"MultiAgentCollaboration(agents={self.agents},"
f" selection_function={self.select_next_speaker},"
f" max_iters={self.max_iters}, autosave={self.autosave},"
f" saved_file_path_name={self.saved_file_path_name})"
)
def performance(self): def performance(self):
"""Tracks and reports the performance of each agent""" """Tracks and reports the performance of each agent"""
@ -111,7 +111,9 @@ class Orchestrator:
self.chroma_client = chromadb.Client() self.chroma_client = chromadb.Client()
self.collection = self.chroma_client.create_collection(name=collection_name) self.collection = self.chroma_client.create_collection(
name=collection_name
)
self.current_tasks = {} self.current_tasks = {}
@ -148,13 +150,14 @@ class Orchestrator:
) )
logging.info( logging.info(
f"Task {id(str)} has been processed by agent {id(agent)} with" f"Task {id(str)} has been processed by agent"
f" {id(agent)} with"
) )
except Exception as error: except Exception as error:
logging.error( logging.error(
f"Failed to process task {id(task)} by agent {id(agent)}. Error:" f"Failed to process task {id(task)} by agent {id(agent)}."
f" {error}" f" Error: {error}"
) )
finally: finally:
with self.condition: with self.condition:
@ -175,7 +178,9 @@ class Orchestrator:
try: try:
# Query the vector database for documents created by the agents # Query the vector database for documents created by the agents
results = self.collection.query(query_texts=[str(agent_id)], n_results=10) results = self.collection.query(
query_texts=[str(agent_id)], n_results=10
)
return results return results
except Exception as e: except Exception as e:
@ -212,7 +217,9 @@ class Orchestrator:
self.collection.add(documents=[result], ids=[str(id(result))]) self.collection.add(documents=[result], ids=[str(id(result))])
except Exception as e: except Exception as e:
logging.error(f"Failed to append the agent output to database. Error: {e}") logging.error(
f"Failed to append the agent output to database. Error: {e}"
)
raise raise
def run(self, objective: str): def run(self, objective: str):
@ -226,7 +233,9 @@ class Orchestrator:
results = [ results = [
self.assign_task(agent_id, task) self.assign_task(agent_id, task)
for agent_id, task in zip(range(len(self.agents)), self.task_queue) for agent_id, task in zip(
range(len(self.agents)), self.task_queue
)
] ]
for result in results: for result in results:
@ -6,7 +6,9 @@ from typing import Optional
import pandas as pd import pandas as pd
import torch import torch
from langchain.agents import tool from langchain.agents import tool
from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent from langchain.agents.agent_toolkits.pandas.base import (
create_pandas_dataframe_agent,
)
from langchain.chains.qa_with_sources.loading import ( from langchain.chains.qa_with_sources.loading import (
BaseCombineDocumentsChain, BaseCombineDocumentsChain,
) )
@ -38,7 +40,10 @@ def pushd(new_dir):
@tool @tool
def process_csv( def process_csv(
llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None llm,
csv_file_path: str,
instructions: str,
output_path: Optional[str] = None,
) -> str: ) -> str:
"""Process a CSV by with pandas in a limited REPL.\ """Process a CSV by with pandas in a limited REPL.\
Only use this after writing data to disk as a csv file.\ Only use this after writing data to disk as a csv file.\
@ -49,7 +54,9 @@ def process_csv(
df = pd.read_csv(csv_file_path) df = pd.read_csv(csv_file_path)
except Exception as e: except Exception as e:
return f"Error: {e}" return f"Error: {e}"
agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=False) agent = create_pandas_dataframe_agent(
llm, df, max_iterations=30, verbose=False
)
if output_path is not None: if output_path is not None:
instructions += f" Save output to disk at {output_path}" instructions += f" Save output to disk at {output_path}"
try: try:
@ -79,7 +86,9 @@ async def async_load_playwright(url: str) -> str:
text = soup.get_text() text = soup.get_text()
lines = (line.strip() for line in text.splitlines()) lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) chunks = (
phrase.strip() for line in lines for phrase in line.split(" ")
)
results = "\n".join(chunk for chunk in chunks if chunk) results = "\n".join(chunk for chunk in chunks if chunk)
except Exception as e: except Exception as e:
results = f"Error: {e}" results = f"Error: {e}"
@ -110,7 +119,8 @@ def _get_text_splitter():
class WebpageQATool(BaseTool): class WebpageQATool(BaseTool):
name = "query_webpage" name = "query_webpage"
description = ( description = (
"Browse a webpage and retrieve the information relevant to the question." "Browse a webpage and retrieve the information relevant to the"
" question."
) )
text_splitter: RecursiveCharacterTextSplitter = Field( text_splitter: RecursiveCharacterTextSplitter = Field(
default_factory=_get_text_splitter default_factory=_get_text_splitter
@ -176,7 +186,9 @@ def VQAinference(self, inputs):
image_path, question = inputs.split(",") image_path, question = inputs.split(",")
raw_image = Image.open(image_path).convert("RGB") raw_image = Image.open(image_path).convert("RGB")
inputs = processor(raw_image, question, return_tensors="pt").to(device, torch_dtype) inputs = processor(raw_image, question, return_tensors="pt").to(
device, torch_dtype
)
out = model.generate(**inputs) out = model.generate(**inputs)
answer = processor.decode(out[0], skip_special_tokens=True) answer = processor.decode(out[0], skip_special_tokens=True)
@ -28,7 +28,9 @@ class MaskFormer:
def __init__(self, device): def __init__(self, device):
print("Initializing MaskFormer to %s" % device) print("Initializing MaskFormer to %s" % device)
self.device = device self.device = device
self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") self.processor = CLIPSegProcessor.from_pretrained(
"CIDAS/clipseg-rd64-refined"
)
self.model = CLIPSegForImageSegmentation.from_pretrained( self.model = CLIPSegForImageSegmentation.from_pretrained(
"CIDAS/clipseg-rd64-refined" "CIDAS/clipseg-rd64-refined"
).to(device) ).to(device)
@ -76,23 +78,26 @@ class ImageEditing:
@tool( @tool(
name="Remove Something From The Photo", name="Remove Something From The Photo",
description=( description=(
"useful when you want to remove and object or something from the photo " "useful when you want to remove and object or something from the"
"from its description or location. " " photo from its description or location. The input to this tool"
"The input to this tool should be a comma separated string of two, " " should be a comma separated string of two, representing the"
"representing the image_path and the object need to be removed. " " image_path and the object need to be removed. "
), ),
) )
def inference_remove(self, inputs): def inference_remove(self, inputs):
image_path, to_be_removed_txt = inputs.split(",") image_path, to_be_removed_txt = inputs.split(",")
return self.inference_replace(f"{image_path},{to_be_removed_txt},background") return self.inference_replace(
f"{image_path},{to_be_removed_txt},background"
)
@tool( @tool(
name="Replace Something From The Photo", name="Replace Something From The Photo",
description=( description=(
"useful when you want to replace an object from the object description or" "useful when you want to replace an object from the object"
" location with another object from its description. The input to this tool" " description or location with another object from its description."
" should be a comma separated string of three, representing the image_path," " The input to this tool should be a comma separated string of"
" the object to be replaced, the object to be replaced with " " three, representing the image_path, the object to be replaced,"
" the object to be replaced with "
), ),
) )
def inference_replace(self, inputs): def inference_replace(self, inputs):
@ -137,10 +142,10 @@ class InstructPix2Pix:
@tool( @tool(
name="Instruct Image Using Text", name="Instruct Image Using Text",
description=( description=(
"useful when you want to the style of the image to be like the text. " "useful when you want to the style of the image to be like the"
"like: make it look like a painting. or make it like a robot. " " text. like: make it look like a painting. or make it like a"
"The input to this tool should be a comma separated string of two, " " robot. The input to this tool should be a comma separated string"
"representing the image_path and the text. " " of two, representing the image_path and the text. "
), ),
) )
def inference(self, inputs): def inference(self, inputs):
@ -149,14 +154,17 @@ class InstructPix2Pix:
image_path, text = inputs.split(",")[0], ",".join(inputs.split(",")[1:]) image_path, text = inputs.split(",")[0], ",".join(inputs.split(",")[1:])
original_image = Image.open(image_path) original_image = Image.open(image_path)
image = self.pipe( image = self.pipe(
text, image=original_image, num_inference_steps=40, image_guidance_scale=1.2 text,
image=original_image,
num_inference_steps=40,
image_guidance_scale=1.2,
).images[0] ).images[0]
updated_image_path = get_new_image_name(image_path, func_name="pix2pix") updated_image_path = get_new_image_name(image_path, func_name="pix2pix")
image.save(updated_image_path) image.save(updated_image_path)
logger.debug( logger.debug(
f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text:" f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct"
f" {text}, Output Image: {updated_image_path}" f" Text: {text}, Output Image: {updated_image_path}"
) )
return updated_image_path return updated_image_path
@ -173,17 +181,18 @@ class Text2Image:
self.pipe.to(device) self.pipe.to(device)
self.a_prompt = "best quality, extremely detailed" self.a_prompt = "best quality, extremely detailed"
self.n_prompt = ( self.n_prompt = (
"longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, " "longbody, lowres, bad anatomy, bad hands, missing fingers, extra"
"fewer digits, cropped, worst quality, low quality" " digit, fewer digits, cropped, worst quality, low quality"
) )
@tool( @tool(
name="Generate Image From User Input Text", name="Generate Image From User Input Text",
description=( description=(
"useful when you want to generate an image from a user input text and save" "useful when you want to generate an image from a user input text"
" it to a file. like: generate an image of an object or something, or" " and save it to a file. like: generate an image of an object or"
" generate an image that includes some objects. The input to this tool" " something, or generate an image that includes some objects. The"
" should be a string, representing the text used to generate image. " " input to this tool should be a string, representing the text used"
" to generate image. "
), ),
) )
def inference(self, text): def inference(self, text):
@ -205,7 +214,9 @@ class VisualQuestionAnswering:
print("Initializing VisualQuestionAnswering to %s" % device) print("Initializing VisualQuestionAnswering to %s" % device)
self.torch_dtype = torch.float16 if "cuda" in device else torch.float32 self.torch_dtype = torch.float16 if "cuda" in device else torch.float32
self.device = device self.device = device
self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base") self.processor = BlipProcessor.from_pretrained(
"Salesforce/blip-vqa-base"
)
self.model = BlipForQuestionAnswering.from_pretrained( self.model = BlipForQuestionAnswering.from_pretrained(
"Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype "Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype
).to(self.device) ).to(self.device)
@ -213,10 +224,11 @@ class VisualQuestionAnswering:
@tool( @tool(
name="Answer Question About The Image", name="Answer Question About The Image",
description=( description=(
"useful when you need an answer for a question based on an image. like:" "useful when you need an answer for a question based on an image."
" what is the background color of the last image, how many cats in this" " like: what is the background color of the last image, how many"
" figure, what is in this figure. The input to this tool should be a comma" " cats in this figure, what is in this figure. The input to this"
" separated string of two, representing the image_path and the question" " tool should be a comma separated string of two, representing the"
" image_path and the question"
), ),
) )
def inference(self, inputs): def inference(self, inputs):
@ -229,8 +241,8 @@ class VisualQuestionAnswering:
answer = self.processor.decode(out[0], skip_special_tokens=True) answer = self.processor.decode(out[0], skip_special_tokens=True)
logger.debug( logger.debug(
f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input" f"\nProcessed VisualQuestionAnswering, Input Image: {image_path},"
f" Question: {question}, Output Answer: {answer}" f" Input Question: {question}, Output Answer: {answer}"
) )
return answer return answer
@ -245,7 +257,8 @@ class ImageCaptioning(BaseHandler):
"Salesforce/blip-image-captioning-base" "Salesforce/blip-image-captioning-base"
) )
self.model = BlipForConditionalGeneration.from_pretrained( self.model = BlipForConditionalGeneration.from_pretrained(
"Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype "Salesforce/blip-image-captioning-base",
torch_dtype=self.torch_dtype,
).to(self.device) ).to(self.device)
def handle(self, filename: str): def handle(self, filename: str):
@ -264,8 +277,8 @@ class ImageCaptioning(BaseHandler):
out = self.model.generate(**inputs) out = self.model.generate(**inputs)
description = self.processor.decode(out[0], skip_special_tokens=True) description = self.processor.decode(out[0], skip_special_tokens=True)
print( print(
f"\nProcessed ImageCaptioning, Input Image: {filename}, Output Text:" f"\nProcessed ImageCaptioning, Input Image: {filename}, Output"
f" {description}" f" Text: {description}"
) )
return IMAGE_PROMPT.format(filename=filename, description=description) return IMAGE_PROMPT.format(filename=filename, description=description)
@ -7,7 +7,17 @@ import warnings
from abc import abstractmethod from abc import abstractmethod
from functools import partial from functools import partial
from inspect import signature from inspect import signature
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union from typing import (
Any,
Awaitable,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
Union,
)
from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import ( from langchain.callbacks.manager import (
@ -27,7 +37,11 @@ from pydantic import (
root_validator, root_validator,
validate_arguments, validate_arguments,
) )
from langchain.schema.runnable import Runnable, RunnableConfig, RunnableSerializable from langchain.schema.runnable import (
Runnable,
RunnableConfig,
RunnableSerializable,
)
class SchemaAnnotationError(TypeError): class SchemaAnnotationError(TypeError):
@ -52,7 +66,11 @@ def _get_filtered_args(
"""Get the arguments from a function's signature.""" """Get the arguments from a function's signature."""
schema = inferred_model.schema()["properties"] schema = inferred_model.schema()["properties"]
valid_keys = signature(func).parameters valid_keys = signature(func).parameters
return {k: schema[k] for k in valid_keys if k not in ("run_manager", "callbacks")} return {
k: schema[k]
for k in valid_keys
if k not in ("run_manager", "callbacks")
}
class _SchemaConfig: class _SchemaConfig:
@ -120,12 +138,11 @@ class ChildTool(BaseTool):
...""" ..."""
name = cls.__name__ name = cls.__name__
raise SchemaAnnotationError( raise SchemaAnnotationError(
f"Tool definition for {name} must include valid type annotations" f"Tool definition for {name} must include valid type"
" for argument 'args_schema' to behave as expected.\n" " annotations for argument 'args_schema' to behave as"
"Expected annotation of 'Type[BaseModel]'" " expected.\nExpected annotation of 'Type[BaseModel]' but"
f" but got '{args_schema_type}'.\n" f" got '{args_schema_type}'.\nExpected class looks"
"Expected class looks like:\n" f" like:\n{typehint_mandate}"
f"{typehint_mandate}"
) )
name: str name: str
@ -147,7 +164,9 @@ class ChildTool(BaseTool):
callbacks: Callbacks = Field(default=None, exclude=True) callbacks: Callbacks = Field(default=None, exclude=True)
"""Callbacks to be called during tool execution.""" """Callbacks to be called during tool execution."""
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) callback_manager: Optional[BaseCallbackManager] = Field(
default=None, exclude=True
)
"""Deprecated. Please use callbacks instead.""" """Deprecated. Please use callbacks instead."""
tags: Optional[List[str]] = None tags: Optional[List[str]] = None
"""Optional list of tags associated with the tool. Defaults to None """Optional list of tags associated with the tool. Defaults to None
@ -244,7 +263,9 @@ class ChildTool(BaseTool):
else: else:
if input_args is not None: if input_args is not None:
result = input_args.parse_obj(tool_input) result = input_args.parse_obj(tool_input)
return {k: v for k, v in result.dict().items() if k in tool_input} return {
k: v for k, v in result.dict().items() if k in tool_input
}
return tool_input return tool_input
@root_validator() @root_validator()
@ -286,7 +307,9 @@ class ChildTool(BaseTool):
*args, *args,
) )
def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: def _to_args_and_kwargs(
self, tool_input: Union[str, Dict]
) -> Tuple[Tuple, Dict]:
# For backwards compatibility, if run_input is a string, # For backwards compatibility, if run_input is a string,
# pass as a positional argument. # pass as a positional argument.
if isinstance(tool_input, str): if isinstance(tool_input, str):
@ -353,8 +376,9 @@ class ChildTool(BaseTool):
observation = self.handle_tool_error(e) observation = self.handle_tool_error(e)
else: else:
raise ValueError( raise ValueError(
"Got unexpected type of `handle_tool_error`. Expected bool, str " "Got unexpected type of `handle_tool_error`. Expected"
f"or callable. Received: {self.handle_tool_error}" " bool, str or callable. Received:"
f" {self.handle_tool_error}"
) )
run_manager.on_tool_end( run_manager.on_tool_end(
str(observation), color="red", name=self.name, **kwargs str(observation), color="red", name=self.name, **kwargs
@ -409,7 +433,9 @@ class ChildTool(BaseTool):
# We then call the tool on the tool input to get an observation # We then call the tool on the tool input to get an observation
tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input) tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)
observation = ( observation = (
await self._arun(*tool_args, run_manager=run_manager, **tool_kwargs) await self._arun(
*tool_args, run_manager=run_manager, **tool_kwargs
)
if new_arg_supported if new_arg_supported
else await self._arun(*tool_args, **tool_kwargs) else await self._arun(*tool_args, **tool_kwargs)
) )
@ -428,8 +454,9 @@ class ChildTool(BaseTool):
observation = self.handle_tool_error(e) observation = self.handle_tool_error(e)
else: else:
raise ValueError( raise ValueError(
"Got unexpected type of `handle_tool_error`. Expected bool, str " "Got unexpected type of `handle_tool_error`. Expected"
f"or callable. Received: {self.handle_tool_error}" " bool, str or callable. Received:"
f" {self.handle_tool_error}"
) )
await run_manager.on_tool_end( await run_manager.on_tool_end(
str(observation), color="red", name=self.name, **kwargs str(observation), color="red", name=self.name, **kwargs
@ -484,14 +511,17 @@ class Tool(BaseTool):
# assume it takes a single string input. # assume it takes a single string input.
return {"tool_input": {"type": "string"}} return {"tool_input": {"type": "string"}}
def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: def _to_args_and_kwargs(
self, tool_input: Union[str, Dict]
) -> Tuple[Tuple, Dict]:
"""Convert tool input to pydantic model.""" """Convert tool input to pydantic model."""
args, kwargs = super()._to_args_and_kwargs(tool_input) args, kwargs = super()._to_args_and_kwargs(tool_input)
# For backwards compatibility. The tool must be run with a single input # For backwards compatibility. The tool must be run with a single input
all_args = list(args) + list(kwargs.values()) all_args = list(args) + list(kwargs.values())
if len(all_args) != 1: if len(all_args) != 1:
raise ToolException( raise ToolException(
f"Too many arguments to single-input tool {self.name}. Args: {all_args}" f"Too many arguments to single-input tool {self.name}. Args:"
f" {all_args}"
) )
return tuple(all_args), {} return tuple(all_args), {}
@ -503,7 +533,9 @@ class Tool(BaseTool):
) -> Any: ) -> Any:
"""Use the tool.""" """Use the tool."""
if self.func: if self.func:
new_argument_supported = signature(self.func).parameters.get("callbacks") new_argument_supported = signature(self.func).parameters.get(
"callbacks"
)
return ( return (
self.func( self.func(
*args, *args,
@ -537,12 +569,18 @@ class Tool(BaseTool):
) )
else: else:
return await asyncio.get_running_loop().run_in_executor( return await asyncio.get_running_loop().run_in_executor(
None, partial(self._run, run_manager=run_manager, **kwargs), *args None,
partial(self._run, run_manager=run_manager, **kwargs),
*args,
) )
# TODO: this is for backwards compatibility, remove in future # TODO: this is for backwards compatibility, remove in future
def __init__( def __init__(
self, name: str, func: Optional[Callable], description: str, **kwargs: Any self,
name: str,
func: Optional[Callable],
description: str,
**kwargs: Any,
) -> None: ) -> None:
"""Initialize tool.""" """Initialize tool."""
super(Tool, self).__init__( super(Tool, self).__init__(
@ -617,7 +655,9 @@ class StructuredTool(BaseTool):
) -> Any: ) -> Any:
"""Use the tool.""" """Use the tool."""
if self.func: if self.func:
new_argument_supported = signature(self.func).parameters.get("callbacks") new_argument_supported = signature(self.func).parameters.get(
"callbacks"
)
return ( return (
self.func( self.func(
*args, *args,
@ -714,7 +754,9 @@ class StructuredTool(BaseTool):
description = f"{name}{sig} - {description.strip()}" description = f"{name}{sig} - {description.strip()}"
_args_schema = args_schema _args_schema = args_schema
if _args_schema is None and infer_schema: if _args_schema is None and infer_schema:
_args_schema = create_schema_from_function(f"{name}Schema", source_function) _args_schema = create_schema_from_function(
f"{name}Schema", source_function
)
return cls( return cls(
name=name, name=name,
func=func, func=func,
@ -772,7 +814,9 @@ def tool(
async def ainvoke_wrapper( async def ainvoke_wrapper(
callbacks: Optional[Callbacks] = None, **kwargs: Any callbacks: Optional[Callbacks] = None, **kwargs: Any
) -> Any: ) -> Any:
return await runnable.ainvoke(kwargs, {"callbacks": callbacks}) return await runnable.ainvoke(
kwargs, {"callbacks": callbacks}
)
def invoke_wrapper( def invoke_wrapper(
callbacks: Optional[Callbacks] = None, **kwargs: Any callbacks: Optional[Callbacks] = None, **kwargs: Any
@ -821,7 +865,11 @@ def tool(
return _make_tool return _make_tool
if len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Runnable): if (
len(args) == 2
and isinstance(args[0], str)
and isinstance(args[1], Runnable)
):
return _make_with_name(args[0])(args[1]) return _make_with_name(args[0])(args[1])
elif len(args) == 1 and isinstance(args[0], str): elif len(args) == 1 and isinstance(args[0], str):
# if the argument is a string, then we use the string as the tool name # if the argument is a string, then we use the string as the tool name
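Several hunks in the file above turn long one-line imports into parenthesized multi-line imports. The two forms are equivalent; the parentheses simply let each imported name sit on its own line under 80 columns. A standard-library example of the same rewrite:

# Before (one long line, over 80 columns):
# from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union
# After (same names, one per line):
from typing import (
    Any,
    Awaitable,
    Callable,
    Dict,
    List,
    Optional,
    Tuple,
    Type,
    Union,
)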
@ -144,7 +144,9 @@ class Singleton(abc.ABCMeta, type):
def __call__(cls, *args, **kwargs): def __call__(cls, *args, **kwargs):
"""Call method for the singleton metaclass.""" """Call method for the singleton metaclass."""
if cls not in cls._instances: if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) cls._instances[cls] = super(Singleton, cls).__call__(
*args, **kwargs
)
return cls._instances[cls] return cls._instances[cls]
@ -116,14 +116,20 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter):
# Most of the time it doesn't matter, but we should figure out why it happens frequently with: # Most of the time it doesn't matter, but we should figure out why it happens frequently with:
# applescript # applescript
yield {"output": traceback.format_exc()} yield {"output": traceback.format_exc()}
yield {"output": f"Retrying... ({retry_count}/{max_retries})"} yield {
"output": f"Retrying... ({retry_count}/{max_retries})"
}
yield {"output": "Restarting process."} yield {"output": "Restarting process."}
self.start_process() self.start_process()
retry_count += 1 retry_count += 1
if retry_count > max_retries: if retry_count > max_retries:
yield {"output": "Maximum retries reached. Could not execute code."} yield {
"output": (
"Maximum retries reached. Could not execute code."
)
}
return return
while True: while True:
@ -132,7 +138,9 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter):
else: else:
time.sleep(0.1) time.sleep(0.1)
try: try:
output = self.output_queue.get(timeout=0.3) # Waits for 0.3 seconds output = self.output_queue.get(
timeout=0.3
) # Waits for 0.3 seconds
yield output yield output
except queue.Empty: except queue.Empty:
if self.done.is_set(): if self.done.is_set():
@ -31,7 +31,9 @@ def timing_decorator(func):
start_time = time.time() start_time = time.time()
result = func(*args, **kwargs) result = func(*args, **kwargs)
end_time = time.time() end_time = time.time()
logging.info(f"{func.__name__} executed in {end_time - start_time} seconds") logging.info(
f"{func.__name__} executed in {end_time - start_time} seconds"
)
return result return result
return wrapper return wrapper
@ -79,7 +81,9 @@ def synchronized_decorator(func):
def deprecated_decorator(func): def deprecated_decorator(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
warnings.warn(f"{func.__name__} is deprecated", category=DeprecationWarning) warnings.warn(
f"{func.__name__} is deprecated", category=DeprecationWarning
)
return func(*args, **kwargs) return func(*args, **kwargs)
return wrapper return wrapper
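The decorator hunks above wrap the logging.info and warnings.warn calls whose single argument pushed the line past 80 columns. Below is a compact, runnable version of the timing-decorator pattern with the wrapped call, so the rewrite can be seen in context (the decorated function is made up for illustration):

import functools
import logging
import time


def timing_decorator(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        # The call's argument moves to its own line to stay under 80 columns.
        logging.info(
            f"{func.__name__} executed in {end_time - start_time} seconds"
        )
        return result

    return wrapper


@timing_decorator
def slow_add(a: int, b: int) -> int:
    time.sleep(0.1)
    return a + b


print(slow_add(1, 2))  # 3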
@ -5,6 +5,8 @@ T = TypeVar("T")
def execute_futures_dict(fs_dict: dict[str, futures.Future[T]]) -> dict[str, T]: def execute_futures_dict(fs_dict: dict[str, futures.Future[T]]) -> dict[str, T]:
futures.wait(fs_dict.values(), timeout=None, return_when=futures.ALL_COMPLETED) futures.wait(
fs_dict.values(), timeout=None, return_when=futures.ALL_COMPLETED
)
return {key: future.result() for key, future in fs_dict.items()} return {key: future.result() for key, future in fs_dict.items()}
@ -113,8 +113,8 @@ class Logger:
) )
error_handler.setLevel(logging.ERROR) error_handler.setLevel(logging.ERROR)
error_formatter = AutoGptFormatter( error_formatter = AutoGptFormatter(
"%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d"
" %(message_no_color)s" " %(title)s %(message_no_color)s"
) )
error_handler.setFormatter(error_formatter) error_handler.setFormatter(error_formatter)
@ -140,7 +140,12 @@ class Logger:
self.chat_plugins = [] self.chat_plugins = []
def typewriter_log( def typewriter_log(
self, title="", title_color="", content="", speak_text=False, level=logging.INFO self,
title="",
title_color="",
content="",
speak_text=False,
level=logging.INFO,
): ):
""" """
Logs a message to the typewriter. Logs a message to the typewriter.
@ -255,7 +260,9 @@ class Logger:
if isinstance(message, list): if isinstance(message, list):
message = " ".join(message) message = " ".join(message)
self.logger.log( self.logger.log(
level, message, extra={"title": str(title), "color": str(title_color)} level,
message,
extra={"title": str(title), "color": str(title_color)},
) )
def set_level(self, level): def set_level(self, level):
@ -284,12 +291,15 @@ class Logger:
if not additionalText: if not additionalText:
additionalText = ( additionalText = (
"Please ensure you've setup and configured everything" "Please ensure you've setup and configured everything"
" correctly. Read https://github.com/Torantulino/Auto-GPT#readme to " " correctly. Read"
"double check. You can also create a github issue or join the discord" " https://github.com/Torantulino/Auto-GPT#readme to double"
" check. You can also create a github issue or join the discord"
" and ask there!" " and ask there!"
) )
self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText) self.typewriter_log(
"DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText
)
def log_json(self, data: Any, file_name: str) -> None: def log_json(self, data: Any, file_name: str) -> None:
""" """
@ -367,7 +377,9 @@ class TypingConsoleHandler(logging.StreamHandler):
print(word, end="", flush=True) print(word, end="", flush=True)
if i < len(words) - 1: if i < len(words) - 1:
print(" ", end="", flush=True) print(" ", end="", flush=True)
typing_speed = random.uniform(min_typing_speed, max_typing_speed) typing_speed = random.uniform(
min_typing_speed, max_typing_speed
)
time.sleep(typing_speed) time.sleep(typing_speed)
# type faster after each word # type faster after each word
min_typing_speed = min_typing_speed * 0.95 min_typing_speed = min_typing_speed * 0.95
@ -201,7 +201,9 @@ def dim_multiline(message: str) -> str:
lines = message.split("\n") lines = message.split("\n")
if len(lines) <= 1: if len(lines) <= 1:
return lines[0] return lines[0]
return lines[0] + ANSI("\n... ".join([""] + lines[1:])).to(Color.black().bright()) return lines[0] + ANSI("\n... ".join([""] + lines[1:])).to(
Color.black().bright()
)
# +=============================> ANSI Ending # +=============================> ANSI Ending
@ -227,7 +229,9 @@ class AbstractUploader(ABC):
class S3Uploader(AbstractUploader): class S3Uploader(AbstractUploader):
def __init__(self, accessKey: str, secretKey: str, region: str, bucket: str): def __init__(
self, accessKey: str, secretKey: str, region: str, bucket: str
):
self.accessKey = accessKey self.accessKey = accessKey
self.secretKey = secretKey self.secretKey = secretKey
self.region = region self.region = region
@ -338,7 +342,9 @@ class FileHandler:
self.handlers = handlers self.handlers = handlers
self.path = path self.path = path
def register(self, filetype: FileType, handler: BaseHandler) -> "FileHandler": def register(
self, filetype: FileType, handler: BaseHandler
) -> "FileHandler":
self.handlers[filetype] = handler self.handlers[filetype] = handler
return self return self
@ -356,7 +362,9 @@ class FileHandler:
def handle(self, url: str) -> str: def handle(self, url: str) -> str:
try: try:
if url.startswith(os.environ.get("SERVER", "http://localhost:8000")): if url.startswith(
os.environ.get("SERVER", "http://localhost:8000")
):
local_filepath = url[ local_filepath = url[
len(os.environ.get("SERVER", "http://localhost:8000")) + 1 : len(os.environ.get("SERVER", "http://localhost:8000")) + 1 :
] ]
@ -7,5 +7,7 @@ def extract_code_in_backticks_in_string(message: str) -> str:
""" """
pattern = r"`` ``(.*?)`` " # Non-greedy match between six backticks pattern = r"`` ``(.*?)`` " # Non-greedy match between six backticks
match = re.search(pattern, message, re.DOTALL) # re.DOTALL to match newline chars match = re.search(
pattern, message, re.DOTALL
) # re.DOTALL to match newline chars
return match.group(1).strip() if match else None return match.group(1).strip() if match else None
@ -109,9 +109,11 @@ class Serializable(BaseModel, ABC):
"lc": 1, "lc": 1,
"type": "constructor", "type": "constructor",
"id": [*self.lc_namespace, self.__class__.__name__], "id": [*self.lc_namespace, self.__class__.__name__],
"kwargs": lc_kwargs "kwargs": (
if not secrets lc_kwargs
else _replace_secrets(lc_kwargs, secrets), if not secrets
else _replace_secrets(lc_kwargs, secrets)
),
} }
def to_json_not_implemented(self) -> SerializedNotImplemented: def to_json_not_implemented(self) -> SerializedNotImplemented:
@ -35,4 +35,6 @@ def test_omnimodalagent_run(omni_agent):
def test_task_executor_initialization(omni_agent): def test_task_executor_initialization(omni_agent):
assert omni_agent.task_executor is not None, "TaskExecutor initialization failed" assert (
omni_agent.task_executor is not None
), "TaskExecutor initialization failed"
@ -30,7 +30,9 @@ def test_create_collection():
def test_create_collection_exception(): def test_create_collection_exception():
with patch("oceandb.Client") as MockClient: with patch("oceandb.Client") as MockClient:
MockClient.create_collection.side_effect = Exception("Create collection error") MockClient.create_collection.side_effect = Exception(
"Create collection error"
)
db = OceanDB(MockClient) db = OceanDB(MockClient)
with pytest.raises(Exception) as e: with pytest.raises(Exception) as e:
db.create_collection("test", "modality") db.create_collection("test", "modality")
@ -6,7 +6,9 @@ api_key = os.getenv("PINECONE_API_KEY") or ""
def test_init(): def test_init():
with patch("pinecone.init") as MockInit, patch("pinecone.Index") as MockIndex: with patch("pinecone.init") as MockInit, patch(
"pinecone.Index"
) as MockIndex:
store = PineconeVectorStore( store = PineconeVectorStore(
api_key=api_key, index_name="test_index", environment="test_env" api_key=api_key, index_name="test_index", environment="test_env"
) )
@ -11,7 +11,9 @@ class TestLLM(unittest.TestCase):
@patch.object(ChatOpenAI, "__init__", return_value=None) @patch.object(ChatOpenAI, "__init__", return_value=None)
def setUp(self, mock_hf_init, mock_openai_init): def setUp(self, mock_hf_init, mock_openai_init):
self.llm_openai = LLM(openai_api_key="mock_openai_key") self.llm_openai = LLM(openai_api_key="mock_openai_key")
self.llm_hf = LLM(hf_repo_id="mock_repo_id", hf_api_token="mock_hf_token") self.llm_hf = LLM(
hf_repo_id="mock_repo_id", hf_api_token="mock_hf_token"
)
self.prompt = "Who won the FIFA World Cup in 1998?" self.prompt = "Who won the FIFA World Cup in 1998?"
def test_init(self): def test_init(self):
@ -74,7 +74,9 @@ def test_anthropic_default_params(anthropic_instance):
} }
def test_anthropic_run(mock_anthropic_env, mock_requests_post, anthropic_instance): def test_anthropic_run(
mock_anthropic_env, mock_requests_post, anthropic_instance
):
mock_response = Mock() mock_response = Mock()
mock_response.json.return_value = {"completion": "Generated text"} mock_response.json.return_value = {"completion": "Generated text"}
mock_requests_post.return_value = mock_response mock_requests_post.return_value = mock_response
@ -98,7 +100,9 @@ def test_anthropic_run(mock_anthropic_env, mock_requests_post, anthropic_instanc
    )


def test_anthropic_call(
    mock_anthropic_env, mock_requests_post, anthropic_instance
):
    mock_response = Mock()
    mock_response.json.return_value = {"completion": "Generated text"}
    mock_requests_post.return_value = mock_response
@ -193,18 +197,24 @@ def test_anthropic_convert_prompt(anthropic_instance):
def test_anthropic_call_with_stop(anthropic_instance):
    response = anthropic_instance(
        "Translate to French.", stop=["stop1", "stop2"]
    )
    assert response == "Mocked Response from Anthropic"


def test_anthropic_stream_with_stop(anthropic_instance):
    generator = anthropic_instance.stream(
        "Write a story.", stop=["stop1", "stop2"]
    )
    for token in generator:
        assert isinstance(token, str)


def test_anthropic_async_call_with_stop(anthropic_instance):
    response = anthropic_instance.async_call(
        "Tell me a joke.", stop=["stop1", "stop2"]
    )
    assert response == "Mocked Response from Anthropic"

@ -47,7 +47,9 @@ def test_run_auto_select(auto_temp_agent):
def test_run_no_scores(auto_temp_agent):
    task = "Invalid task."
    temperature_string = "0.4,0.6,0.8,1.0,1.2,1.4"
    with ThreadPoolExecutor(
        max_workers=auto_temp_agent.max_workers
    ) as executor:
        with patch.object(
            executor, "submit", side_effect=[None, None, None, None, None, None]
        ):

@ -44,7 +44,9 @@ class TestBingChat(unittest.TestCase):
        original_image_gen = BingChat.ImageGen
        BingChat.ImageGen = MockImageGen

        img_path = self.chat.create_img(
            "Test prompt", auth_cookie="mock_auth_cookie"
        )
        self.assertEqual(img_path, "./output/mock_image.png")

        BingChat.ImageGen = original_image_gen

@ -127,7 +127,9 @@ def test_clip_multiple_images(clip_instance, sample_image_path):
# Test model inference performance
def test_clip_inference_performance(
    clip_instance, sample_image_path, benchmark
):
    labels = [
        "adenocarcinoma histopathology",
        "brain MRI",

@ -46,7 +46,10 @@ def test_cell_biology_response(biogpt_instance):
# 40. Test for a question about protein structure
def test_protein_structure_response(biogpt_instance):
    question = (
        "What's the difference between alpha helix and beta sheet structures in"
        " proteins?"
    )
    response = biogpt_instance(question)
    assert response and isinstance(response, str)

@ -49,7 +49,9 @@ def test_cohere_stream_api_error_handling(cohere_instance):
    cohere_instance.model = "base"
    cohere_instance.cohere_api_key = "invalid-api-key"
    with pytest.raises(Exception):
        generator = cohere_instance.stream(
            "Error handling with invalid API key."
        )
        for token in generator:
            pass
@ -94,13 +96,17 @@ def test_cohere_call_with_stop(cohere_instance):
def test_cohere_stream_with_stop(cohere_instance):
    generator = cohere_instance.stream(
        "Write a story.", stop=["stop1", "stop2"]
    )
    for token in generator:
        assert isinstance(token, str)


def test_cohere_async_call_with_stop(cohere_instance):
    response = cohere_instance.async_call(
        "Tell me a joke.", stop=["stop1", "stop2"]
    )
    assert response == "Mocked Response from Cohere"
@ -187,14 +193,22 @@ def test_cohere_generate_with_embed_english_v2(cohere_instance):
def test_cohere_generate_with_embed_english_light_v2(cohere_instance):
    cohere_instance.model = "embed-english-light-v2.0"
    response = cohere_instance(
        "Generate embeddings with English Light v2.0 model."
    )
    assert response.startswith(
        "Generated embeddings with English Light v2.0 model"
    )


def test_cohere_generate_with_embed_multilingual_v2(cohere_instance):
    cohere_instance.model = "embed-multilingual-v2.0"
    response = cohere_instance(
        "Generate embeddings with Multilingual v2.0 model."
    )
    assert response.startswith(
        "Generated embeddings with Multilingual v2.0 model"
    )


def test_cohere_generate_with_embed_english_v3(cohere_instance):
@ -205,14 +219,22 @@ def test_cohere_generate_with_embed_english_v3(cohere_instance):
def test_cohere_generate_with_embed_english_light_v3(cohere_instance):
    cohere_instance.model = "embed-english-light-v3.0"
    response = cohere_instance(
        "Generate embeddings with English Light v3.0 model."
    )
    assert response.startswith(
        "Generated embeddings with English Light v3.0 model"
    )


def test_cohere_generate_with_embed_multilingual_v3(cohere_instance):
    cohere_instance.model = "embed-multilingual-v3.0"
    response = cohere_instance(
        "Generate embeddings with Multilingual v3.0 model."
    )
    assert response.startswith(
        "Generated embeddings with Multilingual v3.0 model"
    )


def test_cohere_generate_with_embed_multilingual_light_v3(cohere_instance):
@ -423,7 +445,9 @@ def test_cohere_representation_model_classification(cohere_instance):
def test_cohere_representation_model_language_detection(cohere_instance):
    # Test using the Representation model for language detection
    cohere_instance.model = "embed-english-v3.0"
    language = cohere_instance.detect_language(
        "Detect the language of this text."
    )
    assert isinstance(language, str)
@ -447,7 +471,9 @@ def test_cohere_representation_model_multilingual_embedding(cohere_instance):
    assert len(embedding) > 0


def test_cohere_representation_model_multilingual_classification(
    cohere_instance,
):
    # Test using the Representation model for multilingual text classification
    cohere_instance.model = "embed-multilingual-v3.0"
    classification = cohere_instance.classify("Classify multilingual text.")
@ -456,7 +482,9 @@ def test_cohere_representation_model_multilingual_classification(cohere_instance
    assert "score" in classification


def test_cohere_representation_model_multilingual_language_detection(
    cohere_instance,
):
    # Test using the Representation model for multilingual language detection
    cohere_instance.model = "embed-multilingual-v3.0"
    language = cohere_instance.detect_language(
@ -471,12 +499,17 @@ def test_cohere_representation_model_multilingual_max_tokens_limit_exceeded(
    # Test handling max tokens limit exceeded error for multilingual model
    cohere_instance.model = "embed-multilingual-v3.0"
    cohere_instance.max_tokens = 10
    prompt = (
        "This is a test prompt that will exceed the max tokens limit for"
        " multilingual model."
    )
    with pytest.raises(ValueError):
        cohere_instance.embed(prompt)


def test_cohere_representation_model_multilingual_light_embedding(
    cohere_instance,
):
    # Test using the Representation model for multilingual light text embedding
    cohere_instance.model = "embed-multilingual-light-v3.0"
    embedding = cohere_instance.embed("Generate multilingual light embeddings.")
@ -484,10 +517,14 @@ def test_cohere_representation_model_multilingual_light_embedding(cohere_instanc
    assert len(embedding) > 0


def test_cohere_representation_model_multilingual_light_classification(
    cohere_instance,
):
    # Test using the Representation model for multilingual light text classification
    cohere_instance.model = "embed-multilingual-light-v3.0"
    classification = cohere_instance.classify(
        "Classify multilingual light text."
    )
    assert isinstance(classification, dict)
    assert "class" in classification
    assert "score" in classification
@ -510,7 +547,10 @@ def test_cohere_representation_model_multilingual_light_max_tokens_limit_exceede
    # Test handling max tokens limit exceeded error for multilingual light model
    cohere_instance.model = "embed-multilingual-light-v3.0"
    cohere_instance.max_tokens = 10
    prompt = (
        "This is a test prompt that will exceed the max tokens limit for"
        " multilingual light model."
    )
    with pytest.raises(ValueError):
        cohere_instance.embed(prompt)
@ -553,19 +593,26 @@ def test_cohere_representation_model_english_classification(cohere_instance):
    assert "score" in classification


def test_cohere_representation_model_english_language_detection(
    cohere_instance,
):
    # Test using the Representation model for English language detection
    cohere_instance.model = "embed-english-v3.0"
    language = cohere_instance.detect_language(
        "Detect the language of English text."
    )
    assert isinstance(language, str)


def test_cohere_representation_model_english_max_tokens_limit_exceeded(
    cohere_instance,
):
    # Test handling max tokens limit exceeded error for English model
    cohere_instance.model = "embed-english-v3.0"
    cohere_instance.max_tokens = 10
    prompt = (
        "This is a test prompt that will exceed the max tokens limit for"
        " English model."
    )
    with pytest.raises(ValueError):
        cohere_instance.embed(prompt)
@ -579,7 +626,9 @@ def test_cohere_representation_model_english_light_embedding(cohere_instance):
    assert len(embedding) > 0


def test_cohere_representation_model_english_light_classification(
    cohere_instance,
):
    # Test using the Representation model for English light text classification
    cohere_instance.model = "embed-english-light-v3.0"
    classification = cohere_instance.classify("Classify English light text.")
@ -588,7 +637,9 @@ def test_cohere_representation_model_english_light_classification(cohere_instanc
    assert "score" in classification


def test_cohere_representation_model_english_light_language_detection(
    cohere_instance,
):
    # Test using the Representation model for English light language detection
    cohere_instance.model = "embed-english-light-v3.0"
    language = cohere_instance.detect_language(
@ -603,7 +654,10 @@ def test_cohere_representation_model_english_light_max_tokens_limit_exceeded(
    # Test handling max tokens limit exceeded error for English light model
    cohere_instance.model = "embed-english-light-v3.0"
    cohere_instance.max_tokens = 10
    prompt = (
        "This is a test prompt that will exceed the max tokens limit for"
        " English light model."
    )
    with pytest.raises(ValueError):
        cohere_instance.embed(prompt)
