accountant swarm + autotemp agent

pull/109/head
Kye 1 year ago
parent e9c712f8f3
commit 11b02a5d13

@@ -0,0 +1,61 @@
# !pip install --upgrade swarms==2.0.6
from swarms.models import OpenAIChat
from swarms.models.nougat import Nougat
from swarms.structs import Flow
from swarms.structs.sequential_workflow import SequentialWorkflow

# Path to the image of the financial document
IMAGE_OF_FINANCIAL_DOC_URL = "bank_statement_2.jpg"

# Example usage
api_key = ""  # Your actual API key here


# Run OCR over the financial document with Nougat
def ocr_model(img: str):
    ocr = Nougat()
    analyze_finance_docs = ocr(img)
    return str(analyze_finance_docs)


# Initialize the language model
llm = OpenAIChat(
    model_name="gpt-4-turbo",
    openai_api_key=api_key,
    temperature=0.5,
    max_tokens=3000,
)


# Build a prompt for the language model from the OCR'd document
def summary_agent_prompt(analyzed_doc: str):
    analyzed_doc = ocr_model(img=analyzed_doc)
    return f"""
    Generate an actionable summary of this financial document, provide bulletpoints:

    Here is the Analyzed Document:
    ---
    {analyzed_doc}
    """


# Initialize a Flow for the summary task
flow1 = Flow(llm=llm, max_loops=1, dashboard=False)

# Create another Flow for a different task
flow2 = Flow(llm=llm, max_loops=1, dashboard=False)

# Create the workflow
workflow = SequentialWorkflow(max_loops=1)

# Add tasks to the workflow
workflow.add(summary_agent_prompt(IMAGE_OF_FINANCIAL_DOC_URL), flow1)

# The next task takes the output of the first task as input
workflow.add(
    "Provide an actionable step by step plan on how to cut costs from the analyzed financial document.",
    flow2,
)

# Run the workflow
workflow.run()

# Output the results
for task in workflow.tasks:
    print(f"Task: {task.description}, Result: {task.result}")

Binary file not shown (added image, 538 KiB).

@@ -0,0 +1,101 @@
import re
from concurrent.futures import ThreadPoolExecutor, as_completed

from swarms.models import OpenAIChat


class AutoTempAgent:
    """
    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.

    Flow:
        1. Generate outputs at a range of temperature settings.
        2. Evaluate each output using the default temperature setting.
        3. Select the best output based on the evaluation score.
        4. Return the best output.

    Args:
        temperature (float, optional): The default temperature setting to use. Defaults to 0.5.
        api_key (str, optional): Your OpenAI API key. Defaults to None.
        alt_temps (list, optional): A list of alternative temperature settings to try. Defaults to None.
        auto_select (bool, optional): If True, the best temperature setting will be automatically selected. Defaults to True.
        max_workers (int, optional): The maximum number of workers to use when generating outputs. Defaults to 6.

    Returns:
        str: The best output, or all outputs with their scores when auto_select is False.

    Examples:
        >>> from swarms.demos.autotemp import AutoTemp
        >>> autotemp = AutoTemp()
        >>> autotemp.run("Generate a 10,000 word blog on mental clarity and the benefits of meditation.", "0.4,0.6,0.8,1.0,1.2,1.4")
        Best AutoTemp Output (Temp 0.4 | Score: 100.0):
        Generate a 10,000 word blog on mental clarity and the benefits of meditation.
    """

    def __init__(
        self,
        temperature: float = 0.5,
        api_key: str = None,
        alt_temps=None,
        auto_select=True,
        max_workers=6,
    ):
        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
        self.auto_select = auto_select
        self.max_workers = max_workers
        self.temperature = temperature
        self.llm = OpenAIChat(
            openai_api_key=api_key,
            temperature=temperature,
        )

    def evaluate_output(self, output: str):
        """Evaluate the output using the default temperature setting."""
        eval_prompt = f"""
        Evaluate the following output which was generated at a temperature setting of {self.temperature}.
        Provide a precise score from 0.0 to 100.0, considering the criteria of relevance, clarity, utility, pride, and delight.

        Output to evaluate:
        ---
        {output}
        ---
        """
        score_text = self.llm(prompt=eval_prompt)
        score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
        return round(float(score_match.group()), 1) if score_match else 0.0

    def run(self, task: str, temperature_string):
        """Run the AutoTemp agent."""
        temperature_list = [
            float(temp.strip()) for temp in temperature_string.split(",")
        ]
        outputs = {}
        scores = {}
        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
            future_to_temp = {
                executor.submit(self.llm.generate, task, temp): temp
                for temp in temperature_list
            }
            for future in as_completed(future_to_temp):
                temp = future_to_temp[future]
                output_text = future.result()
                outputs[temp] = output_text
                scores[temp] = self.evaluate_output(output_text)

        if not scores:
            return "No valid outputs generated.", None

        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
        best_temp, best_score = sorted_scores[0]
        best_output = outputs[best_temp]

        return (
            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
            if self.auto_select
            else "\n".join(
                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
                for temp, score in sorted_scores
            )
        )
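
A minimal usage sketch of the class as defined above; the placeholder key and task string are illustrative:

# Illustrative usage; substitute a real OpenAI API key.
if __name__ == "__main__":
    agent = AutoTempAgent(api_key="YOUR_API_KEY", auto_select=True)
    report = agent.run(
        "Write a one-paragraph summary of why temperature affects sampling.",
        "0.4,0.8,1.2",
    )
    print(report)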

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

[tool.poetry]
name = "swarms"
- version = "2.0.5"
+ version = "2.0.7"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@@ -165,7 +165,6 @@ def get_tools(product_catalog):
        func=knowledge_base.run,
        description="useful for when you need to answer questions about product information",
    ),
-   # omnimodal agent
]

@@ -20,21 +20,15 @@ import os
import sys


@dataclass
class OmniChunker:
-    """
-    """
+    """ """

    chunk_size: int = 1000
    beautify: bool = False
    use_tokenizer: bool = False
    tokenizer: Optional[Callable[[str], List[str]]] = None

    def __call__(self, file_path: str) -> List[str]:
        """
        Chunk the given file into parts of size `chunk_size`.

@@ -121,4 +115,3 @@ class OmniChunker:
                "cyan",
            )
        )
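
The `__call__` docstring describes fixed-size chunking; the body is not shown in this hunk, but a generic sketch of that technique (names here are illustrative, not OmniChunker's actual implementation) is:

# Split text into consecutive pieces of at most chunk_size characters.
def chunk_text(text: str, chunk_size: int = 1000) -> list:
    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]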

@@ -111,8 +111,8 @@ class BioGPT:
            num_return_sequences=self.num_return_sequences,
            do_sample=self.do_sample,
        )
-       return out[0]['generated_text']
+       return out[0]["generated_text"]

    def get_features(self, text):
        """

@@ -61,9 +61,10 @@ class Nougat:
            pixel_values.to(self.device),
            min_length=self.min_length,
            max_new_tokens=self.max_new_tokens,
            bad_words_ids=[[self.processor.tokenizer.unk_token_id]],
        )
        sequence = self.processor.batch_decode(outputs, skip_special_tokens=True)[0]
        sequence = self.processor.post_process_generation(sequence, fix_markdown=False)
-       return sequence
+       print(repr(sequence))
+       return sequence

@@ -80,9 +80,7 @@ class OpenAITokenizer(BaseTokenizer):
        return (tokens if tokens else self.DEFAULT_MAX_TOKENS) - offset

-   def count_tokens(
-       self, text: str | list, model: Optional[str] = None
-   ) -> int:
+   def count_tokens(self, text: str | list, model: Optional[str] = None) -> int:
        """
        Handles the special case of ChatML. Implementation adopted from the official OpenAI notebook:
        https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb

@@ -144,7 +142,5 @@ class OpenAITokenizer(BaseTokenizer):
            return num_tokens
        else:
            return len(
-               self.encoding.encode(
-                   text, allowed_special=set(self.stop_sequences)
-               )
-           )
+               self.encoding.encode(text, allowed_special=set(self.stop_sequences))
            )
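
For context, a minimal sketch of the tiktoken pattern this method builds on, following the linked cookbook notebook (model name and sample text are illustrative):

import tiktoken

encoding = tiktoken.encoding_for_model("gpt-4")
print(len(encoding.encode("Hello, swarms!")))  # token count for plain text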

@@ -2,7 +2,7 @@ import queue
import threading
from time import sleep
from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
-from swarms.workers.worker import Worker
+from swarms.structs.flow import Flow


class AutoScaler:

@@ -52,7 +52,7 @@ class AutoScaler:
        busy_threshold=0.7,
        agent=None,
    ):
-       self.agent = agent or Worker
+       self.agent = agent or Flow
        self.agents_pool = [self.agent() for _ in range(initial_agents)]
        self.task_queue = queue.Queue()
        self.scale_up_factor = scale_up_factor

@@ -71,7 +71,7 @@ class AutoScaler:
        with self.lock:
            new_agents_counts = len(self.agents_pool) * self.scale_up_factor
            for _ in range(new_agents_counts):
-               self.agents_pool.append(Worker())
+               self.agents_pool.append(Flow())

    def scale_down(self):
        """scale down"""