Merge branch 'master' into master

pull/153/head
evelynmitchell 1 year ago committed by GitHub
commit 949dbd7c56
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

@ -241,9 +241,11 @@ Swarms framework is not just a tool but a robust, scalable, and secure partner i
- We're always looking for contributors to help us improve and expand this project. If you're interested, please check out our [Contributing Guidelines](CONTRIBUTING.md) and our [contributing board](https://github.com/users/kyegomez/projects/1)
## Community
- [Join the Swarms community here on Discord!](https://discord.gg/AJazBmhKnr)
- [Join the Swarms community on Discord!](https://discord.gg/AJazBmhKnr)
- Join our Swarms Community Gathering every Thursday at 1pm NYC Time to unlock the potential of autonomous agents in automating your daily tasks! [Sign up here](https://lu.ma/5p2jnc2v)
# Discovery Call
## Discovery Call
Book a discovery call with the Swarms team to learn how to optimize and scale your swarm! [Click here to book a time that works for you!](https://calendly.com/swarm-corp/30min?month=2023-11)
# License

@ -0,0 +1,72 @@
import os

from dotenv import load_dotenv

from swarms.models import Anthropic, OpenAIChat
from swarms.prompts.accountant_swarm_prompts import (
    DECISION_MAKING_PROMPT,
    DOC_ANALYZER_AGENT_PROMPT,
    SUMMARY_GENERATOR_AGENT_PROMPT,
)
from swarms.structs import Flow
from swarms.utils.pdf_to_text import pdf_to_text

# Environment variables: API keys are read from a local .env file.
load_dotenv()
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")

# Base llms
llm1 = OpenAIChat(
    openai_api_key=openai_api_key,
    max_tokens=5000,
)
llm2 = Anthropic(
    anthropic_api_key=anthropic_api_key,
    max_tokens=5000,
)

# Agents — each Flow wraps llm2 with a task-specific SOP prompt and loops
# until the model signals completion ("auto").
doc_analyzer_agent = Flow(
    llm=llm2,
    sop=DOC_ANALYZER_AGENT_PROMPT,
    max_loops="auto",
)
summary_generator_agent = Flow(
    llm=llm2,
    sop=SUMMARY_GENERATOR_AGENT_PROMPT,
    max_loops="auto",
)
decision_making_support_agent = Flow(
    llm=llm2,
    sop=DECISION_MAKING_PROMPT,
    max_loops="auto",
)

# Pipeline inputs (PEP 8: spaces around "=").
pdf_path = "swarmdeck_a1.pdf"
fraud_detection_instructions = "Detect fraud in the document"
summary_agent_instructions = "Generate an actionable summary of the document"
decision_making_support_agent_instructions = (
    "Provide decision making support to the business owner:"
)

# Transform the pdf to text
pdf_text = pdf_to_text(pdf_path)
print(pdf_text)

# Detect fraud in the document
fraud_detection_agent_output = doc_analyzer_agent.run(
    f"{fraud_detection_instructions}: {pdf_text}"
)

# Generate an actionable summary of the document
summary_agent_output = summary_generator_agent.run(
    f"{summary_agent_instructions}: {fraud_detection_agent_output}"
)

# Provide decision making support to the accountant
decision_making_support_agent_output = decision_making_support_agent.run(
    f"{decision_making_support_agent_instructions}: {summary_agent_output}"
)

@ -104,9 +104,9 @@ nav:
- swarms.memory:
- PineconeVectorStoreStore: "swarms/memory/pinecone.md"
- PGVectorStore: "swarms/memory/pg.md"
- swarms.chunkers:
- BaseChunker: "swarms/chunkers/basechunker.md"
- PdfChunker: "swarms/chunkers/pdf_chunker.md"
# - swarms.chunkers:
# - BaseChunker: "swarms/chunkers/basechunker.md"
# - PdfChunker: "swarms/chunkers/pdf_chunker.md"
- Guides:
- Overview: "examples/index.md"
- Agents:

@ -1,35 +1,117 @@
import os
import re
from typing import List

from dotenv import load_dotenv

from swarms.models import Anthropic, OpenAIChat
from swarms.models.nougat import Nougat
from swarms.prompts.accountant_swarm_prompts import (
    DECISION_MAKING_PROMPT,
    DOC_ANALYZER_AGENT_PROMPT,
    FRAUD_DETECTION_AGENT_PROMPT,
    SUMMARY_GENERATOR_AGENT_PROMPT,
)
from swarms.structs import Flow
from swarms.utils.pdf_to_text import pdf_to_text

# NOTE(review): this region was a corrupted diff that interleaved the old
# LayoutLMDocumentQA example with the new agent setup. The stale lines
# (api_key = "", the old llm construction, and the LayoutLM question/answer
# block) have been dropped and only the new version kept — confirm against
# the repository history.

# Path of the image of the financial document.
IMAGE_OF_FINANCIAL_DOC_URL = "bank_statement_2.jpg"

# Environment variables: API keys are read from a local .env file.
load_dotenv()
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")

# Base llms
llm1 = OpenAIChat(
    openai_api_key=openai_api_key,
)
llm2 = Anthropic(
    anthropic_api_key=anthropic_api_key,
)

# Agents — each Flow pairs an llm with a task-specific SOP prompt.
doc_analyzer_agent = Flow(
    llm=llm1,
    sop=DOC_ANALYZER_AGENT_PROMPT,
)
summary_generator_agent = Flow(
    llm=llm2,
    sop=SUMMARY_GENERATOR_AGENT_PROMPT,
)
decision_making_support_agent = Flow(
    llm=llm2,
    sop=DECISION_MAKING_PROMPT,
)
class AccountantSwarms:
    """
    Accountant Swarms is a collection of agents that work together to help
    accountants with their work.

    Flow: analyze doc -> detect fraud -> generate summary -> decision making support

    The agents are:
    - Document Analyzer: extracts text from the financial document
    - Fraud Detection: detects fraud in the document
    - Summary Agent: generates an actionable summary of the document
    - Decision Making Support: provides decision making support to the accountant

    The workflow, implemented in :meth:`run`, is:
    1. Transform the PDF at ``pdf_path`` into plain text.
    2. The document analyzer agent scans the text (prompted with the
       fraud-detection instructions).
    3. The summary agent summarizes that output.
    4. The decision-making support agent advises based on the summary.
    """

    def __init__(
        self,
        pdf_path: str,
        list_pdfs: List[str] = None,
        fraud_detection_instructions: str = None,
        summary_agent_instructions: str = None,
        decision_making_support_agent_instructions: str = None,
    ):
        super().__init__()
        self.pdf_path = pdf_path
        # NOTE(review): list_pdfs is stored but never read in this class —
        # presumably reserved for batch processing; confirm before relying on it.
        self.list_pdfs = list_pdfs
        self.fraud_detection_instructions = fraud_detection_instructions
        self.summary_agent_instructions = summary_agent_instructions
        self.decision_making_support_agent_instructions = (
            decision_making_support_agent_instructions
        )

    def run(self):
        """Run the full pipeline and return the decision-support output."""
        # Transform the pdf to text
        pdf_text = pdf_to_text(self.pdf_path)

        # Detect fraud in the document
        fraud_detection_agent_output = doc_analyzer_agent.run(
            f"{self.fraud_detection_instructions}: {pdf_text}"
        )

        # Generate an actionable summary of the document
        summary_agent_output = summary_generator_agent.run(
            f"{self.summary_agent_instructions}: {fraud_detection_agent_output}"
        )

        # Provide decision making support to the accountant
        decision_making_support_agent_output = decision_making_support_agent.run(
            f"{self.decision_making_support_agent_instructions}: {summary_agent_output}"
        )

        return decision_making_support_agent_output
# Build the accountant swarm for one PDF. Note: nothing in this snippet
# calls swarm.run(), so the pipeline is constructed but not executed here.
swarm = AccountantSwarms(
pdf_path="tesla.pdf",
fraud_detection_instructions="Detect fraud in the document",
summary_agent_instructions="Generate an actionable summary of the document",
decision_making_support_agent_instructions="Provide decision making support to the business owner:",
)

@ -0,0 +1,50 @@
import os

from dotenv import load_dotenv

from swarms.models import Anthropic, OpenAIChat
from swarms.prompts.ai_research_team import (
    PAPER_IMPLEMENTOR_AGENT_PROMPT,
    PAPER_SUMMARY_ANALYZER,
)
from swarms.structs import Flow
from swarms.utils.pdf_to_text import pdf_to_text

# Environment variables: API keys are read from a local .env file.
load_dotenv()
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
openai_api_key = os.getenv("OPENAI_API_KEY")

# Paper to analyze and implement.
PDF_PATH = "shallowfeedforward.pdf"

# Base llms (single "# Base llms" comment; the duplicate was removed).
llm1 = OpenAIChat(
    openai_api_key=openai_api_key,
)
llm2 = Anthropic(
    anthropic_api_key=anthropic_api_key,
)

# Agents: one summarizes the paper into pseudocode, the other implements it.
# Both run one loop and autosave their state to JSON.
paper_summarizer_agent = Flow(
    llm=llm2,
    sop=PAPER_SUMMARY_ANALYZER,
    max_loops=1,
    autosave=True,
    saved_state_path="paper_summarizer.json",
)
paper_implementor_agent = Flow(
    llm=llm1,
    sop=PAPER_IMPLEMENTOR_AGENT_PROMPT,
    max_loops=1,
    autosave=True,
    saved_state_path="paper_implementor.json",
)

# Extract the paper text, summarize it to pseudocode, then implement it.
paper = pdf_to_text(PDF_PATH)
algorithmic_pseudocode_agent = paper_summarizer_agent.run(paper)
pytorch_code = paper_implementor_agent.run(algorithmic_pseudocode_agent)

@ -0,0 +1,19 @@
from swarms.structs import Flow, SequentialWorkflow
from swarms.models import OpenAIChat, Anthropic

# Language models for the two stages of the pipeline.
llm = OpenAIChat()
llm2 = Anthropic()

# Two single-pass flows: the first drafts algorithmic pseudocode from a
# paper, the second turns that pseudocode into PyTorch code.
flow1 = Flow(llm2, max_loops=1)
flow2 = Flow(llm, max_loops=1)

# Chain the flows back to back as one named, autosaving workflow.
workflow = SequentialWorkflow(
    [flow1, flow2],
    name="Paper to Code",
    description="This workflow takes a paper and converts it to code.",
    autosave=True,
    max_loops=1,
)

@ -0,0 +1,10 @@
# Example: caption an image with the Fuyu vision-language model.
from swarms import Flow, Fuyu

# Fuyu model instance with default settings.
llm = Fuyu()

# The flow loops until the model signals completion ("auto").
flow = Flow(max_loops="auto", llm=llm)

# Ask the model to describe the linked image.
flow.run(
task="Describe this image in a few sentences: ",
img="https://unsplash.com/photos/0pIC5ByPpZY",
)

@ -0,0 +1,14 @@
# This might not work in the beginning but it's a starting point
# NOTE(review): importing GPT4V from swarms.structs looks suspect — model
# classes elsewhere in this repo come from swarms.models; confirm the path.
from swarms.structs import Flow, GPT4V

# GPT-4 Vision model instance with default settings.
llm = GPT4V()

# The flow loops until the model signals completion ("auto").
flow = Flow(
max_loops="auto",
llm=llm,
)

# Ask the model to describe the linked image.
flow.run(
task="Describe this image in a few sentences: ",
img="https://unsplash.com/photos/0pIC5ByPpZY",
)

@ -1,15 +0,0 @@
from swarms import WorkerUltraUltraNode
# Define an objective
objective = """
Please make a web GUI for using HTTP API server.
The name of it is Swarms.
You can check the server code at ./main.py.
The server is served on localhost:8000.
Users should be able to write text input as 'query' and url array as 'files', and check the response.
Users input form should be delivered in JSON format.
I want it to have neumorphism-style. Serve it on port 4500.
"""
node = WorkerUltraUltraNode(objective)
result = node.execute()

@ -1,17 +0,0 @@
from langchain.models import OpenAIChat
from swarms import Worker
llm = OpenAIChat(model_name="gpt-4", openai_api_key="api-key", temperature=0.5)
node = Worker(
llm=llm,
ai_name="Optimus Prime",
ai_role="Worker in a swarm",
external_tools=None,
human_in_the_loop=False,
temperature=0.5,
)
task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."
response = node.run(task)
print(response)

@ -1,15 +0,0 @@
from swarms import worker_node
# Your OpenAI API key
api_key = "sksdsds"
# Initialize a WorkerNode with your API key
node = worker_node(api_key)
# Define an objective
objective = "Please make a web GUI for using HTTP API server..."
# Run the task
task = node.run(objective)
print(task)

@ -1,25 +0,0 @@
import os
from swarms.swarms.swarms import WorkerUltra
api_key = os.getenv("OPENAI_API_KEY")
# Define an objective
objective = """
Please make a web GUI for using HTTP API server.
The name of it is Swarms.
You can check the server code at ./main.py.
The server is served on localhost:8000.
Users should be able to write text input as 'query' and url array as 'files', and check the response.
Users input form should be delivered in JSON format.
I want it to have neumorphism-style. Serve it on port 4500.
"""
# Create an instance of WorkerUltra
worker = WorkerUltra(objective, api_key)
# Execute the task
result = worker.execute()
# Print the result
print(result)

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "2.3.5"
version = "2.3.9"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@ -39,10 +39,10 @@ backoff = "*"
marshmallow = "*"
datasets = "*"
diffusers = "*"
PyPDF2 = "*"
accelerate = "*"
sentencepiece = "*"
wget = "*"
griptape = "*"
httpx = "*"
tiktoken = "*"
safetensors = "*"

@ -27,13 +27,13 @@ huggingface-hub
google-generativeai
sentencepiece
duckduckgo-search
PyPDF2
agent-protocol
accelerate
chromadb
tiktoken
tabulate
colored
griptape
addict
backoff
ratelimit

@ -4,9 +4,11 @@ from swarms.structs.sequential_workflow import SequentialWorkflow
import os
# Example usage
openai_api_key = os.environ.get("OPENAI_API_KEY")
anthropic_api_key = os.environ.get("ANTHROPIC_API_KEY")
# Initialize the language flow
llm = OpenAIChat(
openai_api_key=openai_api_key,
@ -46,4 +48,4 @@ workflow.run()
# Output the results
for task in workflow.tasks:
print(f"Task: {task.description}, Result: {task.result}")
print(f"Task: {task.description}, Result: {task.result}")

@ -8,4 +8,4 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from swarms.agents import * # noqa: E402, F403
from swarms.swarms import * # noqa: E402, F403
from swarms.structs import * # noqa: E402, F403
from swarms.models import * # noqa: E402, F403
from swarms.models import * # noqa: E402, F403

@ -1,81 +0,0 @@
from __future__ import annotations
import json
import uuid
from abc import ABC, abstractmethod
from attr import define, field, Factory
from marshmallow import class_registry
from marshmallow.exceptions import RegistryError
@define
class BaseArtifact(ABC):
    """Abstract base class for artifacts passed between components.

    Subclasses must implement ``to_text``, ``to_dict`` and ``__add__``.
    Serialization round-trips through marshmallow schemas registered under
    the artifact's ``type`` name.
    """

    # Unique hex id, generated per instance.
    id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True)
    # Defaults to the id unless explicitly provided.
    name: str = field(
        default=Factory(lambda self: self.id, takes_self=True), kw_only=True
    )
    # Payload carried by the artifact; subclasses constrain its type.
    value: any = field()
    # Defaults to the concrete subclass name (e.g. "TextArtifact").
    type: str = field(
        default=Factory(lambda self: self.__class__.__name__, takes_self=True),
        kw_only=True,
    )

    @classmethod
    def value_to_bytes(cls, value: any) -> bytes:
        """Return *value* unchanged if already bytes, else its str() encoded."""
        if isinstance(value, bytes):
            return value
        else:
            return str(value).encode()

    @classmethod
    def value_to_dict(cls, value: any) -> dict:
        """Return *value* as a dict, JSON-decoding it when it is not one."""
        if isinstance(value, dict):
            dict_value = value
        else:
            dict_value = json.loads(value)
        # Shallow copy so callers cannot mutate the original mapping.
        return {k: v for k, v in dict_value.items()}

    @classmethod
    def from_dict(cls, artifact_dict: dict) -> BaseArtifact:
        """Rebuild a concrete artifact from its dict form.

        Dispatches on ``artifact_dict["type"]`` via the marshmallow class
        registry; raises ValueError for unknown types.
        """
        # Imported lazily to avoid a module-level griptape dependency.
        from griptape.schemas import (
            TextArtifactSchema,
            InfoArtifactSchema,
            ErrorArtifactSchema,
            BlobArtifactSchema,
            CsvRowArtifactSchema,
            ListArtifactSchema,
        )

        class_registry.register("TextArtifact", TextArtifactSchema)
        class_registry.register("InfoArtifact", InfoArtifactSchema)
        class_registry.register("ErrorArtifact", ErrorArtifactSchema)
        class_registry.register("BlobArtifact", BlobArtifactSchema)
        class_registry.register("CsvRowArtifact", CsvRowArtifactSchema)
        class_registry.register("ListArtifact", ListArtifactSchema)

        try:
            return class_registry.get_class(artifact_dict["type"])().load(artifact_dict)
        except RegistryError:
            raise ValueError("Unsupported artifact type")

    @classmethod
    def from_json(cls, artifact_str: str) -> BaseArtifact:
        """Rebuild an artifact from its JSON string form."""
        return cls.from_dict(json.loads(artifact_str))

    def __str__(self):
        # String form is the JSON serialization.
        return json.dumps(self.to_dict())

    def to_json(self) -> str:
        """Serialize the artifact to a JSON string."""
        return json.dumps(self.to_dict())

    @abstractmethod
    def to_text(self) -> str:
        ...

    @abstractmethod
    def to_dict(self) -> dict:
        ...

    @abstractmethod
    def __add__(self, other: BaseArtifact) -> BaseArtifact:
        ...

@ -1,19 +0,0 @@
from __future__ import annotations
from attr import define, field
from swarms.artifacts.base import BaseArtifact
@define(frozen=True)
class ErrorArtifact(BaseArtifact):
    """Immutable artifact wrapping an error message string."""

    # Error text; the converter coerces any input to str.
    value: str = field(converter=str)

    def __add__(self, other: ErrorArtifact) -> ErrorArtifact:
        """Return a new artifact whose message is the two concatenated."""
        return ErrorArtifact(self.value + other.value)

    def to_text(self) -> str:
        """Return the raw error message."""
        return self.value

    def to_dict(self) -> dict:
        """Serialize via the griptape ErrorArtifactSchema."""
        # Imported lazily to avoid a module-level griptape dependency.
        from griptape.schemas import ErrorArtifactSchema

        return dict(ErrorArtifactSchema().dump(self))

@ -1,74 +0,0 @@
from __future__ import annotations
import pprint
import json
from typing import Optional
from pydantic import BaseModel, Field, StrictStr
class Artifact(BaseModel):
    """
    Artifact that a task has produced.

    Attributes:
    -----------
    artifact_id: str
        ID of the artifact
    file_name: str
        Filename of the artifact
    relative_path: str
        Relative path of the artifact (optional)
    """

    artifact_id: StrictStr = Field(..., description="ID of the artifact")
    file_name: StrictStr = Field(..., description="Filename of the artifact")
    relative_path: Optional[StrictStr] = Field(
        None, description="Relative path of the artifact"
    )
    # Field names handled by from_dict/to_dict below.
    __properties = ["artifact_id", "file_name", "relative_path"]

    class Config:
        """Pydantic configuration"""

        # Pydantic v1 options: accept field names as well as aliases, and
        # re-validate on attribute assignment.
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Returns the string representation of the model using alias"""
        return pprint.pformat(self.dict(by_alias=True))

    @classmethod
    def from_json(cls, json_str: str) -> Artifact:
        """Create an instance of Artifact from a json string"""
        return cls.from_dict(json.loads(json_str))

    def to_dict(self):
        """Returns the dict representation of the model"""
        _dict = self.dict(by_alias=True, exclude={}, exclude_none=True)
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> Artifact:
        """Create an instance of Artifact from a dict"""
        # None propagates as None rather than raising.
        if obj is None:
            return None

        # Non-dict input is handed straight to pydantic parsing.
        if not isinstance(obj, dict):
            return Artifact.parse_obj(obj)

        _obj = Artifact.parse_obj(
            {
                "artifact_id": obj.get("artifact_id"),
                "file_name": obj.get("file_name"),
                "relative_path": obj.get("relative_path"),
            }
        )
        return _obj

@ -1,3 +1,4 @@
import os
from openai import OpenAI
client = OpenAI()

@ -0,0 +1,90 @@
ONBOARDING_AGENT_PROMPT = """
Onboarding:
"As the Onboarding Agent, your role is critical in guiding new users, particularly tech-savvy entrepreneurs, through the initial stages of engaging with our advanced swarm technology services. Begin by welcoming users in a friendly, professional manner, setting a positive tone for the interaction. Your conversation should flow logically, starting with an introduction to our services and their potential benefits for the user's specific business context.
Inquire about their industry, delving into specifics such as the industry's current trends, challenges, and the role technology plays in their sector. Show expertise and understanding by using industry-specific terminology and referencing relevant technological advancements. Ask open-ended questions to encourage detailed responses, enabling you to gain a comprehensive understanding of their business needs and objectives.
As you gather information, focus on identifying how our services can address their specific challenges. For instance, if a user mentions efficiency issues, discuss how swarm technology can optimize their operations. Tailor your responses to demonstrate the direct impact of our services on their business goals, emphasizing customization options and scalability.
Explain the technical aspects of swarm configurations in a way that aligns with their stated needs. Use analogies or real-world examples to simplify complex concepts. If the user appears knowledgeable, engage in more technical discussions, but always be prepared to adjust your communication style to match their level of understanding.
Throughout the conversation, maintain a balance between being informative and listening actively. Validate their concerns and provide reassurances where necessary, especially regarding data security, system integration, and support services. Your objective is to build trust and confidence in our services.
Finally, guide them through the initial setup process. Explain each step clearly, using visual aids if available, and offer to assist in real-time. Confirm their understanding at each stage and patiently address any questions or concerns.
Conclude the onboarding process by summarizing the key points discussed, reaffirming how our services align with their specific needs, and what they can expect moving forward. Encourage them to reach out for further assistance and express your availability for ongoing support. Your ultimate goal is to ensure a seamless, informative, and reassuring onboarding experience, laying the foundation for a strong, ongoing business relationship."
##################
"""
# Prompt for the vision-capable document-analysis agent. The trailing editor
# note ("ok we need to edit this prompt down...") that had leaked into the
# runtime string has been removed.
DOC_ANALYZER_AGENT_PROMPT = """ As a Financial Document Analysis Agent equipped with advanced vision capabilities, your primary role is to analyze financial documents by meticulously scanning and interpreting the visual data they contain. Your task is multifaceted, requiring both a keen eye for detail and a deep understanding of financial metrics and what they signify.
When presented with a financial document, such as a balance sheet, income statement, or cash flow statement, begin by identifying the layout and structure of the document. Recognize tables, charts, and graphs, and understand their relevance in the context of financial analysis. Extract key figures such as total revenue, net profit, operating expenses, and various financial ratios. Pay attention to the arrangement of these figures in tables and how they are visually represented in graphs.
Your vision capabilities allow you to detect subtle visual cues that might indicate important trends or anomalies. For instance, in a bar chart representing quarterly sales over several years, identify patterns like consistent growth, seasonal fluctuations, or sudden drops. In a line graph showing expenses, notice any spikes that might warrant further investigation.
Apart from numerical data, also focus on the textual components within the documents. Extract and comprehend written explanations or notes that accompany financial figures, as they often provide crucial context. For example, a note accompanying an expense report might explain a one-time expenditure that significantly impacted the company's financials for that period.
Go beyond mere data extraction and engage in a level of interpretation that synthesizes the visual and textual information into a coherent analysis. For instance, if the profit margins are shrinking despite increasing revenues, hypothesize potential reasons such as rising costs or changes in the market conditions.
As you process each document, maintain a focus on accuracy and reliability. Your goal is to convert visual data into actionable insights, providing a clear and accurate depiction of the company's financial status. This analysis will serve as a foundation for further financial decision-making, planning, and strategic development by the users relying on your capabilities. Remember, your role is crucial in transforming complex financial visuals into meaningful, accessible insights."
"""
SUMMARY_GENERATOR_AGENT_PROMPT = """
Summarizer:
"As the Financial Summary Generation Agent, your task is to synthesize the complex data extracted by the vision model into clear, concise, and insightful summaries. Your responsibility is to distill the essence of the financial documents into an easily digestible format. Begin by structuring your summary to highlight the most critical financial metrics - revenues, expenses, profit margins, and key financial ratios. These figures should be presented in a way that is readily understandable to a non-specialist audience.
Go beyond mere presentation of data; provide context and interpretation. For example, if the revenue has shown a consistent upward trend, highlight this as a sign of growth, but also consider external market factors that might have influenced this trend. Similarly, in explaining expenses, differentiate between one-time expenditures and recurring operational costs, offering insights into how these affect the company's financial health.
Incorporate a narrative that ties together the different financial aspects. If the vision model has detected anomalies or significant changes in financial patterns, these should be woven into the narrative with potential explanations or hypotheses. For instance, a sudden drop in revenue in a particular quarter could be linked to market downturns or internal restructuring.
Your summary should also touch upon forward-looking aspects. Utilize any predictive insights or trends identified by the vision model to give a perspective on the company's future financial trajectory. However, ensure to maintain a balanced view, acknowledging uncertainties and risks where relevant.
Conclude your summary with a succinct overview, reiterating the key points and their implications for the company's overall financial status. Your goal is to empower the reader with a comprehensive understanding of the company's financial narrative, enabling them to grasp complex financial information quickly and make informed decisions."
##################
"""
FRAUD_DETECTION_AGENT_PROMPT = """
Fraud Detection:
"As the Fraud Detection Agent, your mission is to meticulously scrutinize financial documents for any signs of fraudulent activities. Employ your advanced analytical capabilities to scan through various financial statements, receipts, ledgers, and transaction records. Focus on identifying discrepancies that might indicate fraud, such as inconsistent or altered numbers, unusual patterns in financial transactions, or mismatched entries between related documents.
Your approach should be both systematic and detail-oriented. Start by establishing a baseline of normal financial activity for the entity in question. Compare current financial data against this baseline to spot any deviations that fall outside of expected ranges or norms. Pay special attention to red flags like sudden changes in revenue or expenses, unusually high transactions compared to historical averages, or irregularities in bookkeeping entries.
In addition to quantitative analysis, consider qualitative aspects as well. Scrutinize the context in which certain financial decisions were made. Are there logical explanations for unusual transactions, or do they hint at potential malfeasance? For instance, repeated payments to unknown vendors or significant adjustments to revenue just before a financial reporting period might warrant further investigation.
Part of your role also involves keeping up-to-date with common fraudulent schemes in the financial world. Apply this knowledge to recognize sophisticated fraud tactics such as earnings manipulation, embezzlement schemes, or money laundering activities.
Whenever you detect potential fraud indicators, flag them clearly in your report. Provide a detailed account of your findings, including specific transactions or document sections that raised suspicions. Your goal is to aid in early detection of fraud, thereby mitigating risks and safeguarding the financial integrity of the entity. Remember, your vigilance and accuracy are critical in the battle against financial fraud."
##################
"""
DECISION_MAKING_PROMPT = """
Actionable Decision-Making:
"As the Decision-Making Support Agent, your role is to assist users in making informed financial decisions based on the analysis provided by the Financial Document Analysis and Summary Generation Agents. You are to provide actionable advice and recommendations, grounded in the data but also considering broader business strategies and market conditions.
Begin by reviewing the financial summaries and analysis reports, understanding the key metrics and trends they highlight. Cross-reference this data with industry benchmarks, economic trends, and best practices to provide well-rounded advice. For instance, if the analysis indicates a strong cash flow position, you might recommend strategic investments or suggest areas for expansion.
Address potential risks and opportunities. If the analysis reveals certain vulnerabilities, like over-reliance on a single revenue stream, advise on diversification strategies or risk mitigation tactics. Conversely, if there are untapped opportunities, such as emerging markets or technological innovations, highlight these as potential growth areas.
Your recommendations should be specific, actionable, and tailored to the user's unique business context. Provide different scenarios and their potential outcomes, helping the user to weigh their options. For example, in suggesting an investment, outline both the potential returns and the risks involved.
Additionally, ensure that your advice adheres to financial regulations and ethical guidelines. Advocate for fiscal responsibility and sustainable business practices. Encourage users to consider not just the short-term gains but also the long-term health and reputation of their business.
Ultimately, your goal is to empower users with the knowledge and insights they need to make confident, data-driven decisions. Your guidance should be a blend of financial acumen, strategic foresight, and practical wisdom."
"""

@ -0,0 +1,91 @@
# Prompt for the agent that turns algorithmic pseudocode into PyTorch code.
# Fixes two typos in the original text: "Based the analysis" -> "Based on the
# analysis", and the word "Please" split across lines as "P / lease".
PAPER_IMPLEMENTOR_AGENT_PROMPT = """\
You are Lucidrains, Phil Wang a computer scientist and artificial intelligence researcher
who is widely regarded as one of the leading experts in deep learning and neural network architecture search.
Your work in this area has focused on developing efficient algorithms for searching the space of possible neural network architectures, with the goal of finding architectures that perform well on a given task while minimizing the computational cost of training and inference.
You are an expert in the field of neural architecture search.
Your task is to assist me in selecting the best operations to design a neural network
The objective is to maximize the model's performance.
Your work in this area has focused on developing efficient algorithms for searching the
space of possible neural network architectures, with the goal of finding architectures
that perform well on a given task while minimizing the computational cost of training and inference.
Let's break this down step by step:
Next, please consider the gradient flow based on the ideal model architecture.
For example, how the gradient from the later stage affects the earlier stage.
Now, answer the question - how we can design a high-performance model using the available operations?
Based on the analysis, your task is to propose a model design with the given operations that prioritizes performance, without considering factors such as size and complexity.
After you suggest a design, I will test its actual performance and provide you with feedback.
Based on the results of previous experiments, we can collaborate to iterate and improve the design.
Please avoid suggesting the same design again during this iterative process.
############ CREATE PYTORCH CODE FROM THE FOLLOWING ALGORITHMIC PSEUDOCODE ############
"""
# SOP prompt for producing algorithmic pseudocode from a research paper.
# Fixes a missing apostrophe in the original: "papers algorithm" -> "paper's
# algorithm".
PAPER_SUMMARY_ANALYZER = """
### Standard Operating Procedure (SOP) for Creating Reliable Algorithmic Pseudocode from AI Research Papers
#### Objective
To develop accurate and reliable algorithmic pseudocodes based on techniques and methodologies presented in AI research papers, with a primary focus on ensuring fidelity to the original research.
#### Scope
This SOP targets AI researchers and developers tasked with interpreting and implementing complex algorithms from academic papers into practical pseudocode, particularly in the fields of neural network architecture and deep learning.
#### Procedure
1. **Selection and Comprehensive Reading of Papers:**
- Carefully choose AI research papers that are relevant and credible.
- Conduct a thorough reading to grasp the paper's primary algorithms, theories, and contributions.
2. **In-Depth Analysis for Algorithm Extraction:**
- Dive deep into the methodology section of the paper.
- Understand the theoretical foundation, algorithmic approaches, and computational models used.
- Pay special attention to the nuances of the algorithm and its implementation details.
3. **Drafting Initial Pseudocode:**
- Begin translating the core algorithm into pseudocode.
- Focus on replicating the logic and structure of the algorithm as presented in the paper.
- Ensure that all steps, variables, and functions are clearly defined and logically sequenced.
4. **Pseudocode Refinement:**
- Review the initial pseudocode for completeness and accuracy.
- Revise to clarify complex parts and add comments for better understanding.
- Ensure the pseudocode mirrors the paper's algorithm faithfully, including handling edge cases and exceptions.
5. **Cross-Verification:**
- Compare the pseudocode with any available source code or implementation details provided in the paper.
- If possible, consult with experts or the paper's authors for validation.
- Adjust the pseudocode based on this feedback to enhance reliability.
6. **Testing and Debugging:**
- Simulate the pseudocode, if possible, using a conceptual or a simplified coding environment.
- Identify any logical or syntactical errors and rectify them.
- Document these tests and their outcomes for future reference.
7. **Peer Review and Collaboration:**
- Engage with other experts or team members to review the pseudocode.
- Incorporate feedback to improve the accuracy and clarity of the pseudocode.
8. **Final Documentation:**
- Document the final version of the pseudocode with comprehensive comments and annotations.
- Include references to the original paper and any other sources consulted.
- Ensure the documentation is clear and understandable to someone familiar with the field but not necessarily with the specific paper.
9. **Ongoing Updates and Revisions:**
- Regularly revisit the pseudocode in light of new research or feedback.
- Maintain version control and document changes to track the evolution of the pseudocode.
#### Additional Notes
- Prioritize precision and fidelity to the original research in every step.
- Acknowledge and respect intellectual property rights; cite all sources appropriately.
- Adapt and evolve this process as new methodologies and standards emerge in AI research.
########## GENERATE THE ALGORITHMIC PSEUDOCODE OF THE NOVEL TECHNIQUE FROM THE PAPER #########
"""

@ -1,6 +1,6 @@
from swarms.structs.workflow import Workflow
from swarms.structs.task import Task
# from swarms.structs.workflow import Workflow
# from swarms.structs.task import Task
from swarms.structs.flow import Flow
from swarms.structs.sequential_workflow import SequentialWorkflow
__all__ = ["Workflow", "Task", "Flow", "SequentialWorkflow"]
__all__ = ["Flow", "SequentialWorkflow"]

@ -46,6 +46,7 @@ commands: {
}
}
-------------TOOLS---------------------------
{tools}
"""
@ -149,14 +150,17 @@ class Flow:
dynamic_loops: Optional[bool] = False,
interactive: bool = False,
dashboard: bool = False,
agent_name: str = "Flow agent",
agent_name: str = " Autonomous Agent XYZ1B",
agent_description: str = None,
system_prompt: str = FLOW_SYSTEM_PROMPT,
# tools: List[Any] = None,
dynamic_temperature: bool = False,
sop: str = None,
saved_state_path: Optional[str] = "flow_state.json",
autosave: bool = False,
context_length: int = 8192,
user_name: str = "Human",
user_name: str = "Human:",
self_healing: bool = False,
**kwargs: Any,
):
self.llm = llm
@ -175,15 +179,20 @@ class Flow:
self.dynamic_temperature = dynamic_temperature
self.dynamic_loops = dynamic_loops
self.user_name = user_name
self.context_length = context_length
# SOPS to inject into the system prompt
self.sop = sop
# The max_loops will be set dynamically if the dynamic_loop
if self.dynamic_loops:
self.max_loops = "auto"
# self.tools = tools or []
self.system_prompt = system_prompt
self.agent_name = agent_name
self.agent_description = agent_description
self.saved_state_path = saved_state_path
self.autosave = autosave
self.response_filters = []
self.self_healing = self_healing
def provide_feedback(self, feedback: str) -> None:
"""Allow users to provide feedback on the responses."""
@ -333,13 +342,12 @@ class Flow:
--------------------------------------------
Flow loop is initializing for {self.max_loops} with the following configuration:
Model Configuration: {model_config}
----------------------------------------
Flow Configuration:
Name: {self.agent_name}
System Prompt: {self.system_prompt}
Description: {self.agent_description}
System Prompt: {self.system_prompt}
Task: {task}
Max Loops: {self.max_loops}
Stopping Condition: {self.stopping_condition}
@ -351,6 +359,7 @@ class Flow:
Dynamic Temperature: {self.dynamic_temperature}
Autosave: {self.autosave}
Saved State: {self.saved_state_path}
Model Configuration: {model_config}
----------------------------------------
""",
@ -395,77 +404,81 @@ class Flow:
5. Repeat until stopping condition is met or max_loops is reached
"""
# dynamic_prompt = self.construct_dynamic_prompt()
# combined_prompt = f"{dynamic_prompt}\n{task}"
# Activate Autonomous agent message
self.activate_autonomous_agent()
response = task # or combined_prompt
history = [f"{self.user_name}: {task}"]
# If dashboard = True then print the dashboard
if self.dashboard:
self.print_dashboard(task)
loop_count = 0
# for i in range(self.max_loops):
while self.max_loops == "auto" or loop_count < self.max_loops:
loop_count += 1
print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue"))
print("\n")
if self.stopping_token:
if self._check_stopping_condition(response) or parse_done_token(
response
):
break
# Adjust temperature, comment if no work
if self.dynamic_temperature:
self.dynamic_temperature()
# Preparing the prompt
task = self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response)
attempt = 0
while attempt < self.retry_attempts:
try:
response = self.llm(
task,
**kwargs,
)
# If there are any tools then parse and execute them
# if self.tools:
# self.parse_and_execute_tools(response)
if self.interactive:
print(f"AI: {response}")
history.append(f"AI: {response}")
response = input("You: ")
history.append(f"Human: {response}")
else:
print(f"AI: {response}")
history.append(f"AI: {response}")
print(response)
break
except Exception as e:
logging.error(f"Error generating response: {e}")
attempt += 1
time.sleep(self.retry_interval)
history.append(response)
time.sleep(self.loop_interval)
self.memory.append(history)
if self.autosave:
save_path = self.saved_state_path or "flow_state.json"
print(colored(f"Autosaving flow state to {save_path}", "green"))
self.save_state(save_path)
if self.return_history:
return response, history
try:
# dynamic_prompt = self.construct_dynamic_prompt()
# combined_prompt = f"{dynamic_prompt}\n{task}"
# Activate Autonomous agent message
self.activate_autonomous_agent()
response = task # or combined_prompt
history = [f"{self.user_name}: {task}"]
# If dashboard = True then print the dashboard
if self.dashboard:
self.print_dashboard(task)
loop_count = 0
# for i in range(self.max_loops):
while self.max_loops == "auto" or loop_count < self.max_loops:
loop_count += 1
print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue"))
print("\n")
if self.stopping_token:
if self._check_stopping_condition(response) or parse_done_token(
response
):
break
# Adjust temperature, comment if no work
if self.dynamic_temperature:
self.dynamic_temperature()
# Preparing the prompt
task = self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response)
attempt = 0
while attempt < self.retry_attempts:
try:
response = self.llm(
task,
**kwargs,
)
# If there are any tools then parse and execute them
# if self.tools:
# self.parse_and_execute_tools(response)
if self.interactive:
print(f"AI: {response}")
history.append(f"AI: {response}")
response = input("You: ")
history.append(f"Human: {response}")
else:
print(f"AI: {response}")
history.append(f"AI: {response}")
# print(response)
break
except Exception as e:
logging.error(f"Error generating response: {e}")
attempt += 1
time.sleep(self.retry_interval)
history.append(response)
time.sleep(self.loop_interval)
self.memory.append(history)
if self.autosave:
save_path = self.saved_state_path or "flow_state.json"
print(colored(f"Autosaving flow state to {save_path}", "green"))
self.save_state(save_path)
if self.return_history:
return response, history
return response
return response
except Exception as error:
print(f"Error running flow: {error}")
raise
async def arun(self, task: str, **kwargs):
"""
@ -565,13 +578,27 @@ class Flow:
Returns:
str: The agent history prompt
"""
system_prompt = system_prompt or self.system_prompt
agent_history_prompt = f"""
SYSTEM_PROMPT: {system_prompt}
if self.sop:
system_prompt = system_prompt or self.system_prompt
agent_history_prompt = f"""
SYSTEM_PROMPT: {system_prompt}
Follow this standard operating procedure (SOP) to complete tasks:
{self.sop}
-----------------
History of conversations between yourself and your user {self.user_name}: {history}
"""
return agent_history_prompt
else:
system_prompt = system_prompt or self.system_prompt
agent_history_prompt = f"""
SYSTEM_PROMPT: {system_prompt}
History: {history}
"""
return agent_history_prompt
History: {history}
"""
return agent_history_prompt
async def run_concurrent(self, tasks: List[str], **kwargs):
"""
@ -688,14 +715,6 @@ class Flow:
return "Timeout"
return response
# def backup_memory_to_s3(self, bucket_name: str, object_name: str):
# """Backup the memory to S3"""
# import boto3
# s3 = boto3.client("s3")
# s3.put_object(Bucket=bucket_name, Key=object_name, Body=json.dumps(self.memory))
# print(f"Backed up memory to S3: {bucket_name}/{object_name}")
def analyze_feedback(self):
"""Analyze the feedback for issues"""
feedback_counts = {}
@ -920,3 +939,40 @@ class Flow:
def update_retry_interval(self, retry_interval: int):
"""Update the retry interval"""
self.retry_interval = retry_interval
def self_healing(self, **kwargs):
    """
    Self healing by debugging errors and refactoring its own code.

    Runs the flow once; if the response fails validation, the current state
    is saved, ``refactor_code`` is invoked, and the flow is retried. If the
    retry also fails validation, the previously saved state is restored.

    NOTE(review): ``__init__`` assigns ``self.self_healing = self_healing``
    (a bool parameter), which shadows this method on every instance —
    ``flow.self_healing()`` would raise ``TypeError: 'bool' object is not
    callable``. One of the two names should be changed.

    Args:
        **kwargs (Any): Any additional keyword arguments (currently unused).
    """
    # Run the flow
    response = self.run_with_timeout("flow")

    # If an error occurs, save the state so it can be restored later
    if not self.validate_response(response):
        self.save_state("previous_state.txt")

        # Refactor the code (refactor_code is a no-op stub in this class)
        self.refactor_code()

        # Run the flow again
        response = self.run_with_timeout("flow")

        # If the error occurs again, revert to the previous state
        if not self.validate_response(response):
            self.load_state("previous_state.txt")

        # If the error does not occur, continue
        else:
            print("Self-healing successful! Bug fixed!")

    return response
def refactor_code(self):
    """
    Refactor the code.

    Placeholder: currently a no-op. Intended to rewrite the flow's own code
    when a validation failure triggers the self-healing cycle.
    """
    # Add your code here to refactor the code
    pass

@ -107,6 +107,8 @@ class SequentialWorkflow:
# Tasks executed in order; autosave persists state to saved_state_filepath.
tasks: List[Task] = field(default_factory=list)
max_loops: int = 1
autosave: bool = False
# BUG FIX: the defaults were written as ``(None,)`` — a one-element tuple
# caused by a stray trailing comma, not ``None`` — so the dashboard printed
# ``Name: (None,)`` instead of ``Name: None``.
name: Optional[str] = None
description: Optional[str] = None
saved_state_filepath: Optional[str] = "sequential_workflow_state.json"
restore_state_filepath: Optional[str] = None
dashboard: bool = False
@ -248,6 +250,8 @@ class SequentialWorkflow:
f"""
Sequential Workflow Dashboard
--------------------------------
Name: {self.name}
Description: {self.description}
Tasks: {len(self.tasks)}
Max Loops: {self.max_loops}
Autosave: {self.autosave}

@ -1,174 +0,0 @@
from __future__ import annotations
import json
import pprint
import uuid
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, List, Optional, Union
from pydantic import BaseModel, Field, StrictStr
from swarms.artifacts.main import Artifact
from swarms.artifacts.error_artifact import ErrorArtifact
class BaseTask(ABC):
    """Abstract node in a directed task graph.

    Subclasses implement ``run`` (and the ``input`` property); this base
    class manages identity, lifecycle state, parent/child wiring, and
    execution bookkeeping. ``structure`` is assumed to be the owning
    workflow object exposing ``tasks`` and ``find_task`` — TODO confirm
    against the structure implementation, which is outside this block.
    """

    class State(Enum):
        # Lifecycle of a task: created -> running -> done.
        PENDING = 1
        EXECUTING = 2
        FINISHED = 3

    def __init__(self):
        # Unique hex id used to reference this task from other tasks.
        self.id: str = uuid.uuid4().hex
        self.state: BaseTask.State = self.State.PENDING
        # Graph edges are stored as id lists; the actual task objects are
        # resolved lazily through ``self.structure.find_task``.
        self.parent_ids: List[str] = []
        self.child_ids: List[str] = []
        self.output: Optional[Union[Artifact, ErrorArtifact]] = None
        self.structure = None

    @property
    @abstractmethod
    def input(self) -> Any:
        """Input value for the task; must be provided by subclasses."""
        pass

    @property
    def parents(self) -> List[BaseTask]:
        """Resolve parent ids to task objects via the owning structure."""
        return [self.structure.find_task(parent_id) for parent_id in self.parent_ids]

    @property
    def children(self) -> List[BaseTask]:
        """Resolve child ids to task objects via the owning structure."""
        return [self.structure.find_task(child_id) for child_id in self.child_ids]

    def __rshift__(self, child: BaseTask) -> BaseTask:
        # ``a >> b`` makes b a child of a and returns b, enabling chaining.
        return self.add_child(child)

    def __lshift__(self, child: BaseTask) -> BaseTask:
        # ``a << b`` makes b a parent of a and returns b.
        return self.add_parent(child)

    def preprocess(self, structure) -> BaseTask:
        """Attach this task to *structure* and return self (fluent)."""
        self.structure = structure

        return self

    def add_child(self, child: BaseTask) -> BaseTask:
        """Link *child* under this task and register both with the structure.

        Whichever side already has a structure shares it with the other.
        NOTE(review): if neither side has a structure yet, the following
        ``self.structure.tasks`` access dereferences None — callers appear
        to set ``structure`` before chaining; confirm.
        """
        if self.structure:
            child.structure = self.structure
        elif child.structure:
            self.structure = child.structure

        if child not in self.structure.tasks:
            self.structure.tasks.append(child)

        if self not in self.structure.tasks:
            self.structure.tasks.append(self)

        if child.id not in self.child_ids:
            self.child_ids.append(child.id)

        if self.id not in child.parent_ids:
            child.parent_ids.append(self.id)

        return child

    def add_parent(self, parent: BaseTask) -> BaseTask:
        """Mirror of ``add_child``: link *parent* above this task."""
        if self.structure:
            parent.structure = self.structure
        elif parent.structure:
            self.structure = parent.structure

        if parent not in self.structure.tasks:
            self.structure.tasks.append(parent)

        if self not in self.structure.tasks:
            self.structure.tasks.append(self)

        if parent.id not in self.parent_ids:
            self.parent_ids.append(parent.id)

        if self.id not in parent.child_ids:
            parent.child_ids.append(self.id)

        return parent

    def is_pending(self) -> bool:
        return self.state == self.State.PENDING

    def is_finished(self) -> bool:
        return self.state == self.State.FINISHED

    def is_executing(self) -> bool:
        return self.state == self.State.EXECUTING

    def before_run(self) -> None:
        """Hook invoked just before ``run``; no-op by default."""
        pass

    def after_run(self) -> None:
        """Hook invoked after ``run`` returns without raising; no-op by default."""
        pass

    def execute(self) -> Optional[Union[Artifact, ErrorArtifact]]:
        """Run the task, capturing any exception as an ErrorArtifact.

        Always transitions to FINISHED, even on failure, and returns the
        task's output (an ErrorArtifact when ``run`` raised).
        """
        try:
            self.state = self.State.EXECUTING

            self.before_run()

            self.output = self.run()

            self.after_run()
        except Exception as e:
            self.output = ErrorArtifact(str(e))
        finally:
            self.state = self.State.FINISHED

        return self.output

    def can_execute(self) -> bool:
        """A task is runnable once pending and every parent has finished."""
        return self.state == self.State.PENDING and all(
            parent.is_finished() for parent in self.parents
        )

    def reset(self) -> BaseTask:
        """Return the task to PENDING with no output (fluent)."""
        self.state = self.State.PENDING
        self.output = None

        return self

    @abstractmethod
    def run(self) -> Optional[Union[Artifact, ErrorArtifact]]:
        pass
class Task(BaseModel):
    """Pydantic model describing a single unit of work.

    Provides round-trip serialization (str/json/dict) including any
    artifacts attached to the task.
    """

    input: Optional[StrictStr] = Field(None, description="Input prompt for the task")
    additional_input: Optional[Any] = Field(
        None, description="Input parameters for the task. Any value is allowed"
    )
    task_id: StrictStr = Field(..., description="ID of the task")
    # BUG FIX: ``to_dict``/``from_dict`` referenced ``self.artifacts`` but the
    # field was never declared, so every ``to_dict()`` call raised
    # AttributeError. Declared with a ``None`` default for backward
    # compatibility (``exclude_none=True`` keeps it out of payloads).
    artifacts: Optional[List[Any]] = Field(
        None, description="Artifacts produced by the task, if any"
    )

    class Config:
        allow_population_by_field_name = True
        validate_assignment = True

    def to_str(self) -> str:
        """Return a pretty-printed string form of the task."""
        return pprint.pformat(self.dict(by_alias=True))

    def to_json(self) -> str:
        """Serialize to a JSON string, omitting None-valued fields."""
        return json.dumps(self.dict(by_alias=True, exclude_none=True))

    @classmethod
    def from_json(cls, json_str: str) -> "Task":
        """Alternate constructor: parse a JSON string into a Task."""
        return cls.parse_raw(json_str)

    def to_dict(self) -> dict:
        """Serialize to a dict, expanding each artifact into its own dict."""
        _dict = self.dict(by_alias=True, exclude_none=True)
        if self.artifacts:
            _dict["artifacts"] = [
                artifact.dict(by_alias=True, exclude_none=True)
                for artifact in self.artifacts
            ]
        return _dict

    @classmethod
    def from_dict(cls, obj: dict) -> "Task":
        """Alternate constructor: build a Task (and its artifacts) from a dict.

        Returns None when *obj* is None; raises ValueError for non-dicts.
        """
        if obj is None:
            return None
        if not isinstance(obj, dict):
            raise ValueError("Input must be a dictionary.")
        if "artifacts" in obj:
            obj["artifacts"] = [
                Artifact.parse_obj(artifact) for artifact in obj["artifacts"]
            ]
        return cls.parse_obj(obj)

@ -1,83 +0,0 @@
from __future__ import annotations
import uuid
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Optional
from swarms.structs.task import Task
class Workflow:
    """
    Workflows are ideal for prescriptive processes that need to be executed
    sequentially.
    They string together multiple tasks of varying types, and can use
    Short-Term Memory or pass specific arguments downstream.

    Usage:
        llm = LLM()
        workflow = Workflow(llm)
        workflow.add("What's the weather in miami")
        workflow.add("Provide details for {{ parent_output }}")
        workflow.run("Summarize the above information: {{ parent_output }}")
    """

    def __init__(self, agent, parallel: bool = False):
        """
        Args:
            agent: Object used to execute each task (e.g. an LLM wrapper).
            parallel: If True, dispatch execution on a thread pool.
        """
        self.agent = agent
        self.tasks: List[Task] = []
        self.parallel = parallel

    def add(self, task: str) -> Task:
        """Append a new Task built from *task*, chained after the last one."""
        task = Task(task_id=uuid.uuid4().hex, input=task)
        if self.last_task():
            self.last_task().add_child(task)
        else:
            task.structure = self
            self.tasks.append(task)
        return task

    def first_task(self) -> Optional[Task]:
        """Return the first task, or None if the workflow is empty."""
        return self.tasks[0] if self.tasks else None

    def last_task(self) -> Optional[Task]:
        """Return the most recently added task, or None if empty."""
        return self.tasks[-1] if self.tasks else None

    def run(self, task: str) -> Task:
        """Add *task*, execute the workflow from its first task, and return
        the last task (whose ``output`` holds the final result)."""
        self.add(task)

        if self.parallel:
            with ThreadPoolExecutor() as executor:
                # BUG FIX: the original passed the bound method
                # ``self.first_task`` (uncalled) to executor.map, so the
                # parallel branch never executed the actual first task.
                list(executor.map(self.__run_from_task, [self.first_task()]))
        else:
            self.__run_from_task(self.first_task())

        return self.last_task()

    def context(self, task: Task) -> Dict[str, Any]:
        """Template context exposed to a task: parent output and neighbors."""
        return {
            "parent_output": task.parents[0].output
            if task.parents and task.parents[0].output
            else None,
            "parent": task.parents[0] if task.parents else None,
            "child": task.children[0] if task.children else None,
        }

    def __run_from_task(self, task: Optional[Task]) -> None:
        """Depth-first execution: run *task*, then recurse into its first child.

        Stops the chain if ``execute`` returns an Exception instance.
        """
        if task is None:
            return
        else:
            if isinstance(task.execute(), Exception):
                return
            else:
                self.__run_from_task(next(iter(task.children), None))

@ -14,8 +14,8 @@ class AutoScaler:
"""
The AutoScaler is like a kubernetes pod, that autoscales an agent or worker or boss!
Wraps around a structure like SequentialWorkflow
and or Flow and parallelizes them on multiple threads so they're split across devices
Wraps around a structure like SequentialWorkflow
and or Flow and parallelizes them on multiple threads so they're split across devices
and you can use them like that
Args:

@ -2,10 +2,12 @@ from swarms.utils.display_markdown import display_markdown_message
from swarms.utils.futures import execute_futures_dict
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.parse_code import extract_code_in_backticks_in_string
from swarms.utils.pdf_to_text import pdf_to_text
__all__ = [
"display_markdown_message",
"execute_futures_dict",
"SubprocessCodeInterpreter",
"extract_code_in_backticks_in_string",
"pdf_to_text",
]

@ -0,0 +1,44 @@
import sys
import os
try:
import PyPDF2
except ImportError:
print("PyPDF2 not installed. Please install it using: pip install PyPDF2")
sys.exit(1)
def pdf_to_text(pdf_path):
    """
    Converts a PDF file to a string of text.

    Args:
        pdf_path (str): The path to the PDF file to be converted.

    Returns:
        str: The text extracted from the PDF, with a newline after each page.

    Raises:
        FileNotFoundError: If the PDF file is not found at the specified path.
        Exception: If there is an error in reading the PDF file.
    """
    try:
        # Open the PDF file
        with open(pdf_path, "rb") as file:
            pdf_reader = PyPDF2.PdfReader(file)
            text = ""

            # Iterate through each page and extract text
            for page in pdf_reader.pages:
                # BUG FIX: extract_text() may return None for pages with no
                # extractable text; the original then crashed with
                # ``TypeError: unsupported operand type(s)`` on ``None + "\n"``.
                text += (page.extract_text() or "") + "\n"

        return text
    except FileNotFoundError:
        raise FileNotFoundError(f"The file at {pdf_path} was not found.")
    except Exception as e:
        raise Exception(f"An error occurred while reading the PDF file: {e}")
# Example usage
# text = pdf_to_text("test.pdf")
# print(text)
Loading…
Cancel
Save