parent
36492c1e0f
commit
7a35e329f2
@@ -0,0 +1,100 @@
"""
$ pip install swarms

Todo [Improvements]
- Add docs into the database
- Use a better LLM
- Use better prompts [System and SOPs]
- Use an open-source model like Command R
- Better SOPs and system prompts
"""

from swarms import Agent
from swarms.models.llama3_hosted import llama3Hosted
from playground.memory.chromadb_example import ChromaDB
from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api


# Research system prompt
research_system_prompt = """
Research Agent LLM Prompt: Summarizing Sources and Content

Objective:
Your task is to summarize the provided sources and the content within those sources. The goal is to create concise, accurate, and informative summaries that capture the key points of the original content.

Instructions:

1. Identify Key Information:
- Extract the most important information from each source. Focus on key facts, main ideas, significant arguments, and critical data.

2. Summarize Clearly and Concisely:
- Use clear and straightforward language. Avoid unnecessary details and keep the summary concise.
- Ensure that the summary is coherent and easy to understand.

3. Preserve Original Meaning:
- While summarizing, maintain the original meaning and intent of the content. Do not omit essential information that changes the context or understanding.

4. Include Relevant Details:
- Mention the source title, author, publication date, and any other relevant details that provide context.

5. Structure:
- Begin with a brief introduction to the source.
- Follow with a summary of the main content.
- Conclude with any significant conclusions or implications presented in the source.
"""


# Initialize long-term memory
memory = ChromaDB(
    output_dir="research_base",
    n_results=2,
)

# Hosted Llama 3 model
llm = llama3Hosted(temperature=0.2, max_tokens=3500)

# Initialize the agent
agent = Agent(
    agent_name="Research Agent",
    system_prompt=research_system_prompt,
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    interactive=True,
    long_term_memory=memory,
    # tools=[fetch_web_articles_bing_api],
)


def perplexity_agent(task: str = None, *args, **kwargs):
    """
    Fetches web articles related to the given task via the Bing API,
    combines the task and the fetched articles into a single prompt,
    and runs that prompt through the agent.

    Args:
        task (str): The task for which web articles need to be fetched.

    Returns:
        str: The response generated by the agent.
    """
    out = fetch_web_articles_bing_api(
        task,
        subscription_key=None,
    )

    # Combine the task and the fetched articles into one prompt
    sources = [task, out]
    sources_prompts = "\n\n".join(sources)

    # Run the agent on the combined prompt
    agent_response = agent.run(sources_prompts)
    return agent_response


out = perplexity_agent("What are the best ways to hold a cat?")
print(out)
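
The first item on the todo list above is adding docs into the database. A minimal sketch of that step, assuming the ChromaDB wrapper exposes the add() method that the compliance example later in this commit references; the document string is a placeholder:

# Hypothetical seeding of the research memory; the document string is a
# placeholder, and memory.add() is assumed from the compliance example below.
memory.add(
    "Cats feel most secure when supported under both the chest and hindquarters."
)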
(4 image files added: 174 KiB, 21 KiB, 295 KiB, 30 KiB)
@@ -0,0 +1,71 @@
"""
$ pip install swarms

Todo [Improvements]
- Add docs into the database
- Use a better LLM
- Use better prompts [System and SOPs]
- Use an open-source model like Command R
"""

from swarms import Agent
from swarms.models.llama3_hosted import llama3Hosted
from playground.memory.chromadb_example import ChromaDB


# Model
llm = llama3Hosted(
    temperature=0.4,
    max_tokens=3500,
)


# Initialize long-term memory
memory = ChromaDB(
    output_dir="compliance_swarm",
    n_results=2,
    limit_tokens=400,
    # docs_folder="ppi_docs"  # A folder loaded with docs
)

# Add docs
# memory.add("Your Docs")


compliance_system_prompt = """
You are a Compliance Agent specializing in the regulation and certification of Personal Protective Equipment (PPE) within the European Union (EU). Your primary objective is to ensure that all PPE products meet the stringent safety and quality standards set by EU regulations.
To do this effectively, you need to be familiar with the relevant EU legislation, understand the certification process, and be able to identify non-compliant products. Always prioritize safety, accuracy, and thoroughness in your assessments.
"""

eu_sop = """
As a Compliance Agent, it is crucial to have an in-depth understanding of the EU legislation that governs Personal Protective Equipment (PPE). Focus on Regulation (EU) 2016/425, which lays down the health and safety requirements for PPE. Your task is to interpret the regulation's requirements, apply them to various PPE products, and ensure that manufacturers adhere to these standards. Be vigilant about changes and updates to the regulation, and ensure your knowledge remains current.
"""

second_eu_prompt = """
To ensure PPE compliance in the EU, you must be well-versed in the certification and conformity assessment procedures. This involves understanding the roles of Notified Bodies, the significance of the CE marking, and the various conformity assessment modules (A, B, C, D, E, F, G, H). Evaluate the technical documentation provided by manufacturers, including risk assessments and test reports. Ensure that all documentation is complete, accurate, and up-to-date.
"""


# Initialize the agent
agent = Agent(
    agent_name="Compliance Agent",
    system_prompt=compliance_system_prompt,
    sop_list=[eu_sop, second_eu_prompt],
    llm=llm,
    max_loops="auto",
    autosave=True,
    dashboard=False,
    interactive=True,
    long_term_memory=memory,
)

# Run a question
out = agent.run(
    "Where should PII be housed depending on the residence of the person in question?"
)
print(out)
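
A minimal sketch of the commented-out "Add docs" step, assuming memory.add() accepts raw strings; the two document texts are placeholders for real regulation excerpts:

# Hypothetical knowledge-base seeding; document strings are placeholders.
ppe_docs = [
    "Regulation (EU) 2016/425 classifies PPE into risk categories I, II, and III.",
    "CE marking indicates conformity with EU health and safety requirements.",
]
for doc in ppe_docs:
    memory.add(doc)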
@@ -0,0 +1,24 @@
from swarms import AutoSwarm, AutoSwarmRouter, BaseSwarm


class FinancialReportSummarization(BaseSwarm):
    def __init__(self, name: str = None, *args, **kwargs):
        super().__init__(name=name, *args, **kwargs)

    def run(self, task, *args, **kwargs):
        # Placeholder: echoes the task back; replace with real summarization logic
        return task


# Add the swarm to a router
router = AutoSwarmRouter(swarms=[FinancialReportSummarization])

# Create an AutoSwarm instance
autoswarm = AutoSwarm(
    name="kyegomez/FinancialReportSummarization",
    description="A swarm for financial document summarization and generation",
    verbose=True,
    router=router,
)

# Run the AutoSwarm
autoswarm.run("Analyze these documents and give me a summary:")
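
The run() method above is a stub that echoes the task back. A sketch of a more useful variant, assuming the Agent and llama3Hosted APIs used elsewhere in this commit; the class name, system prompt, and max_loops value are illustrative:

from swarms import Agent, BaseSwarm
from swarms.models.llama3_hosted import llama3Hosted


class FinancialReportSummarizationV2(BaseSwarm):
    """Hypothetical variant that delegates summarization to an internal Agent."""

    def __init__(self, name: str = None, *args, **kwargs):
        super().__init__(name=name, *args, **kwargs)
        # Internal agent that performs the actual summarization
        self.agent = Agent(
            agent_name="Financial Summarizer",
            system_prompt="Summarize the provided financial documents concisely.",
            llm=llama3Hosted(temperature=0.2, max_tokens=3500),
            max_loops=1,
        )

    def run(self, task, *args, **kwargs):
        return self.agent.run(task)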
@@ -0,0 +1,10 @@
from swarms.marketplace.add_all_swarms import autoswarm, router
from swarms.marketplace.agricultural_optimization import (
    AgricultureOptimizationSwarm,
)

__all__ = [
    "autoswarm",
    "router",
    "AgricultureOptimizationSwarm",
]
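
Assuming the package layout implied by these imports, a consumer could then pull everything from the marketplace namespace; the task string is a placeholder:

# Hypothetical usage of the re-exports above; the task string is a placeholder.
from swarms.marketplace import autoswarm

out = autoswarm.run("Draft an irrigation schedule for a 40-hectare maize field")
print(out)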
@@ -0,0 +1,58 @@
import os
import importlib.util
from typing import List, Type
from swarms import AutoSwarm, AutoSwarmRouter, BaseSwarm


def find_base_swarm_classes(
    folder_path: str = "prebuilt",
) -> List[Type[BaseSwarm]]:
    """
    Find and return a list of all classes that inherit from the BaseSwarm class
    within the specified folder path.

    Args:
        folder_path (str): The path to the folder containing the swarm classes.
            Defaults to "prebuilt".

    Returns:
        List[Type[BaseSwarm]]: A list of all classes that inherit from the BaseSwarm class.
    """
    base_swarm_classes: List[Type[BaseSwarm]] = []

    for root, _, files in os.walk(folder_path):
        for file in files:
            if file == "__init__.py":
                module_path: str = os.path.join(root, file)
                spec = importlib.util.spec_from_file_location(
                    "module.name", module_path
                )
                module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(module)

                for name, obj in module.__dict__.items():
                    if (
                        isinstance(obj, type)
                        and issubclass(obj, BaseSwarm)
                        and obj is not BaseSwarm
                    ):
                        base_swarm_classes.append(obj)

    return base_swarm_classes


# Define the folder containing the prebuilt swarms
prebuilt_folder: str = "prebuilt"

# Find all BaseSwarm classes in the prebuilt folder
prebuilt_swarms: List[Type[BaseSwarm]] = find_base_swarm_classes(
    prebuilt_folder
)

# Add all swarms to the AutoSwarmRouter
router: AutoSwarmRouter = AutoSwarmRouter(swarms=prebuilt_swarms)

# Create an AutoSwarm instance
autoswarm: AutoSwarm = AutoSwarm(
    router=router,
)
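
A quick usage sketch, assuming prebuilt/ contains at least one BaseSwarm subclass; it mirrors the run call in the FinancialReportSummarization example above:

# Hypothetical driver code; assumes prebuilt/ is populated with swarms.
if __name__ == "__main__":
    print(f"Discovered {len(prebuilt_swarms)} prebuilt swarm(s)")
    print(autoswarm.run("Analyze these documents and give me a summary:"))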
@@ -0,0 +1,85 @@
import os
import requests
from typing import List, Dict
from dotenv import load_dotenv


def check_bing_api_key():
    try:
        load_dotenv()
        return os.getenv("BING_API_KEY")
    except Exception as error:
        print(f"Error {error}")
        raise


def parse_and_merge_logs(logs: List[Dict[str, str]]) -> str:
    """
    Parses logs and merges them into a single string for input to an LLM.

    Parameters:
        logs (List[Dict[str, str]]): A list of dictionaries where each dictionary represents a log entry.

    Returns:
        str: A single string containing all log entries concatenated.
    """
    merged_logs = ""
    for log in logs:
        log_entries = [f"{key}: {value}" for key, value in log.items()]
        log_string = "\n".join(log_entries)
        merged_logs += log_string + "\n\n"

    return merged_logs.strip()


def fetch_web_articles_bing_api(
    query: str = None,
    subscription_key: str = None,
) -> str:
    """
    Fetches four articles from the Bing Web Search API based on the given query.

    Parameters:
        query (str): The search query to retrieve articles.
        subscription_key (str): The Bing Search API subscription key.
            Defaults to the BING_API_KEY environment variable.

    Returns:
        str: The fetched article details, merged into a single string.
    """
    if subscription_key is None:
        subscription_key = check_bing_api_key()

    url = "https://api.bing.microsoft.com/v7.0/search"
    headers = {"Ocp-Apim-Subscription-Key": subscription_key}
    params = {"q": query, "count": 4, "mkt": "en-US"}

    response = requests.get(url, headers=headers, params=params)
    response.raise_for_status()
    search_results = response.json()

    articles = []
    for i, result in enumerate(
        search_results.get("webPages", {}).get("value", [])
    ):
        article_info = {
            "query": query,
            "url": result.get("url"),
            "title": result.get("name"),
            "publishedDate": result.get("dateLastCrawled"),
            "author": (
                result.get("provider")[0]["name"]
                if result.get("provider")
                else "Unknown"
            ),
            "id": str(i + 1),  # Generate a simple unique ID
        }
        articles.append(article_info)

    # Merge the article dictionaries into a single string for LLM input
    articles = parse_and_merge_logs(articles)
    return articles


# out = fetch_web_articles_bing_api("swarms ai github")
# print(out)
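
For the commented-out call above to return results, BING_API_KEY must be resolvable by check_bing_api_key(); a minimal sketch that sets it in-process instead of via a .env file (the key value is a placeholder):

# Hypothetical setup; the key value is a placeholder for a real
# Bing Web Search subscription key from the Azure portal.
import os

os.environ["BING_API_KEY"] = "<your-bing-web-search-key>"
print(fetch_web_articles_bing_api("swarms ai github"))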