From f08a7f80e07b126b17b0cdc0083123c4b2f4226e Mon Sep 17 00:00:00 2001
From: Kye
Date: Mon, 26 Jun 2023 18:03:45 -0400
Subject: [PATCH] auto gpt

---
 requirements.txt            |  43 ++++++++
 swarms/agents/auto_agent.py | 213 ++++++++++++++++++++++++++++++++++++
 2 files changed, 256 insertions(+)
 create mode 100644 requirements.txt
 create mode 100644 swarms/agents/auto_agent.py

diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..cd7aa9f5
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,43 @@
+transformers
+openai
+
+
+langchain==0.0.101
+torch==1.13.1
+torchvision==0.14.1
+
+
+# asyncio ships with the standard library; do not pip-install it
+nest_asyncio
+
+# faiss
+bs4
+playwright
+duckduckgo_search
+faiss-cpu
+
+wget==3.2
+accelerate
+addict
+albumentations
+basicsr
+controlnet-aux
+diffusers
+einops
+gradio
+imageio
+imageio-ffmpeg
+invisible-watermark
+kornia
+numpy
+omegaconf
+open_clip_torch
+opencv-python
+prettytable
+safetensors
+streamlit
+test-tube
+timm
+torchmetrics
+webdataset
+yapf
\ No newline at end of file
diff --git a/swarms/agents/auto_agent.py b/swarms/agents/auto_agent.py
new file mode 100644
index 00000000..5ea7bf55
--- /dev/null
+++ b/swarms/agents/auto_agent.py
@@ -0,0 +1,213 @@
+# General
+import os
+import pandas as pd
+from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
+from langchain.chat_models import ChatOpenAI
+
+from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
+from langchain.docstore.document import Document
+import asyncio
+import nest_asyncio
+
+# Patch the event loop so run_until_complete can be nested (see run_async below)
+nest_asyncio.apply()
+
+# Read the key from the environment rather than hardcoding an empty string
+llm = ChatOpenAI(model_name="gpt-4", temperature=1.0, openai_api_key=os.environ.get("OPENAI_API_KEY"))
+
+# Tools
+from contextlib import contextmanager
+from typing import Optional
+from langchain.agents import tool
+from langchain.tools.file_management.read import ReadFileTool
+from langchain.tools.file_management.write import WriteFileTool
+
+ROOT_DIR = "./data/"
+
+from langchain.tools import BaseTool, DuckDuckGoSearchRun
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+
+from pydantic import Field
+from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain, BaseCombineDocumentsChain
+
+# Memory
+import faiss
+from langchain.vectorstores import FAISS
+from langchain.docstore import InMemoryDocstore
+from langchain.embeddings import OpenAIEmbeddings
+from langchain.tools.human.tool import HumanInputRun
+
+
+embeddings_model = OpenAIEmbeddings(openai_api_key=os.environ.get("OPENAI_API_KEY"))
+embedding_size = 1536  # dimensionality of OpenAI text-embedding-ada-002 vectors
+index = faiss.IndexFlatL2(embedding_size)
+vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
+
+
+@contextmanager
+def pushd(new_dir):
+    """Context manager for changing the current working directory."""
+    prev_dir = os.getcwd()
+    os.chdir(new_dir)
+    try:
+        yield
+    finally:
+        os.chdir(prev_dir)
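+
+# A minimal usage sketch for pushd (illustrative only; not called until
+# process_csv below). The body runs with ROOT_DIR as the working directory,
+# and the original directory is restored even if the body raises:
+#
+#   with pushd(ROOT_DIR):
+#       pd.DataFrame({"a": [1, 2]}).to_csv("example.csv", index=False)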
+
+@tool
+def process_csv(
+    csv_file_path: str, instructions: str, output_path: Optional[str] = None
+) -> str:
+    """Process a CSV with pandas in a limited REPL. Only use this after
+    writing data to disk as a csv file. Any figures must be saved to disk
+    to be viewed by the human. Instructions should be written in natural
+    language, not code. Assume the dataframe is already loaded."""
+    with pushd(ROOT_DIR):
+        try:
+            df = pd.read_csv(csv_file_path)
+        except Exception as e:
+            return f"Error: {e}"
+        agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=True)
+        if output_path is not None:
+            instructions += f" Save output to disk at {output_path}"
+        try:
+            result = agent.run(instructions)
+            return result
+        except Exception as e:
+            return f"Error: {e}"
+
+
+async def async_load_playwright(url: str) -> str:
+    """Load the specified URL with Playwright and parse it with BeautifulSoup."""
+    from bs4 import BeautifulSoup
+    from playwright.async_api import async_playwright
+
+    results = ""
+    async with async_playwright() as p:
+        browser = await p.chromium.launch(headless=True)
+        try:
+            page = await browser.new_page()
+            await page.goto(url)
+
+            page_source = await page.content()
+            soup = BeautifulSoup(page_source, "html.parser")
+
+            # Drop script and style tags before extracting visible text
+            for script in soup(["script", "style"]):
+                script.extract()
+
+            text = soup.get_text()
+            lines = (line.strip() for line in text.splitlines())
+            # Split on double spaces so runs of inline whitespace become breaks
+            chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
+            results = "\n".join(chunk for chunk in chunks if chunk)
+        except Exception as e:
+            results = f"Error: {e}"
+        await browser.close()
+    return results
+
+
+def run_async(coro):
+    # nest_asyncio.apply() above makes this safe even inside a running loop
+    event_loop = asyncio.get_event_loop()
+    return event_loop.run_until_complete(coro)
+
+
+@tool
+def browse_web_page(url: str) -> str:
+    """Verbose way to scrape a whole webpage. Likely to cause issues parsing."""
+    return run_async(async_load_playwright(url))
+
+
+def _get_text_splitter():
+    return RecursiveCharacterTextSplitter(
+        # Small chunks keep each QA window comfortably inside the context limit
+        chunk_size=500,
+        chunk_overlap=20,
+        length_function=len,
+    )
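+
+# Rough sketch of what the splitter produces (illustrative values only):
+#
+#   splitter = _get_text_splitter()
+#   chunks = splitter.split_text(long_page_text)
+#
+# yields a list of strings of at most ~500 characters, with adjacent chunks
+# overlapping by up to 20 characters so answers aren't cut mid-sentence.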
+
+class WebpageQATool(BaseTool):
+    name = "query_webpage"
+    description = "Browse a webpage and retrieve the information relevant to the question."
+    text_splitter: RecursiveCharacterTextSplitter = Field(default_factory=_get_text_splitter)
+    qa_chain: BaseCombineDocumentsChain
+
+    def _run(self, url: str, question: str) -> str:
+        """Useful for browsing websites and scraping the text information."""
+        result = browse_web_page.run(url)
+        docs = [Document(page_content=result, metadata={"source": url})]
+        web_docs = self.text_splitter.split_documents(docs)
+        results = []
+        # TODO: Handle this with a MapReduceChain
+        for i in range(0, len(web_docs), 4):
+            input_docs = web_docs[i:i + 4]
+            window_result = self.qa_chain(
+                {"input_documents": input_docs, "question": question},
+                return_only_outputs=True,
+            )
+            results.append(f"Response from window {i // 4} - {window_result}")
+        results_docs = [Document(page_content="\n".join(results), metadata={"source": url})]
+        return self.qa_chain(
+            {"input_documents": results_docs, "question": question},
+            return_only_outputs=True,
+        )
+
+    async def _arun(self, url: str, question: str) -> str:
+        raise NotImplementedError
+
+
+query_website_tool = WebpageQATool(qa_chain=load_qa_with_sources_chain(llm))
+
+# !pip install duckduckgo_search
+web_search = DuckDuckGoSearchRun()
+
+tools = [
+    web_search,
+    WriteFileTool(root_dir="./data"),
+    ReadFileTool(root_dir="./data"),
+    process_csv,
+    query_website_tool,
+    # HumanInputRun(),  # Uncomment to let the agent ask the human for help
+]
+
+agent = AutoGPT.from_llm_and_tools(
+    ai_name="Tree of Thoughts",
+    ai_role="Assistant",
+    tools=tools,
+    llm=llm,
+    memory=vectorstore.as_retriever(search_kwargs={"k": 8}),
+    # prompt=""
+    human_in_the_loop=True,  # Set to True to give feedback at each step.
+)
+
+agent.chain.verbose = True
+
+
+tree_of_thoughts_prompt = """
+Imagine three different experts are answering this question. All experts will write down each step of their chain of thought, then share it with the group. Then all experts will go on to the next step, and so on. If any expert realises they're wrong at any point then they leave. The question is...
+"""
+
+
+# Input problem
+input_problem = """
+Input: 2 8 8 14
+Possible next steps:
+2 + 8 = 10 (left: 8 10 14)
+8 / 2 = 4 (left: 4 8 14)
+14 + 2 = 16 (left: 8 8 16)
+2 * 8 = 16 (left: 8 14 16)
+8 - 2 = 6 (left: 6 8 14)
+14 - 8 = 6 (left: 2 6 8)
+14 / 2 = 7 (left: 7 8 8)
+14 - 2 = 12 (left: 8 8 12)
+Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
+Possible next steps:
+"""
+
+agent.run([f"{tree_of_thoughts_prompt} {input_problem}"])
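+
+# Illustrative usage notes (not part of the agent logic):
+# - Requires OPENAI_API_KEY to be set in the environment, e.g.
+#     OPENAI_API_KEY=sk-... python swarms/agents/auto_agent.py
+# - To try another Game of 24 instance, swap the numbers in input_problem:
+#     agent.run([f"{tree_of_thoughts_prompt} Input: 4 5 6 10\nPossible next steps:"])
\ No newline at end of file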