main
Kye 2 years ago
parent 6f3c5a88ed
commit e7cd99a3d4

@ -0,0 +1,37 @@
# Use an official Python runtime as a parent image
# NOTE(review): buster is EOL — consider python:3.9-slim-bullseye; kept as-is
# here to avoid silently changing the runtime base.
FROM python:3.9-slim-buster

# EVAL_PORT must be set in its OWN ENV instruction: variables referenced inside
# the same ENV instruction expand to their value from *before* that
# instruction, so the original single-instruction form left
# SERVER="http://localhost:" with an empty port.
ENV EVAL_PORT=8000
ENV MODEL_NAME=gpt-4 \
    CELERY_BROKER_URL=redis://localhost:6379 \
    SERVER=http://localhost:${EVAL_PORT} \
    USE_GPU=False \
    PLAYGROUND_DIR=playground \
    LOG_LEVEL=INFO \
    BOT_NAME=Orca

# SECURITY: never bake real credentials into an image layer — these are
# placeholders only so the variables exist. Inject real values at runtime
# (docker run -e ... / compose env_file / orchestrator secrets).
ENV OPENAI_API_KEY=your_openai_api_key \
    WINEDB_HOST=your_winedb_host \
    WINEDB_PASSWORD=your_winedb_password \
    BING_SEARCH_URL=your_bing_search_url \
    BING_SUBSCRIPTION_KEY=your_bing_subscription_key \
    SERPAPI_API_KEY=your_serpapi_api_key

# Set work directory (absolute path; created automatically)
WORKDIR /usr/src/app

# Copy the dependency manifest alone first so the pip layer stays cached
# until requirements.txt actually changes.
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

# Bundle app source
COPY . .

# Drop root: run the app as an unprivileged user.
RUN useradd --create-home appuser \
    && chown -R appuser:appuser /usr/src/app
USER appuser

# Document the service port (EXPOSE does not publish it)
EXPOSE ${EVAL_PORT}

# Run example.py when the container launches (exec form: app is PID 1 and
# receives SIGTERM from `docker stop`)
CMD ["python", "example.py"]

@ -39,7 +39,7 @@ vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {
# General ------------------------- WORKER NODE #-------------------------------------------------------------------------- WORKER NODE
import pandas as pd import pandas as pd
from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT from langchain.experimental.autonomous_agents.autogpt.agent import AutoGPT
from langchain.chat_models import ChatOpenAI from langchain.chat_models import ChatOpenAI
@ -47,23 +47,22 @@ from langchain.chat_models import ChatOpenAI
from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain.docstore.document import Document from langchain.docstore.document import Document
import asyncio import asyncio
import nest_asyncio
llm = ChatOpenAI(model_name="gpt-4", temperature=1.0, openai_api_key="") import nest_asyncio
# Tools # Tools
import os import os
from contextlib import contextmanager from contextlib import contextmanager
from typing import Optional from typing import Optional
from langchain.tools.file_management.read import ReadFileTool from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool from langchain.tools.file_management.write import WriteFileTool
ROOT_DIR = "./data/" ROOT_DIR = "./data/"
from langchain.tools import BaseTool, DuckDuckGoSearchRun from langchain.tools import BaseTool, DuckDuckGoSearchRun
from langchain.text_splitter import RecursiveCharacterTextSplitter from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain, BaseCombineDocumentsChain from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain, BaseCombineDocumentsChain
from langchain.tools.human.tool import HumanInputRun from langchain.tools.human.tool import HumanInputRun
from swarms.agents.workers.auto_agent import MultiModalVisualAgent from swarms.agents.workers.auto_agent import MultiModalVisualAgent
from swarms.tools.main import Terminal, CodeWriter, CodeEditor, process_csv, WebpageQATool from swarms.tools.main import Terminal, CodeWriter, CodeEditor, process_csv, WebpageQATool
@ -84,7 +83,7 @@ class MultiModalVisualAgentTool(BaseTool):
# Read the OpenAI key from the environment instead of hard-coding an empty
# string, which would always fail authentication. Falls back to "" so the
# construction-time behavior is unchanged when the variable is unset.
llm = ChatOpenAI(model_name="gpt-4", temperature=1.0, openai_api_key=os.getenv("OPENAI_API_KEY", ""))
@ -159,49 +158,6 @@ class WorkerNode:
self.agent.run([prompt]) self.agent.run([prompt])
#worker node example
# Demo: build a single WorkerNode agent and drive it with a Tree-of-Thoughts
# style prompt (the "Game of 24" task). Assumes `llm`, `tools`, and
# `vectorstore` are defined earlier in this script — TODO confirm in scope.
worker_node = WorkerNode(llm, tools, vectorstore)
# human_in_the_loop=True pauses for operator confirmation;
# search_kwargs k=8 presumably controls retrieval fan-out — verify in WorkerNode.
worker_node.create_agent(
ai_name="Worker",
ai_role="Assistant",
human_in_the_loop=True,
search_kwargs={"k": 8}
)
# System prompt: several "experts" reason step by step and drop out on error.
tree_of_thoughts_prompt = """
Imagine three different experts are answering this question. All experts will write down each chain of thought of each step of their thinking, then share it with the group. Then all experts will go on to the next step, etc. If any expert realises they're wrong at any point then they leave. The question is...
"""
#Input problem
# Few-shot example enumerating candidate arithmetic steps, then the actual task.
input_problem = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
# Run the agent on the prompt + problem concatenated into one task string.
worker_node.run_agent([f"{tree_of_thoughts_prompt} {input_problem}"])
@ -284,15 +240,6 @@ class BossNode:
self.baby_agi(task) self.baby_agi(task)
# Initialize boss node with given parameters
# NOTE(review): despite the comment, BossNode() is constructed with no
# arguments — confirm the defaults are intended.
boss_node = BossNode()
# Create and execute a task
task = boss_node.create_task("Write a weather report for SF today")
boss_node.execute_task(task)
@ -336,3 +283,66 @@ class MultiAgentDebate(Swarms):
def execute(self, task): def execute(self, task):
pass pass
#worker node example
# Demo: build a single WorkerNode agent and drive it with a Tree-of-Thoughts
# style prompt (the "Game of 24" task). Assumes `llm`, `tools`, and
# `vectorstore` are defined earlier in this script — TODO confirm in scope.
worker_node = WorkerNode(llm, tools, vectorstore)
# human_in_the_loop=True pauses for operator confirmation;
# search_kwargs k=8 presumably controls retrieval fan-out — verify in WorkerNode.
worker_node.create_agent(
ai_name="Worker",
ai_role="Assistant",
human_in_the_loop=True,
search_kwargs={"k": 8}
)
# System prompt: several "experts" reason step by step and drop out on error.
tree_of_thoughts_prompt = """
Imagine three different experts are answering this question. All experts will write down each chain of thought of each step of their thinking, then share it with the group. Then all experts will go on to the next step, etc. If any expert realises they're wrong at any point then they leave. The question is...
"""
#Input problem
# Few-shot example enumerating candidate arithmetic steps, then the actual task.
input_problem = """
Input: 2 8 8 14
Possible next steps:
2 + 8 = 10 (left: 8 10 14)
8 / 2 = 4 (left: 4 8 14)
14 + 2 = 16 (left: 8 8 16)
2 * 8 = 16 (left: 8 14 16)
8 - 2 = 6 (left: 6 8 14)
14 - 8 = 6 (left: 2 6 8)
14 / 2 = 7 (left: 7 8 8)
14 - 2 = 12 (left: 8 8 12)
Input: use 4 numbers and basic arithmetic operations (+-*/) to obtain 24 in 1 equation
Possible next steps:
"""
# Run the agent on the prompt + problem concatenated into one task string.
worker_node.run_agent([f"{tree_of_thoughts_prompt} {input_problem}"])
###########################
# Initialize boss node with given parameters
# NOTE(review): despite the comment, BossNode() is constructed with no
# arguments — confirm the defaults are intended.
boss_node = BossNode()
# Create and execute a task
task = boss_node.create_task("Write a weather report for SF today")
boss_node.execute_task(task)

Loading…
Cancel
Save