code quality

pull/58/head
Kye 1 year ago
parent 2144e962eb
commit 45f19a6a50

@@ -75,13 +75,22 @@ for result in results:
 - The `Worker` is an fully feature complete agent with an llm, tools, and a vectorstore for long term memory!
 ```python
+from langchain.models import ChatOpenAI
 from swarms import Worker
+llm = ChatOpenAI(
+    model_name='gpt-4',
+    openai_api_key="api-key",
+    temperature=0.5
+)
 node = Worker(
-    openai_api_key="",
+    llm=llm,
     ai_name="Optimus Prime",
+    ai_role="Worker in a swarm",
+    external_tools = None,
+    human_in_the_loop = False,
+    temperature = 0.5,
 )
 task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."

@@ -1,4 +1,3 @@
-import discord
 from discord.ext import commands
 from langchain.llms import OpenAIChat
 from swarms.agents import OmniModalAgent

@@ -1,4 +1,4 @@
-from swarms import Model, Agent, WorkerNode, vectorstore, tools, orchestrator
+from swarms import Model, Agent, vectorstore, tools, orchestrator
 #1 model
 Model(openai)

@@ -1,7 +1,11 @@
-from langchain.models import OpenAIChat
+from langchain.models import ChatOpenAI
 from swarms import Worker
-llm = OpenAIChat()
+llm = ChatOpenAI(
+    model_name='gpt-4',
+    openai_api_key="api-key",
+    temperature=0.5
+)
 node = Worker(
     llm=llm,

@@ -1,5 +1,4 @@
-import re
-from typing import Callable, Dict, List
+from typing import Callable, List
 import numpy as np
 import tenacity
@@ -7,8 +6,6 @@ from langchain.chat_models import ChatOpenAI
 from langchain.output_parsers import RegexParser
 from langchain.prompts import PromptTemplate
 from langchain.schema import (
-    AIMessage,
-    BaseMessage,
     HumanMessage,
     SystemMessage,
 )

@@ -15,6 +15,6 @@ llms = [
 god_mode = GodMode(llms)
-task = f"What are the biggest risks facing humanity?"
+task = "What are the biggest risks facing humanity?"
 god_mode.print_responses(task)
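
The `GodMode` snippet above fans a single task out to a list of LLMs and prints each response. To make that pattern concrete without pulling in the real class, here is a minimal sketch under the assumption that each `llm` is a plain callable from prompt string to response string; the real implementation lives in `swarms.swarms` and differs in detail.

```python
# Toy stand-in for a GodMode-style fan-out, for illustration only.
# Assumes each llm is a callable mapping a prompt string to a response string.
from typing import Callable, List


class GodModeSketch:
    def __init__(self, llms: List[Callable[[str], str]]):
        self.llms = llms

    def run(self, task: str) -> List[str]:
        # Query every model with the same task and collect the answers.
        return [llm(task) for llm in self.llms]

    def print_responses(self, task: str) -> None:
        for llm, response in zip(self.llms, self.run(task)):
            print(f"{getattr(llm, '__name__', type(llm).__name__)}: {response}")


if __name__ == "__main__":
    llms = [lambda task: f"echo: {task}"]  # stand-in for real LLM wrappers
    GodModeSketch(llms).print_responses("What are the biggest risks facing humanity?")
```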

@@ -20,4 +20,4 @@ from swarms import swarms
 from swarms.swarms.orchestrate import Orchestrator
 #agents
 from swarms import agents

@@ -1,3 +1,4 @@
 """Agent Infrastructure, models, memory, utils, tools"""
 #agents
@@ -10,4 +11,4 @@ from swarms.agents.omni_modal_agent import OmniModalAgent
 #utils
 from swarms.agents.message import Message
 from swarms.agents.stream_response import stream

@@ -112,9 +112,9 @@ class OpenAI:
         rejected_solutions=None
     ):
         if (type(state) == str):
-            state_text = state
+            pass
         else:
-            state_text = '\n'.join(state)
+            '\n'.join(state)
         print("New state generating thought:", state, "\n\n")
         prompt = f"""
         Accomplish the task below by decomposing it as many very explicit subtasks as possible, be very explicit and thorough denoted by
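
One note on the hunk above: the removed assignments converted `state` into a `state_text` string before the prompt was built. A minimal sketch of that conversion, reconstructed from the removed lines rather than taken from the repository:

```python
# Sketch of the state-to-text conversion performed by the removed lines.
# Assumes state is either a plain string or an iterable of strings.
def state_to_text(state) -> str:
    if isinstance(state, str):
        return state
    return "\n".join(state)
```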

@@ -7,12 +7,8 @@ import uuid
 from PIL import Image, ImageDraw, ImageOps, ImageFont
 import math
 import numpy as np
-import argparse
 import inspect
-import tempfile
-from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
 from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
-from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
 from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionInstructPix2PixPipeline
 from diffusers import EulerAncestralDiscreteScheduler
@@ -29,7 +25,6 @@ from langchain.llms.openai import OpenAI
 # Grounding DINO
 import groundingdino.datasets.transforms as T
 from groundingdino.models import build_model
-from groundingdino.util import box_ops
 from groundingdino.util.slconfig import SLConfig
 from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap

@@ -1,4 +1,3 @@
-import os
 import re
 from typing import Any, Callable, Dict, List, Union
@@ -18,7 +17,6 @@ from langchain.vectorstores import Chroma
 from pydantic import BaseModel, Field
 from swarms.models.prompts.sales import SALES_AGENT_TOOLS_PROMPT, conversation_stages
 from swarms.tools.interpreter_tool import compile
-from swarms.agents.omni_modal_agent import OmniModalAgent
 # classes

@@ -1,6 +1,5 @@
 import logging
 import os
-from typing import Optional
 import faiss
 from langchain import LLMChain, OpenAI, PromptTemplate

@@ -1,3 +1,6 @@
+# from swarms.models.palm import GooglePalm
+# from swarms.models.openai import OpenAIChat
+#prompts
 from swarms.models.anthropic import Anthropic
 # from swarms.models.palm import GooglePalm
 from swarms.models.petals import Petals

@@ -1 +0,0 @@
-from exa import Inference, GPTQInference, MultiModalInference

@@ -1,5 +1,5 @@
 import time
-from typing import Any, Callable, List
+from typing import Any, List
 from swarms.models.prompts.agent_prompt_generator import get_prompt
 class TokenUtils:

@@ -38,10 +38,4 @@ def debate_monitor(game_description, word_limit, character_names):
 def generate_character_header(game_description, topic, character_name, character_description):
-    prompt = f"""{game_description}
-Your name is {character_name}.
-You are a presidential candidate.
-Your description is as follows: {character_description}
-You are debating the topic: {topic}.
-Your goal is to be as creative as possible and make the voters think you are the best candidate.
-"""
+    pass

@ -1,3 +1,4 @@
#structs #structs
#structs
from swarms.structs.workflow import Workflow from swarms.structs.workflow import Workflow
from swarms.structs.task import Task from swarms.structs.task import Task

@@ -1,4 +1,5 @@
 # swarms
+# swarms
 from swarms.swarms.dialogue_simulator import DialogueSimulator
 from swarms.swarms.autoscaler import AutoScaler
 from swarms.swarms.orchestrate import Orchestrator

@@ -29,7 +29,10 @@ class GodMode:
     """
-    def __init__(self, llms):
+    def __init__(
+        self,
+        llms
+    ):
         self.llms = llms
     def run(self, task):

@@ -7,11 +7,12 @@ def select_speaker(step: int, agents: List[Worker]) -> int:
     # This function selects the speaker in a round-robin fashion
     return step % len(agents)
 class MultiAgentDebate:
     """
     MultiAgentDebate
+    Args:
     """
     def __init__(
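
For context, `select_speaker` in the hunk above picks agents in a round-robin fashion. A tiny self-contained illustration of that behaviour (the agent names are placeholders):

```python
# Round-robin speaker selection, mirroring select_speaker above.
from typing import List, Sequence


def select_speaker(step: int, agents: Sequence[str]) -> int:
    return step % len(agents)


agents: List[str] = ["agent_a", "agent_b", "agent_c"]  # placeholder workers
for step in range(5):
    print(step, agents[select_speaker(step, agents)])
# cycles agent_a, agent_b, agent_c, agent_a, agent_b
```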

@@ -281,13 +281,13 @@ class Orchestrator:
             )
             #store the mesage in the vector database
-            added = self.collection.add(
+            self.collection.add(
                 embeddings=[message_vector],
                 documents=[message],
                 ids=[f"{sender_id}_to_{receiver_id}"]
             )
-            result = self.run(
+            self.run(
                 objective=f"chat with agent {receiver_id} about {message}"
             )
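
The Orchestrator hunk above persists a message embedding in a vector store. Judging by the `add(embeddings=..., documents=..., ids=...)` signature this is a Chroma collection, though that is an inference on my part; a standalone sketch of the same write with dummy values:

```python
# Standalone sketch of the vector-store write from the Orchestrator hunk.
# Assumes chromadb as the backing store; embedding values are dummies.
import chromadb

client = chromadb.Client()
collection = client.create_collection(name="swarm_messages")

sender_id, receiver_id = 1, 2
message = "hello from agent 1"
message_vector = [0.1, 0.2, 0.3]  # stand-in for a real embedding

collection.add(
    embeddings=[message_vector],
    documents=[message],
    ids=[f"{sender_id}_to_{receiver_id}"],
)
```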

@@ -8,17 +8,13 @@ from typing import Optional
 import pandas as pd
 from langchain.agents import tool
 from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
-from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
 from langchain.docstore.document import Document
 ROOT_DIR = "./data/"
 from langchain.chains.qa_with_sources.loading import BaseCombineDocumentsChain
-from langchain.chat_models import ChatOpenAI
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.tools import BaseTool
-from langchain.tools.file_management.read import ReadFileTool
-from langchain.tools.file_management.write import WriteFileTool
 from pydantic import Field
 from swarms.utils.logger import logger

@@ -1,6 +1,6 @@
 #props to shroominic
 from swarms.tools.base import Tool, ToolException
-from typing import Callable, Any, List
+from typing import Any, List
 from codeinterpreterapi import CodeInterpreterSession, File, ToolException
 class CodeInterpreter(Tool):

@@ -75,7 +75,7 @@ class SpeechToText:
             use_auth_token=self.hf_api_key,
             device=device
         )
-        diarize_segments = diarize_model(audio_file)
+        diarize_model(audio_file)
         try:
             segments = result["segments"]
@@ -113,7 +113,7 @@ class SpeechToText:
             device=self.device
         )
-        diarize_segments = diarize_model(audio_file)
+        diarize_model(audio_file)
         try:
             segments = result["segments"]

@@ -17,16 +17,9 @@ from swarms.tools.autogpt import (
 )
 from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
-# self.llm = ChatOpenAI(
-# model_name=model_name,
-# openai_api_key=self.openai_api_key,
-# temperature=self.temperature
-# )
 #cache
 ROOT_DIR = "./data/"
 #main
 class Worker:
     """
@@ -79,9 +72,6 @@ class Worker:
         self.setup_tools(external_tools)
         self.setup_memory()
         self.setup_agent()
-        # self.task_queue = []
-        # self.executor = concurrent.futures.ThreadPoolExecutor()
     def reset(self):
         """

@@ -1,15 +1,5 @@
 import pytest
 from langchain.base_language import BaseLanguageModel
-from langchain_experimental.autonomous_agents.hugginggpt.repsonse_generator import (
-    load_response_generator,
-)
-from langchain_experimental.autonomous_agents.hugginggpt.task_executor import (
-    TaskExecutor,
-)
-from langchain_experimental.autonomous_agents.hugginggpt.task_planner import (
-    load_chat_planner,
-)
-from transformers import load_tool
 from swarms.agents import (
     OmniModalAgent,  # Replace `your_module_name` with the appropriate module name
