code quality

pull/58/head
Kye 1 year ago
parent 2144e962eb
commit 45f19a6a50

@ -75,13 +75,22 @@ for result in results:
- The `Worker` is a fully feature-complete agent with an llm, tools, and a vectorstore for long term memory!
```python
from langchain.models import ChatOpenAI
from swarms import Worker
llm = ChatOpenAI(
model_name='gpt-4',
openai_api_key="api-key",
temperature=0.5
)
node = Worker(
openai_api_key="",
llm=llm,
ai_name="Optimus Prime",
ai_role="Worker in a swarm",
external_tools = None,
human_in_the_loop = False,
temperature = 0.5,
)
task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times."

@ -1,4 +1,3 @@
import discord
from discord.ext import commands
from langchain.llms import OpenAIChat
from swarms.agents import OmniModalAgent

@ -1,4 +1,4 @@
from swarms import Model, Agent, WorkerNode, vectorstore, tools, orchestrator
from swarms import Model, Agent, vectorstore, tools, orchestrator
#1 model
Model(openai)

@ -1,7 +1,11 @@
from langchain.models import OpenAIChat
from langchain.models import ChatOpenAI
from swarms import Worker
llm = OpenAIChat()
llm = ChatOpenAI(
model_name='gpt-4',
openai_api_key="api-key",
temperature=0.5
)
node = Worker(
llm=llm,

@ -1,5 +1,4 @@
import re
from typing import Callable, Dict, List
from typing import Callable, List
import numpy as np
import tenacity
@ -7,8 +6,6 @@ from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import RegexParser
from langchain.prompts import PromptTemplate
from langchain.schema import (
AIMessage,
BaseMessage,
HumanMessage,
SystemMessage,
)

@ -15,6 +15,6 @@ llms = [
god_mode = GodMode(llms)
task = f"What are the biggest risks facing humanity?"
task = "What are the biggest risks facing humanity?"
god_mode.print_responses(task)

@ -20,4 +20,4 @@ from swarms import swarms
from swarms.swarms.orchestrate import Orchestrator
#agents
from swarms import agents
from swarms import agents

@ -1,3 +1,4 @@
"""Agent Infrastructure, models, memory, utils, tools"""
#agents
@ -10,4 +11,4 @@ from swarms.agents.omni_modal_agent import OmniModalAgent
#utils
from swarms.agents.message import Message
from swarms.agents.stream_response import stream
from swarms.agents.stream_response import stream

@ -112,9 +112,9 @@ class OpenAI:
rejected_solutions=None
):
if (type(state) == str):
state_text = state
pass
else:
state_text = '\n'.join(state)
'\n'.join(state)
print("New state generating thought:", state, "\n\n")
prompt = f"""
Accomplish the task below by decomposing it as many very explicit subtasks as possible, be very explicit and thorough denoted by

@ -7,12 +7,8 @@ import uuid
from PIL import Image, ImageDraw, ImageOps, ImageFont
import math
import numpy as np
import argparse
import inspect
import tempfile
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
from diffusers import StableDiffusionPipeline, StableDiffusionInpaintPipeline, StableDiffusionInstructPix2PixPipeline
from diffusers import EulerAncestralDiscreteScheduler
@ -29,7 +25,6 @@ from langchain.llms.openai import OpenAI
# Grounding DINO
import groundingdino.datasets.transforms as T
from groundingdino.models import build_model
from groundingdino.util import box_ops
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap

@ -1,4 +1,3 @@
import os
import re
from typing import Any, Callable, Dict, List, Union
@ -18,7 +17,6 @@ from langchain.vectorstores import Chroma
from pydantic import BaseModel, Field
from swarms.models.prompts.sales import SALES_AGENT_TOOLS_PROMPT, conversation_stages
from swarms.tools.interpreter_tool import compile
from swarms.agents.omni_modal_agent import OmniModalAgent
# classes

@ -1,6 +1,5 @@
import logging
import os
from typing import Optional
import faiss
from langchain import LLMChain, OpenAI, PromptTemplate

@ -1,3 +1,6 @@
# from swarms.models.palm import GooglePalm
# from swarms.models.openai import OpenAIChat
#prompts
from swarms.models.anthropic import Anthropic
# from swarms.models.palm import GooglePalm
from swarms.models.petals import Petals

@ -1 +0,0 @@
from exa import Inference, GPTQInference, MultiModalInference

@ -1,5 +1,5 @@
import time
from typing import Any, Callable, List
from typing import Any, List
from swarms.models.prompts.agent_prompt_generator import get_prompt
class TokenUtils:

@ -38,10 +38,4 @@ def debate_monitor(game_description, word_limit, character_names):
def generate_character_header(game_description, topic, character_name, character_description):
prompt = f"""{game_description}
Your name is {character_name}.
You are a presidential candidate.
Your description is as follows: {character_description}
You are debating the topic: {topic}.
Your goal is to be as creative as possible and make the voters think you are the best candidate.
"""
pass

@ -1,3 +1,4 @@
#structs
#structs
from swarms.structs.workflow import Workflow
from swarms.structs.task import Task
from swarms.structs.task import Task

@ -1,4 +1,5 @@
# swarms
# swarms
from swarms.swarms.dialogue_simulator import DialogueSimulator
from swarms.swarms.autoscaler import AutoScaler
from swarms.swarms.orchestrate import Orchestrator

@ -29,7 +29,10 @@ class GodMode:
"""
def __init__(self, llms):
def __init__(
self,
llms
):
self.llms = llms
def run(self, task):

@ -7,11 +7,12 @@ def select_speaker(step: int, agents: List[Worker]) -> int:
# This function selects the speaker in a round-robin fashion
return step % len(agents)
class MultiAgentDebate:
"""
MultiAgentDebate
Args:
"""
def __init__(

@ -281,13 +281,13 @@ class Orchestrator:
)
#store the message in the vector database
added = self.collection.add(
self.collection.add(
embeddings=[message_vector],
documents=[message],
ids=[f"{sender_id}_to_{receiver_id}"]
)
result = self.run(
self.run(
objective=f"chat with agent {receiver_id} about {message}"
)

@ -8,17 +8,13 @@ from typing import Optional
import pandas as pd
from langchain.agents import tool
from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.docstore.document import Document
ROOT_DIR = "./data/"
from langchain.chains.qa_with_sources.loading import BaseCombineDocumentsChain
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools import BaseTool
from langchain.tools.file_management.read import ReadFileTool
from langchain.tools.file_management.write import WriteFileTool
from pydantic import Field
from swarms.utils.logger import logger

@ -1,6 +1,6 @@
#props to shroominic
from swarms.tools.base import Tool, ToolException
from typing import Callable, Any, List
from typing import Any, List
from codeinterpreterapi import CodeInterpreterSession, File, ToolException
class CodeInterpreter(Tool):

@ -75,7 +75,7 @@ class SpeechToText:
use_auth_token=self.hf_api_key,
device=device
)
diarize_segments = diarize_model(audio_file)
diarize_model(audio_file)
try:
segments = result["segments"]
@ -113,7 +113,7 @@ class SpeechToText:
device=self.device
)
diarize_segments = diarize_model(audio_file)
diarize_model(audio_file)
try:
segments = result["segments"]

@ -17,16 +17,9 @@ from swarms.tools.autogpt import (
)
from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
# self.llm = ChatOpenAI(
# model_name=model_name,
# openai_api_key=self.openai_api_key,
# temperature=self.temperature
# )
#cache
ROOT_DIR = "./data/"
#main
class Worker:
"""
@ -79,9 +72,6 @@ class Worker:
self.setup_tools(external_tools)
self.setup_memory()
self.setup_agent()
# self.task_queue = []
# self.executor = concurrent.futures.ThreadPoolExecutor()
def reset(self):
"""

@ -1,15 +1,5 @@
import pytest
from langchain.base_language import BaseLanguageModel
from langchain_experimental.autonomous_agents.hugginggpt.repsonse_generator import (
load_response_generator,
)
from langchain_experimental.autonomous_agents.hugginggpt.task_executor import (
TaskExecutor,
)
from langchain_experimental.autonomous_agents.hugginggpt.task_planner import (
load_chat_planner,
)
from transformers import load_tool
from swarms.agents import (
OmniModalAgent, # Replace `your_module_name` with the appropriate module name

Loading…
Cancel
Save