pull/68/head^2
Kye 1 year ago
parent 62f80ff949
commit 6c325a745e

@ -2,7 +2,7 @@ from swarms.models import OpenAIChat
from swarms import Worker
from swarms.prompts import PRODUCT_AGENT_PROMPT
api_key = "sk-REDACTED"  # SECURITY: a real OpenAI API key was committed here — it remains in git history; rotate/revoke it immediately
api_key = ""
llm = OpenAIChat(
openai_api_key=api_key,

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "1.9.1"
version = "1.9.2"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@ -36,6 +36,7 @@ griptape
addict
albumentations
basicsr
termcolor
controlnet-aux
diffusers
einops

File diff suppressed because one or more lines are too long

@ -8,14 +8,14 @@ warnings.filterwarnings("ignore", category=UserWarning)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
from swarms import workers
from swarms.workers.worker import Worker
# from swarms import chunkers
from swarms.workers import *
from swarms.workers.worker import Worker
from swarms.chunkers import *
from swarms.models import * # import * only works when __all__ = [] is defined in __init__.py
from swarms import structs
from swarms import swarms
from swarms import agents
from swarms.logo import logo
from swarms.structs import *
from swarms.swarms import *
from swarms.agents import *
from swarms.logo import print_colored_logo
print(logo)
print_colored_logo()

@ -10,4 +10,12 @@ from swarms.agents.idea_to_image_agent import Idea2Image
"""Agent Infrastructure, models, memory, utils, tools"""
# utils
__all__ = [
"OmniModalAgent",
"HFAgent",
"Message",
"stream",
"AbstractAgent",
"Registry",
"Idea2Image",
]

@ -1,3 +0,0 @@
"""
from swarms.apps import App
"""

@ -1,25 +0,0 @@
# base App class
class App:
    """Minimal application shell wrapping a worker agent.

    Serves as a base class for example apps.

    Args:
        worker: the worker agent that drives the app.

    Usage:
        app = App(Worker)
        app.run()
    """

    def __init__(self, worker):
        # Maintain a two-way link: the app holds its worker and the
        # worker is given a back-reference to the app.
        self.worker = worker
        self.worker.app = self

    def run(self, task):
        """Run the app on *task*; the base implementation is a no-op."""
        return None

@ -1,5 +1,13 @@
from swarms.chunkers.base import BaseChunker
from swarms.chunkers.chunk_seperator import ChunkSeparator
from swarms.chunkers.markdown import MarkdownChunker
from swarms.chunkers.text import TextChunker
from swarms.chunkers.pdf import PdfChunker
# from swarms.chunkers.base import BaseChunker
# from swarms.chunkers.markdown import MarkdownChunker
# from swarms.chunkers.text import TextChunker
# from swarms.chunkers.pdf import PdfChunker
# __all__ = [
# "BaseChunker",
# "ChunkSeparator",
# "MarkdownChunker",
# "TextChunker",
# "PdfChunker",
# ]

@ -1,3 +1,29 @@
from rich import print as rich_print
from rich.markdown import Markdown
from rich.rule import Rule
from termcolor import colored, cprint
def display_markdown_message(message):
    """Render *message* as markdown, line by line, via rich.

    Works with multiline strings with lots of indentation: each line is
    stripped before rendering. Blank lines print as-is, a bare ``---``
    becomes a horizontal rule, and everything else is rendered as
    Markdown. A single-line message beginning with ``>`` gets a trailing
    blank line so block-quote tags look right.
    """
    stripped = (raw.strip() for raw in message.split("\n"))
    for line in stripped:
        if not line:
            print("")
        elif line == "---":
            rich_print(Rule(style="white"))
        else:
            rich_print(Markdown(line))

    if message.startswith(">") and "\n" not in message:
        # Aesthetic choice. For these tags, they need a space below them
        print("")
logo = """
________ _ _______ _______ _____ ______
/ ___/\ \/ \/ /\__ \\_ __ \/ \ / ___/
@ -16,3 +42,13 @@ logo2 = """
\/ \/ \/ \/ \/ \/
"""
def print_colored_logo():
    """Read the ASCII-art logo from ``swarms/logo.txt`` and print it in red."""
    # NOTE(review): path is relative to the CWD, not the package — presumably
    # callers run from the repo root; confirm before relying on this.
    with open('swarms/logo.txt', 'r') as fh:
        ascii_art = fh.read()
    print(colored(ascii_art, 'red'))

@ -0,0 +1,7 @@
_________ __ __ _____ __________ _____ _________
/ _____// \ / \ / _ \ \______ \ / \ / _____/
\_____ \ \ \/\/ // /_\ \ | _/ / \ / \ \_____ \
/ \ \ // | \| | \/ Y \ / \
/_______ / \__/\ / \____|__ /|____|_ /\____|__ //_______ /
\/ \/ \/ \/ \/ \/

@ -2,3 +2,10 @@ from swarms.memory.vector_stores.pinecone import PineconeVector
from swarms.memory.vector_stores.base import BaseVectorStore
from swarms.memory.vector_stores.pg import PgVectorVectorStore
from swarms.memory.ocean import OceanDB
__all__ = [
"BaseVectorStore",
"PineconeVector",
"PgVectorVectorStore",
"OceanDB",
]

@ -5,7 +5,7 @@ from swarms.models.mistral import Mistral
from swarms.models.openai_models import OpenAI, AzureOpenAI, OpenAIChat
from swarms.models.zephyr import Zephyr
from swarms.models.biogpt import BioGPT
from swarms.models.huggingface import HuggingFace
from swarms.models.huggingface import HuggingfaceLLM
# MultiModal Models
@ -36,5 +36,5 @@ __all__ = [
"Nougat",
"LayoutLMDocumentQA",
"BioGPT",
"HuggingFace",
"HuggingfaceLLM",
]

@ -1,4 +1,4 @@
OperationAgentPromot = """
GROWTH_AGENT_PROMPT = """
**Standard Operating Procedure (SOP) for Autonomous Agents: Mastery in Growth Agent**

@ -1,2 +1,7 @@
from swarms.structs.workflow import Workflow
from swarms.structs.task import Task
__all__ = [
"Workflow",
"Task",
]

@ -5,3 +5,16 @@ from swarms.swarms.god_mode import GodMode
from swarms.swarms.simple_swarm import SimpleSwarm
from swarms.swarms.multi_agent_debate import MultiAgentDebate, select_speaker
from swarms.swarms.groupchat import GroupChat, GroupChatManager
__all__ = [
"DialogueSimulator",
"AutoScaler",
"Orchestrator",
"GodMode",
"SimpleSwarm",
"MultiAgentDebate",
"select_speaker",
"GroupChat",
"GroupChatManager",
]

@ -1,2 +1,3 @@
from swarms.utils.display_markdown import display_markdown_message
from swarms.utils.futures import execute_futures_dict
from swarms.utils.code_interpreter import SubprocessCodeInterpreter

@ -0,0 +1,162 @@
import subprocess
import threading
import queue
import time
import traceback
class BaseCodeInterpreter:
    """Abstract interface for code interpreters.

    ``run`` is a generator that yields a dict with attributes:
    active_line, output.
    """

    def __init__(self):
        # No state in the base class.
        pass

    def run(self, code):
        """Execute *code*; subclasses yield {'active_line': ..., 'output': ...} dicts."""
        pass

    def terminate(self):
        """Stop any in-flight execution; no-op in the base class."""
        pass
class SubprocessCodeInterpreter(BaseCodeInterpreter):
    """
    SubprocessCodeInterpreter is a base class for code interpreters that run code in a subprocess.

    ``run`` is a generator yielding dicts that carry either an
    ``active_line`` key (progress marker) or an ``output`` key (one line
    of stdout/stderr). Subclasses set ``start_cmd`` and override the
    detection hooks below.
    """

    def __init__(self):
        self.start_cmd = ""                 # command line that launches the interpreter process
        self.process = None                 # live subprocess.Popen handle, or None before start
        self.debug_mode = False             # when True, echo submitted code and raw output lines
        self.output_queue = queue.Queue()   # lines pushed by the reader threads, drained by run()
        self.done = threading.Event()       # set once end-of-execution is detected

    def detect_active_line(self, line):
        """Return the active line marker encoded in *line*, or None. Hook for subclasses."""
        return None

    def detect_end_of_execution(self, line):
        """Return truthy when *line* marks the end of execution. Hook for subclasses."""
        return None

    def line_postprocessor(self, line):
        """Transform a raw output line; returning None discards the line."""
        return line

    def preprocess_code(self, code):
        """
        This needs to insert an end_of_execution marker of some kind,
        which can be detected by detect_end_of_execution.

        Optionally, add active line markers for detect_active_line.
        """
        return code

    def terminate(self):
        """Terminate the subprocess if one is running."""
        # Guard: terminate() may be called before any process was started.
        if self.process:
            self.process.terminate()

    def start_process(self):
        """(Re)start the interpreter subprocess and its two output-reader threads."""
        if self.process:
            self.terminate()

        self.process = subprocess.Popen(
            self.start_cmd.split(),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            bufsize=0,  # unbuffered so output appears as soon as it is produced
            universal_newlines=True,
        )
        # Daemon threads so a hung interpreter never blocks interpreter exit.
        threading.Thread(
            target=self.handle_stream_output,
            args=(self.process.stdout, False),
            daemon=True,
        ).start()
        threading.Thread(
            target=self.handle_stream_output,
            args=(self.process.stderr, True),
            daemon=True,
        ).start()

    def run(self, code):
        """Execute *code* in the subprocess, yielding output dicts as they arrive.

        Writing the code to the process is retried up to 3 times, with a
        process restart between attempts. Yields {"output": ...} and
        {"active_line": ...} dicts; returns when end of execution is seen.
        """
        retry_count = 0
        max_retries = 3

        # Setup
        try:
            code = self.preprocess_code(code)
            if not self.process:
                self.start_process()
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            yield {"output": traceback.format_exc()}
            return

        while retry_count <= max_retries:
            if self.debug_mode:
                print(f"Running code:\n{code}\n---")

            self.done.clear()

            try:
                self.process.stdin.write(code + "\n")
                self.process.stdin.flush()
                break
            except Exception:  # was a bare except: broken pipe / dead process etc.
                if retry_count != 0:
                    # For UX, I like to hide this if it happens once. Obviously feels better to not see errors
                    # Most of the time it doesn't matter, but we should figure out why it happens frequently with:
                    # applescript
                    yield {"output": traceback.format_exc()}
                    yield {"output": f"Retrying... ({retry_count}/{max_retries})"}
                    yield {"output": "Restarting process."}

                self.start_process()

                retry_count += 1
                if retry_count > max_retries:
                    yield {"output": "Maximum retries reached. Could not execute code."}
                    return

        # Drain the output queue until the reader threads signal completion.
        while True:
            if not self.output_queue.empty():
                yield self.output_queue.get()
            else:
                time.sleep(0.1)
            try:
                output = self.output_queue.get(timeout=0.3)  # Waits for 0.3 seconds
                yield output
            except queue.Empty:
                if self.done.is_set():
                    # Try to yank 3 more times from it... maybe there's something in there...
                    # (I don't know if this actually helps. Maybe we just need to yank 1 more time)
                    for _ in range(3):
                        if not self.output_queue.empty():
                            yield self.output_queue.get()
                    time.sleep(0.2)
                    break

    def handle_stream_output(self, stream, is_error_stream):
        """Reader-thread target: post-process each line from *stream* and enqueue results."""
        for line in iter(stream.readline, ""):
            if self.debug_mode:
                print(f"Received output line:\n{line}\n---")

            line = self.line_postprocessor(line)

            if line is None:
                continue  # `line = None` is the postprocessor's signal to discard completely
            if self.detect_active_line(line):
                active_line = self.detect_active_line(line)
                self.output_queue.put({"active_line": active_line})
            elif self.detect_end_of_execution(line):
                self.output_queue.put({"active_line": None})
                time.sleep(0.1)
                self.done.set()
            elif is_error_stream and "KeyboardInterrupt" in line:
                self.output_queue.put({"output": "KeyboardInterrupt"})
                time.sleep(0.1)
                self.done.set()
            else:
                self.output_queue.put({"output": line})

@ -0,0 +1,23 @@
from rich import print as rich_print
from rich.markdown import Markdown
from rich.rule import Rule
def display_markdown_message(message):
    """Pretty-print a markdown *message* using rich, one line at a time.

    Handles multiline strings with heavy indentation by stripping each
    line first. Empty lines are printed verbatim, ``---`` renders as a
    rule, and all other lines render as Markdown. A lone line starting
    with ``>`` is followed by a blank line for readability.
    """
    for raw_line in message.split("\n"):
        content = raw_line.strip()
        if content == "---":
            rich_print(Rule(style="white"))
        elif content:
            rich_print(Markdown(content))
        else:
            print("")

    single_line = "\n" not in message
    if single_line and message.startswith(">"):
        # Aesthetic choice. For these tags, they need a space below them
        print("")
Loading…
Cancel
Save