[FIXES++] [FEATS][find_image_path] [Simple Agent] [auto_update] [check_for_update] [system_info]

pull/336/head
Kye 1 year ago
parent d454733dff
commit 36b022ed41

@@ -459,6 +459,57 @@ print(video_path)
```
### Simple Conversational Agent
- Plug-and-play conversational agent with `GPT4`, `Mixtral`, or any of our models
- Reliable conversational structure that holds messages together, with dynamic handling of long-context conversations via automatic chunking
- Reliable and simple: this system consistently returns the responses you expect
```python
import os

from dotenv import load_dotenv

from swarms import (
    OpenAIChat,
    Conversation,
)

conv = Conversation()

# Load the environment variables
load_dotenv()

# Get the API key from the environment
api_key = os.environ.get("OPENAI_API_KEY")

# Initialize the language model
llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4")


# Run the language model in a loop
def interactive_conversation(llm):
    conv = Conversation()
    while True:
        user_input = input("User: ")
        conv.add("user", user_input)
        if user_input.lower() == "quit":
            break
        task = (
            conv.return_history_as_string()
        )  # Get the conversation history
        out = llm(task)
        conv.add("assistant", out)
        print(
            f"Assistant: {out}",  # color="cyan"
        )
    conv.display_conversation()
    conv.export_conversation("conversation.txt")


# Replace with your LLM instance
interactive_conversation(llm)
```
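Type `quit` at the prompt to end the loop; the full transcript is then displayed and exported to `conversation.txt`.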
---
# Features 🤖

@@ -1,26 +1,43 @@
-from swarms.agents.simple_agent import SimpleAgent
-from swarms.structs import Agent
-from swarms.models import OpenAIChat
 import os
-
-api_key = ""
-
-llm = OpenAIChat(
-    openai_api_key=api_key,
-    temperature=0.5,
+
+from dotenv import load_dotenv
+
+from swarms import (
+    OpenAIChat,
+    Conversation,
+    # display_markdown_message,
 )
-
-# Initialize the agent
-agent = Agent(
-    llm=llm,
-    max_loops=5,
-)
+
+conv = Conversation()
+
+# Load the environment variables
+load_dotenv()
+
+# Get the API key from the environment
+api_key = os.environ.get("OPENAI_API_KEY")
+
+# Initialize the language model
+llm = OpenAIChat(openai_api_key=api_key, model_name="gpt-4")
+
+
+# Run the language model in a loop
+def interactive_conversation(llm):
+    conv = Conversation()
+    while True:
+        user_input = input("User: ")
+        conv.add("user", user_input)
+        if user_input.lower() == "quit":
+            break
+        task = (
+            conv.return_history_as_string()
+        )  # Get the conversation history
+        out = llm(task)
+        conv.add("assistant", out)
+        print(
+            f"Assistant: {out}",  # color="cyan"
+        )
+    conv.display_conversation()
+    conv.export_conversation("conversation.txt")
-
-agent = SimpleAgent(
-    name="Optimus Prime",
-    agent=agent,
-    # Memory
-)
-
-out = agent.run("Generate a 10,000 word blog on health and wellness.")
-print(out)
+
+# Replace with your LLM instance
+interactive_conversation(llm)

@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "swarms"
-version = "2.6.0"
+version = "2.6.1"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]

@@ -49,11 +49,11 @@ class Fuyu(BaseMultiModalModel):
         self.processor = FuyuProcessor(
             image_processor=self.image_processor,
             tokenizer=self.tokenizer,
-            **kwargs,
         )

         self.model = FuyuForCausalLM.from_pretrained(
             model_name,
             device_map=device_map,
+            *args,
             **kwargs,
         )
@@ -62,7 +62,7 @@ class Fuyu(BaseMultiModalModel):
         image_pil = Image.open(img)
         return image_pil

-    def run(self, text: str, img: str, *args, **kwargs):
+    def run(self, text: str = None, img: str = None, *args, **kwargs):
         """Run the pipeline

         Args:
@@ -78,8 +78,6 @@ class Fuyu(BaseMultiModalModel):
             text=text,
             images=[img],
             device=self.device_map,
-            *args,
-            **kwargs,
         )

         for k, v in model_inputs.items():
@@ -94,8 +92,6 @@ class Fuyu(BaseMultiModalModel):
             text = self.processor.batch_decode(
                 output[:, -7:],
                 skip_special_tokens=True,
-                *args,
-                **kwargs,
             )
             return print(str(text))
         except Exception as error:
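A minimal sketch of how the relaxed `run` signature is called, assuming `Fuyu` is exported from `swarms.models`; the constructor defaults and image path below are assumptions, not part of this diff:

```python
from swarms.models import Fuyu

# Hypothetical example: default checkpoint, local image file.
model = Fuyu()
model.run(text="Describe this image:", img="images/swarms.jpeg")
```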

@@ -94,7 +94,7 @@ class ZeroscopeTTV:
             width=self.width,
             num_frames=self.num_frames,
             *args,
-            **kwargs
+            **kwargs,
         ).frames
         video_path = export_to_video(video_frames)
         return video_path

@@ -0,0 +1,11 @@
+import subprocess
+from swarms.telemetry.check_update import check_for_update
+
+
+def auto_update():
+    """auto update swarms"""
+    try:
+        if check_for_update():
+            subprocess.run(["pip", "install", "--upgrade", "swarms"])
+    except Exception as e:
+        print(e)
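A sketch of a call site, assuming the new module lands at `swarms.telemetry.auto_update` (the guard below is illustrative, not part of this diff):

```python
# Hypothetical entry point: check PyPI once at startup and upgrade if needed.
from swarms.telemetry.auto_update import auto_update

if __name__ == "__main__":
    auto_update()  # no-op when the installed version is already current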

@@ -0,0 +1,46 @@
+import pkg_resources
+import requests
+from packaging import version
+
+import importlib.util
+import sys
+
+
+# borrowed from: https://stackoverflow.com/a/1051266/656011
+def check_for_package(package):
+    if package in sys.modules:
+        return True
+    elif (spec := importlib.util.find_spec(package)) is not None:
+        try:
+            module = importlib.util.module_from_spec(spec)
+
+            sys.modules[package] = module
+            spec.loader.exec_module(module)
+
+            return True
+        except ImportError:
+            return False
+    else:
+        return False
+
+
+def check_for_update():
+    """Check for updates
+
+    Returns:
+        BOOL: Flag to indicate if there is an update
+    """
+    # Fetch the latest version from the PyPI API
+    response = requests.get(f"https://pypi.org/pypi/swarms/json")
+    latest_version = response.json()["info"]["version"]
+
+    # Get the current version using pkg_resources
+    current_version = pkg_resources.get_distribution("swarms").version
+
+    return version.parse(latest_version) > version.parse(
+        current_version
+    )
+
+
+# out = check_for_update()
+# print(out)
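A quick way to exercise both helpers; the import path mirrors the one `auto_update` uses above, and `requests` is just a stand-in package:

```python
from swarms.telemetry.check_update import check_for_package, check_for_update

# True when the package is importable in the current environment.
print(check_for_package("requests"))

# True when PyPI hosts a newer swarms release than the installed one.
if check_for_update():
    print("A newer version of swarms is available on PyPI.")
```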

@@ -0,0 +1,158 @@
+import platform
+import subprocess
+
+import pkg_resources
+import psutil
+import toml
+
+
+def get_python_version():
+    return platform.python_version()
+
+
+def get_pip_version():
+    try:
+        pip_version = (
+            subprocess.check_output(["pip", "--version"])
+            .decode()
+            .split()[1]
+        )
+    except Exception as e:
+        pip_version = str(e)
+    return pip_version
+
+
+def get_oi_version():
+    try:
+        oi_version_cmd = (
+            subprocess.check_output(["interpreter", "--version"])
+            .decode()
+            .split()[1]
+        )
+    except Exception as e:
+        oi_version_cmd = str(e)
+    oi_version_pkg = pkg_resources.get_distribution(
+        "open-interpreter"
+    ).version
+    oi_version = oi_version_cmd, oi_version_pkg
+    return oi_version
+
+
+def get_os_version():
+    return platform.platform()
+
+
+def get_cpu_info():
+    return platform.processor()
+
+
+def get_ram_info():
+    vm = psutil.virtual_memory()
+    used_ram_gb = vm.used / (1024**3)
+    free_ram_gb = vm.free / (1024**3)
+    total_ram_gb = vm.total / (1024**3)
+    return (
+        f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:"
+        f" {free_ram_gb:.2f}"
+    )
+
+
+def get_package_mismatches(file_path="pyproject.toml"):
+    with open(file_path, "r") as file:
+        pyproject = toml.load(file)
+    dependencies = pyproject["tool"]["poetry"]["dependencies"]
+    dev_dependencies = pyproject["tool"]["poetry"]["group"]["dev"][
+        "dependencies"
+    ]
+    dependencies.update(dev_dependencies)
+
+    installed_packages = {
+        pkg.key: pkg.version for pkg in pkg_resources.working_set
+    }
+
+    mismatches = []
+    for package, version_info in dependencies.items():
+        if isinstance(version_info, dict):
+            version_info = version_info["version"]
+        installed_version = installed_packages.get(package)
+        if installed_version and version_info.startswith("^"):
+            expected_version = version_info[1:]
+            if not installed_version.startswith(expected_version):
+                mismatches.append(
+                    f"\t  {package}: Mismatch,"
+                    f" pyproject.toml={expected_version},"
+                    f" pip={installed_version}"
+                )
+        else:
+            mismatches.append(f"\t  {package}: Not found in pip list")
+
+    return "\n" + "\n".join(mismatches)
+
+
+def interpreter_info(interpreter):
+    try:
+        if interpreter.offline and interpreter.llm.api_base:
+            try:
+                curl = subprocess.check_output(
+                    f"curl {interpreter.llm.api_base}"
+                )
+            except Exception as e:
+                curl = str(e)
+        else:
+            curl = "Not local"
+
+        messages_to_display = []
+        for message in interpreter.messages:
+            message = message.copy()
+            try:
+                if len(message["content"]) > 600:
+                    message["content"] = (
+                        message["content"][:300]
+                        + "..."
+                        + message["content"][-300:]
+                    )
+            except Exception as e:
+                print(str(e), "for message:", message)
+            messages_to_display.append(message)
+
+        return f"""
+
+        # Interpreter Info
+
+        Vision: {interpreter.llm.supports_vision}
+        Model: {interpreter.llm.model}
+        Function calling: {interpreter.llm.supports_functions}
+        Context window: {interpreter.llm.context_window}
+        Max tokens: {interpreter.llm.max_tokens}
+
+        Auto run: {interpreter.auto_run}
+        API base: {interpreter.llm.api_base}
+        Offline: {interpreter.offline}
+
+        Curl output: {curl}
+
+        # Messages
+
+        System Message: {interpreter.system_message}
+
+        """ + "\n\n".join([str(m) for m in messages_to_display])
+    except:
+        return "Error, couldn't get interpreter info"
+
+
+def system_info(interpreter):
+    oi_version = get_oi_version()
+    print(f"""
+        Python Version: {get_python_version()}
+        Pip Version: {get_pip_version()}
+        Open-interpreter Version: cmd: {oi_version[0]}, pkg: {oi_version[1]}
+        OS Version and Architecture: {get_os_version()}
+        CPU Info: {get_cpu_info()}
+        RAM Info: {get_ram_info()}
+        {interpreter_info(interpreter)}
+    """)
+
+    # Removed the following, as it causes `FileNotFoundError: [Errno 2] No such file or directory: 'pyproject.toml'` on prod
+    # (i think it works on dev, but on prod the pyproject.toml will not be in the cwd. might not be accessible at all)
+    # Package Version Mismatches:
+    # {get_package_mismatches()}
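The standalone getters can be smoke-tested without an interpreter instance; a sketch, assuming the module is importable as `swarms.telemetry.sys_info` (the path is a guess, not stated in this diff):

```python
# Hypothetical import path for the new module.
from swarms.telemetry.sys_info import (
    get_python_version,
    get_pip_version,
    get_os_version,
    get_cpu_info,
    get_ram_info,
)

print("Python:", get_python_version())
print("pip:", get_pip_version())
print("OS:", get_os_version())
print("CPU:", get_cpu_info())
print("RAM:", get_ram_info())
```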

@@ -11,6 +11,7 @@ from swarms.utils.load_model_torch import load_model_torch
 from swarms.utils.prep_torch_model_inference import (
     prep_torch_inference,
 )
+from swarms.utils.find_img_path import find_image_path

 __all__ = [
     "display_markdown_message",
@@ -22,4 +23,5 @@ __all__ = [
     "check_device",
     "load_model_torch",
     "prep_torch_inference",
+    "find_image_path",
 ]

@@ -5,22 +5,7 @@ import time
 import traceback


-class BaseCodeInterpreter:
-    """
-    .run is a generator that yields a dict with attributes: active_line, output
-    """
-
-    def __init__(self):
-        pass
-
-    def run(self, code):
-        pass
-
-    def terminate(self):
-        pass
-
-
-class SubprocessCodeInterpreter(BaseCodeInterpreter):
+class SubprocessCodeInterpreter:
     """
     SubprocessCodeinterpreter is a base class for code interpreters that run code in a subprocess.
@@ -43,12 +28,36 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter):
         self.done = threading.Event()

     def detect_active_line(self, line):
+        """Detect if the line is an active line
+
+        Args:
+            line (_type_): _description_
+
+        Returns:
+            _type_: _description_
+        """
         return None

     def detect_end_of_execution(self, line):
+        """detect if the line is an end of execution line
+
+        Args:
+            line (_type_): _description_
+
+        Returns:
+            _type_: _description_
+        """
         return None

     def line_postprocessor(self, line):
+        """Line postprocessor
+
+        Args:
+            line (_type_): _description_
+
+        Returns:
+            _type_: _description_
+        """
         return line

     def preprocess_code(self, code):
@@ -61,9 +70,11 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter):
         return code

     def terminate(self):
+        """terminate the subprocess"""
         self.process.terminate()

     def start_process(self):
+        """start the subprocess"""
         if self.process:
             self.terminate()
@@ -88,6 +99,14 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter):
         ).start()

     def run(self, code: str):
+        """Run the code in the subprocess
+
+        Args:
+            code (str): _description_
+
+        Yields:
+            _type_: _description_
+        """
         retry_count = 0
         max_retries = 3
@@ -157,6 +176,12 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter):
                 break

     def handle_stream_output(self, stream, is_error_stream):
+        """Handle the output from the subprocess
+
+        Args:
+            stream (_type_): _description_
+            is_error_stream (bool): _description_
+        """
         for line in iter(stream.readline, ""):
             if self.debug_mode:
                 print(f"Received output line:\n{line}\n---")
@@ -179,3 +204,12 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter):
                 self.done.set()
             else:
                 self.output_queue.put({"output": line})
+
+
+interpreter = SubprocessCodeInterpreter()
+interpreter.start_cmd = "python3"
+for output in interpreter.run("""
+print("hello")
+print("world")
+"""):
+    print(output)

@@ -0,0 +1,24 @@
+import os
+import re
+
+
+def find_image_path(text):
+    """Find the image path from the text
+
+    Args:
+        text (_type_): _description_
+
+    Returns:
+        _type_: _description_
+    """
+    pattern = r"([A-Za-z]:\\[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))|(/[^:\n]*?\.(png|jpg|jpeg|PNG|JPG|JPEG))"
+    matches = [
+        match.group()
+        for match in re.finditer(pattern, text)
+        if match.group()
+    ]
+    matches += [match.replace("\\", "") for match in matches if match]
+    existing_paths = [
+        match for match in matches if os.path.exists(match)
+    ]
+    return max(existing_paths, key=len) if existing_paths else None
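A short illustration; the sample path below is made up, and note the helper only returns candidates that actually exist on disk, preferring the longest match:

```python
from swarms.utils import find_image_path

# Hypothetical prompt text containing a Unix-style image path.
text = "Describe the chart saved at /tmp/chart.png in two sentences."

# Prints "/tmp/chart.png" if that file exists, otherwise None.
print(find_image_path(text))
```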

@@ -1,23 +1,27 @@
-from rich import print as rich_print
+from rich.console import Console
 from rich.markdown import Markdown
 from rich.rule import Rule


-def display_markdown_message(message: str):
+def display_markdown_message(message: str, color: str = "cyan"):
     """
     Display markdown message. Works with multiline strings with lots of indentation.
     Will automatically make single line > tags beautiful.
     """
+    console = Console()
     for line in message.split("\n"):
         line = line.strip()
         if line == "":
-            print("")
+            console.print("")
         elif line == "---":
-            rich_print(Rule(style="white"))
+            console.print(Rule(style=color))
         else:
-            rich_print(Markdown(line))
+            console.print(Markdown(line, style=color))

     if "\n" not in message and message.startswith(">"):
         # Aesthetic choice. For these tags, they need a space below them
-        print("")
+        console.print("")
+
+
+# display_markdown_message("I love you and you are beautiful.", "cyan")
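A usage sketch for the new `color` parameter; `display_markdown_message` is already exported from `swarms.utils` per the `__all__` hunk above:

```python
from swarms.utils import display_markdown_message

# Blockquote, rule, and bold text all rendered in magenta.
display_markdown_message(
    "> Swarms status\n---\n**All systems nominal.**",
    color="magenta",
)
```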
