pull/339/merge
Commit de2eb44f0b by evelynmitchell (committed by GitHub), 1 year ago

@@ -0,0 +1,97 @@
import os

from dotenv import load_dotenv
from transformers import AutoModelForCausalLM, AutoTokenizer

# Import the models, structs, and telemetry modules
from swarms import (
    Gemini,
    GPT4VisionAPI,
    Mixtral,
    OpenAI,
    ToolAgent,
    BlocksList,
)

# Load the environment variables
load_dotenv()

# Get the environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
gemini_api_key = os.getenv("GEMINI_API_KEY")

# Tool Agent
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
json_schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "number"},
        "is_student": {"type": "boolean"},
        "courses": {"type": "array", "items": {"type": "string"}},
    },
}
toolagent = ToolAgent(
    model=model, tokenizer=tokenizer, json_schema=json_schema
)

# BlocksList lets you build custom swarms by adding classes or functions as blocks
swarm = BlocksList(
    "SocialMediaSwarm",
    "A swarm of social media agents",
    [
        OpenAI(openai_api_key=openai_api_key),
        Mixtral(),
        GPT4VisionAPI(openai_api_key=openai_api_key),
        Gemini(gemini_api_key=gemini_api_key),
    ],
)

# Add the new block to the swarm
swarm.add(toolagent)

# Remove a block from the swarm
swarm.remove(toolagent)

# Update a block in the swarm
swarm.update(toolagent)

# Get a block at a specific index
block_at_index = swarm.get(0)

# Get all blocks in the swarm
all_blocks = swarm.get_all()

# Get blocks by name
openai_blocks = swarm.get_by_name("OpenAI")

# Get blocks by type
gpt4_blocks = swarm.get_by_type("GPT4VisionAPI")

# Get blocks by ID
block_by_id = swarm.get_by_id(toolagent.id)

# Get blocks by parent
blocks_by_parent = swarm.get_by_parent(swarm)

# Get blocks by parent ID
blocks_by_parent_id = swarm.get_by_parent_id(swarm.id)

# Get blocks by parent name
blocks_by_parent_name = swarm.get_by_parent_name(swarm.name)

# Get blocks by parent type
blocks_by_parent_type = swarm.get_by_parent_type(type(swarm).__name__)

# Get blocks by parent description
blocks_by_parent_description = swarm.get_by_parent_description(
    swarm.description
)

# Run a block in the swarm on a given task
inference = swarm.run_block(toolagent, "Hello World")
print(inference)
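
# A hedged usage sketch: exercising the ToolAgent directly. The task string is
# illustrative, and ToolAgent.run(task) returning schema-conforming JSON is an
# assumption based on the swarms examples of this era.
task = (
    "Generate a person's information based on the following schema:\n"
    f"{json_schema}"
)
generated_data = toolagent.run(task)
print(generated_data)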

@@ -15,13 +15,13 @@ Pillow==9.4.0
faiss-cpu==1.7.4
openai==0.28.0
attrs==22.2.0
datasets==2.14.5
pydantic==1.10.12
bitsandbytes
soundfile==0.12.1
arize-phoenix
weaviate-client==3.25.3
huggingface-hub==0.19.3
google-generativeai==0.3.1
sentencepiece==0.1.98
requests_mock

@@ -25,6 +25,39 @@ from swarms.structs.schemas import (
)
####################
from swarms.structs.agent import Agent
from swarms.structs.autoscaler import AutoScaler
from swarms.structs.base import BaseStructure
from swarms.structs.base_swarm import AbstractSwarm
from swarms.structs.base_workflow import BaseWorkflow
from swarms.structs.concurrent_workflow import ConcurrentWorkflow
from swarms.structs.conversation import Conversation
from swarms.structs.groupchat import GroupChat, GroupChatManager
from swarms.structs.model_parallizer import ModelParallelizer
from swarms.structs.multi_agent_collab import MultiAgentCollaboration
from swarms.structs.nonlinear_workflow import NonlinearWorkflow
from swarms.structs.recursive_workflow import RecursiveWorkflow
from swarms.structs.schemas import (
    Artifact,
    ArtifactUpload,
    StepInput,
    TaskInput,
)
from swarms.structs.sequential_workflow import SequentialWorkflow
from swarms.structs.swarm_net import SwarmNetwork
from swarms.structs.utils import (
    distribute_tasks,
    extract_key_from_json,
    extract_tokens_from_text,
    find_agent_by_id,
    find_token_in_text,
    parse_tasks,
)
from dotenv import load_dotenv
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
@@ -35,19 +68,25 @@ model = OpenAIChat(
)
def process_documentation(
    item,
    module: str = "swarms.structs",
    docs_folder_path: str = "docs/swarms/structs",
):
"""
Process the documentation for a given class using OpenAI model and save it in a Markdown file.
Process the documentation for a given class or function using OpenAI model and save it in a Python file.
"""
    doc = inspect.getdoc(item)
    source = inspect.getsource(item)
    is_class = inspect.isclass(item)
    item_type = "Class Name" if is_class else "Name"
    input_content = (
        f"{item_type}:"
        f" {item.__name__}\n\nDocumentation:\n{doc}\n\nSource"
        f" Code:\n{source}"
    )

    # Process with OpenAI model
    processed_content = model(
        DOCUMENTATION_WRITER_SOP(input_content, module)
    )
@@ -59,8 +98,8 @@ def process_documentation(cls):
    dir_path = docs_folder_path
    os.makedirs(dir_path, exist_ok=True)

    # Write the processed documentation to a Markdown file
    file_path = os.path.join(dir_path, f"{item.__name__.lower()}.md")
    with open(file_path, "w") as file:
        file.write(doc_content)
@@ -82,9 +121,35 @@ def main():
    items = [
        Agent,
        SequentialWorkflow,
        AutoScaler,
        Conversation,
        TaskInput,
        Artifact,
        ArtifactUpload,
        StepInput,
        SwarmNetwork,
        ModelParallelizer,
        MultiAgentCollaboration,
        AbstractSwarm,
        GroupChat,
        GroupChatManager,
        parse_tasks,
        find_agent_by_id,
        distribute_tasks,
        find_token_in_text,
        extract_key_from_json,
        extract_tokens_from_text,
        ConcurrentWorkflow,
        RecursiveWorkflow,
        NonlinearWorkflow,
        BaseWorkflow,
        BaseStructure,
    ]
    threads = []
    for cls in items:
        thread = threading.Thread(
            target=process_documentation, args=(cls,)
        )
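        # A hedged sketch of how main() presumably continues (the hunk ends
        # here, so the start/join sequence below is an assumption rather than
        # the PR's own code):
        #     thread.start()
        #     threads.append(thread)
        # and, once every thread is started:
        #     for thread in threads:
        #         thread.join()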

@@ -170,6 +170,7 @@ class HuggingfaceLLM(AbstractLLM):
"bnb_4bit_use_double_quant": True,
"bnb_4bit_quant_type": "nf4",
"bnb_4bit_compute_dtype": dtype,
"bnb_4bit_compute_dtype": dtype,
}
bnb_config = BitsAndBytesConfig(**quantization_config)
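
For reference, a minimal standalone sketch of how such a 4-bit quantization config plugs into transformers (BitsAndBytesConfig and these kwargs are the standard transformers API; the model name is illustrative):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    "databricks/dolly-v2-12b", quantization_config=bnb_config
)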
@@ -189,6 +190,7 @@ class HuggingfaceLLM(AbstractLLM):
            self.model_id, *args, **kwargs
        ).to(self.device)

    def print_error(self, error: str):
        """Print an error message in red."""
        print(colored(f"Error: {error}", "red"))
@@ -263,7 +265,7 @@ class HuggingfaceLLM(AbstractLLM):
            *args,
            **kwargs,
        )
        return self.tokenizer.decode(
            outputs[0], skip_special_tokens=True
        )

@@ -1,3 +1,4 @@
""" This module lists all the data structures used in the swarms package."""
from swarms.structs.agent import Agent
from swarms.structs.autoscaler import AutoScaler
from swarms.structs.base import BaseStructure
@@ -54,9 +55,27 @@ __all__ = [
"ConcurrentWorkflow",
"RecursiveWorkflow",
"NonlinearWorkflow",
<<<<<<< HEAD
"BaseStruct", # from swarms/structs/base.py
=======
"BaseWorkflow",
<<<<<<< HEAD
<<<<<<< HEAD
>>>>>>> db2dbf3 ([CODE QUALITY])
=======
"BaseStructure"
>>>>>>> a9b3d7d ([CLEANUP][__init__])
=======
"BaseStructure",
"detect_markdown",
<<<<<<< HEAD
>>>>>>> 1df42a3 ([BUGFIX][Conversation] [swarm.tools])
=======
"Task",
<<<<<<< HEAD
"block"
>>>>>>> 8e1a024 ([FEATS] [BlockList] [BlockDict] [block])
=======
"block",
>>>>>>> 4055db3 ([CODE QUALITY])
]

@@ -75,8 +75,8 @@ class BlocksList(BaseStructure):
    def get_all(self):
        return self.blocks

    def run_block(self, block: Any, task: str, *args, **kwargs):
        """Run the block for the specified task.

        Args:

@@ -1,3 +1,4 @@
""" This module lists all the telemetry related functions. """
from swarms.telemetry.log_all import log_all_calls, log_calls
from swarms.telemetry.sys_info import (
    get_cpu_info,
@@ -10,7 +11,19 @@ from swarms.telemetry.sys_info import (
    get_oi_version,
    get_os_version,
    get_package_mismatches,
    get_pip_version,
    get_python_version,
    get_ram_info,
    interpreter_info,
    system_info,
)
from swarms.telemetry.user_utils import (
    generate_unique_identifier,
    generate_user_id,
    get_machine_id,
@@ -24,7 +37,11 @@ __all__ = [
"get_machine_id",
"get_system_info",
"generate_unique_identifier",
<<<<<<< HEAD
"get_python_version", # from swarms/telemetry/sys_info.py
=======
"get_python_version",
>>>>>>> 1df42a3 ([BUGFIX][Conversation] [swarm.tools])
"get_pip_version",
"get_oi_version",
"get_os_version",

@@ -1,158 +1,158 @@
import platform
import subprocess

import pkg_resources
import psutil
import toml


def get_python_version():
    return platform.python_version()


def get_pip_version():
    try:
        pip_version = (
            subprocess.check_output(["pip", "--version"])
            .decode()
            .split()[1]
        )
    except Exception as e:
        pip_version = str(e)
    return pip_version


def get_oi_version():
    try:
        oi_version_cmd = (
            subprocess.check_output(["interpreter", "--version"])
            .decode()
            .split()[1]
        )
    except Exception as e:
        oi_version_cmd = str(e)
    oi_version_pkg = pkg_resources.get_distribution(
        "open-interpreter"
    ).version
    oi_version = oi_version_cmd, oi_version_pkg
    return oi_version
def get_os_version():
    return platform.platform()


def get_cpu_info():
    return platform.processor()


def get_ram_info():
    vm = psutil.virtual_memory()
    used_ram_gb = vm.used / (1024**3)
    free_ram_gb = vm.free / (1024**3)
    total_ram_gb = vm.total / (1024**3)
    return (
        f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:"
        f" {free_ram_gb:.2f}"
    )
def get_package_mismatches(file_path="pyproject.toml"):
    with open(file_path, "r") as file:
        pyproject = toml.load(file)
    dependencies = pyproject["tool"]["poetry"]["dependencies"]
    dev_dependencies = pyproject["tool"]["poetry"]["group"]["dev"][
        "dependencies"
    ]
    dependencies.update(dev_dependencies)

    installed_packages = {
        pkg.key: pkg.version for pkg in pkg_resources.working_set
    }

    mismatches = []
    for package, version_info in dependencies.items():
        if isinstance(version_info, dict):
            version_info = version_info["version"]
        installed_version = installed_packages.get(package)
        if installed_version and version_info.startswith("^"):
            expected_version = version_info[1:]
            if not installed_version.startswith(expected_version):
                mismatches.append(
                    f"\t {package}: Mismatch,"
                    f" pyproject.toml={expected_version},"
                    f" pip={installed_version}"
                )
        else:
            mismatches.append(f"\t {package}: Not found in pip list")

    return "\n" + "\n".join(mismatches)
def interpreter_info(interpreter):
    try:
        if interpreter.offline and interpreter.llm.api_base:
            try:
                # Pass the command as a list; a bare string would require shell=True.
                curl = subprocess.check_output(
                    ["curl", interpreter.llm.api_base]
                )
            except Exception as e:
                curl = str(e)
        else:
            curl = "Not local"

        messages_to_display = []
        for message in interpreter.messages:
            message = message.copy()
            try:
                if len(message["content"]) > 600:
                    message["content"] = (
                        message["content"][:300]
                        + "..."
                        + message["content"][-300:]
                    )
            except Exception as e:
                print(str(e), "for message:", message)
            messages_to_display.append(message)

        return f"""
# Interpreter Info
Vision: {interpreter.llm.supports_vision}
Model: {interpreter.llm.model}
Function calling: {interpreter.llm.supports_functions}
Context window: {interpreter.llm.context_window}
Max tokens: {interpreter.llm.max_tokens}
Auto run: {interpreter.auto_run}
API base: {interpreter.llm.api_base}
Offline: {interpreter.offline}
Curl output: {curl}
# Messages
System Message: {interpreter.system_message}
""" + "\n\n".join([str(m) for m in messages_to_display])
    except Exception as e:
        return f"Error, couldn't get interpreter info: {e}"
def system_info(interpreter):
    oi_version = get_oi_version()
    print(f"""
Python Version: {get_python_version()}
Pip Version: {get_pip_version()}
Open-interpreter Version: cmd: {oi_version[0]}, pkg: {oi_version[1]}
OS Version and Architecture: {get_os_version()}
CPU Info: {get_cpu_info()}
RAM Info: {get_ram_info()}
{interpreter_info(interpreter)}
""")
# Removed the following, as it causes `FileNotFoundError: [Errno 2] No such file or directory: 'pyproject.toml'` on prod
# (I think it works on dev, but on prod the pyproject.toml will not be in the cwd and might not be accessible at all)
# Package Version Mismatches:
# {get_package_mismatches()}
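
# Hypothetical usage sketch (not part of the module): the helpers above can be
# called directly, e.g.
#     print(get_python_version())  # e.g. "3.10.12"
#     print(get_os_version())
#     print(get_ram_info())        # e.g. "16.00 GB, used: 8.12, free: 4.50"
# and system_info(interpreter) expects an open-interpreter instance, e.g.
#     from interpreter import interpreter
#     system_info(interpreter)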

@@ -1,3 +1,5 @@
"""Logits processors for the GPT-Neo model."""
from transformers import (
    PreTrainedTokenizer,
    LogitsWarper,
@@ -48,7 +50,7 @@ class NumberStoppingCriteria(StoppingCriteria):
        scores: torch.FloatTensor,
    ) -> bool:
        decoded = self.tokenizer.decode(
            input_ids[0][self.prompt_length:],
            skip_special_tokens=True,
        )
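
For context, a stopping criterion like this is attached at generation time through a StoppingCriteriaList (standard transformers API); the constructor arguments below are assumptions inferred from the tokenizer and prompt_length fields used above:

from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-12b")
model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-12b")
input_ids = tokenizer("The answer is ", return_tensors="pt").input_ids
stopping = StoppingCriteriaList(
    [NumberStoppingCriteria(tokenizer, prompt_length=input_ids.shape[1])]
)
output_ids = model.generate(
    input_ids, stopping_criteria=stopping, max_new_tokens=32
)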

@@ -1,4 +1,6 @@
"""Base implementation for tools or skills."""
# flake8: noqa E501
from __future__ import annotations
import asyncio

@@ -1,3 +1,6 @@
""" This module contains a function that scrapes the docstrings and parameters of a function decorated with `tool` and returns a formatted string. """
# flake8: noqa E501
import inspect
from typing import Callable
from termcolor import colored

@@ -12,7 +12,7 @@ from swarms.utils.prep_torch_model_inference import (
prep_torch_inference,
)
from swarms.utils.token_count_tiktoken import limit_tokens_from_string
from swarms.utils.try_except_wrapper import try_except_wrapper
__all__ = [
    "SubprocessCodeInterpreter",

@@ -1,7 +1,11 @@
""" This module contains the data structure of the APA framework. """
# flake8: noqa W291
from enum import Enum, unique, auto
import abc
from typing import List, Optional
import json
from json.decoder import JSONDecodeError
from dataclasses import dataclass, field
@@ -108,7 +112,8 @@ class Action:
    def to_json(self):
        try:
            tool_output = json.loads(self.tool_output)
        except JSONDecodeError:
            # print("Failed to decode JSON. Using raw output instead.")
            tool_output = self.tool_output
        return {
            "thought": self.thought,

@@ -1,3 +1,6 @@
""" This module contains the SubprocessCodeInterpreter class, which is a base class for code interpreters that run code in a subprocess. """
# flake8: noqa E501
import subprocess
import threading
import queue

@@ -1,3 +1,6 @@
"""Execute a dictionary of futures and return the results. """
# flake8: noqa E501
from concurrent import futures
from concurrent.futures import Future
from typing import TypeVar, Dict
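
A minimal sketch of the pattern this module implements: resolving a dict of named futures into a dict of results (the signature is an assumption; the real swarms helper may differ):

from concurrent.futures import Future, ThreadPoolExecutor
from typing import Dict, TypeVar

T = TypeVar("T")

def execute_futures_dict(fs_dict: Dict[str, Future]) -> Dict[str, T]:
    # Block on every future and key the results by their original names.
    return {key: future.result() for key, future in fs_dict.items()}

with ThreadPoolExecutor() as pool:
    futures = {"a": pool.submit(pow, 2, 8), "b": pool.submit(len, "swarm")}
    print(execute_futures_dict(futures))  # {'a': 256, 'b': 5}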

@@ -1,3 +1,6 @@
""" Find the image path from the text """
# flake8: noqa E501
import os
import re

@@ -1,3 +1,6 @@
""" Load a PyTorch model from a given path and move it to the specified device. """
# flake8: noqa E501
import torch
from torch import nn

@@ -1,4 +1,6 @@
"""Logging modules"""
# flake8: noqa E501
import logging
import os
import random

@@ -1,3 +1,8 @@
"""
This file contains all the utility functions and classes used in the project.
It also contains the code for uploading files to S3 and static folder.
"""
# flake8: noqa E501
import os
import random
import shutil
@@ -390,7 +395,7 @@ class FileHandler:
"SERVER", "http://localhost:8000"
)
)
+ 1 :
+ 1:
]
local_filename = (
Path("file") / local_filepath.split("/")[-1]

@@ -1,3 +1,5 @@
""" Markdown message display. """
# flake8: noqa E501
from rich.console import Console
from rich.markdown import Markdown
from rich.rule import Rule

@@ -1,3 +1,5 @@
""" This module contains functions for parsing code from Markdown files. """
# flake8: noqa E501
import re

@@ -1,3 +1,5 @@
""" Serializable base class. """
# flake8: noqa E501
from abc import ABC
from typing import Any, Dict, List, Literal, TypedDict, Union, cast

@@ -1,3 +1,5 @@
""" (In preview) An abstract class for AI worker. """
# flake8: noqa E501
from typing import Dict, List, Optional, Union

@@ -0,0 +1,37 @@
"""Tests for the sys_info module."""
# import pytest
# from unittest.mock import Mock
# from sys_info import interpreter_info, system_info
# def test_interpreter_info(mocker):
# """Test interpreter_info."""
# mocker.patch('subprocess.check_output', return_value='curl output')
# interpreter = Mock()
# interpreter.offline = True
# interpreter.llm.api_base = 'http://api_base'
# interpreter.llm.supports_vision = True
# interpreter.llm.model = 'model'
# interpreter.llm.supports_functions = True
# interpreter.llm.context_window = 'context_window'
# interpreter.llm.max_tokens = 100
# interpreter.auto_run = True
# interpreter.llm.api_base = 'http://api_base'
# interpreter.offline = True
# interpreter.system_message = 'system_message'
# interpreter.messages = [{'content': 'message_content'}]
# result = interpreter_info(interpreter)
# assert 'curl output' in result
# def test_system_info(mocker):
# """Test system_info."""
# mocker.patch('your_module.get_oi_version', return_value=('cmd_version', 'pkg_version')) # replace with your actual module name
# mocker.patch('your_module.get_python_version', return_value='python_version') # replace with your actual module name
# mocker.patch('your_module.get_pip_version', return_value='pip_version') # replace with your actual module name
# mocker.patch('your_module.get_os_version', return_value='os_version') # replace with your actual module name
# mocker.patch('your_module.get_cpu_info', return_value='cpu_info') # replace with your actual module name
# mocker.patch('your_module.get_ram_info', return_value='ram_info') # replace with your actual module name
# mocker.patch('your_module.interpreter_info', return_value='interpreter_info') # replace with your actual module name
# interpreter = Mock()
# result = system_info(interpreter)
# assert 'interpreter_info' in result