[BUGF][CLEANUP]

pull/432/head
Kye 10 months ago
parent 32a42b4e83
commit 41c5295239

LICENSE

@@ -1,6 +1,5 @@
-Creative Commons Attribution 4.0 International Public License
+# Creative Commons Attribution 4.0 International Public License

 By exercising the Licensed Rights (defined below), You accept and agree
 to be bound by the terms and conditions of this Creative Commons

README.md

@@ -213,6 +213,52 @@ print(out)
 ```
+
+# `Agent` with Long Term Memory ++ Tools!
+
+An LLM equipped with long-term memory and tools: a full-stack agent capable of automating nearly any digital task, given a good prompt.
+
+```python
+from swarms import Agent, ChromaDB, OpenAIChat, tool
+
+# Create an instance of the ChromaDB class as long-term memory
+memory = ChromaDB(
+    metric="cosine",
+    n_results=3,
+    output_dir="results",
+    docs_folder="docs",
+)
+
+# Initialize a tool
+@tool
+def search_api(query: str):
+    # Add your logic here
+    return query
+
+# Initialize the agent with the OpenAIChat instance and other parameters
+agent = Agent(
+    agent_name="Covid-19-Chat",
+    agent_description=(
+        "This agent provides information about COVID-19 symptoms."
+    ),
+    llm=OpenAIChat(),
+    max_loops="auto",
+    autosave=True,
+    verbose=True,
+    long_term_memory=memory,
+    stopping_condition="finish",
+    tools=[search_api],
+)
+
+# Define the task
+task = "What are the symptoms of COVID-19?"
+
+# Run the agent on the task
+out = agent.run(task)
+print(out)
+```
@@ -873,6 +919,10 @@ agent = Agent(
 # Run the workflow on a task
 agent.run(task=task, img=img)
 ```
+
+----
+
+## Build your own LLMs, Agents, and Swarms!
+
 ### Swarms Compliant Model Interface
 ```python
@@ -935,6 +985,16 @@ agent = MyCustomAgent()
 out = agent("Analyze and summarize these financial documents: ")
 print(out)
+```
+
+### Compliant Interface for Multi-Agent Collaboration
+
+```python
+from swarms import Agent, AbstractSwarm
 ```

 ---
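The collaboration section above survives in this view only as its import line; the rest of the added example is elided. As a rough sketch of what a compliant swarm could look like (a sketch only: the abstract methods `AbstractSwarm` actually requires are not visible in this commit, so the constructor and `run` entry point below are assumptions and more overrides may be needed):

```python
from swarms import Agent, AbstractSwarm


class TwoAgentSwarm(AbstractSwarm):
    """Hypothetical swarm: one agent drafts, a second refines."""

    def __init__(self, drafter: Agent, refiner: Agent):
        self.drafter = drafter
        self.refiner = refiner

    def run(self, task: str) -> str:
        # Pipe the first agent's output into the second (assumed entry point)
        draft = self.drafter.run(task)
        return self.refiner.run(f"Refine this draft: {draft}")
```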

pyproject.toml

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "swarms"
-version = "4.6.1"
+version = "4.6.7"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]

swarms/__init__.py

@@ -11,12 +11,10 @@ activate_sentry()

 from swarms.agents import *  # noqa: E402, F403
 from swarms.artifacts import *  # noqa: E402, F403
-from swarms.chunkers import *  # noqa: E402, F403
 from swarms.memory import *  # noqa: E402, F403
 from swarms.models import *  # noqa: E402, F403
 from swarms.prompts import *  # noqa: E402, F403
 from swarms.structs import *  # noqa: E402, F403
 from swarms.telemetry import *  # noqa: E402, F403
 from swarms.tools import *  # noqa: E402, F403
 from swarms.utils import *  # noqa: E402, F403
-from swarms.schedulers import *  # noqa: E402, F403

swarms/artifacts/text_artifact.py

@@ -1,9 +1,8 @@
 from __future__ import annotations

 from dataclasses import dataclass, field
+from typing import Callable

 from swarms.artifacts.base_artifact import BaseArtifact
-from swarms.tokenizers.base_tokenizer import BaseTokenizer


 @dataclass
@@ -31,6 +30,7 @@ class TextArtifact(BaseArtifact):
     value: str
     encoding: str = "utf-8"
     encoding_error_handler: str = "strict"
+    tokenizer: Callable = None
     _embedding: list[float] = field(default_factory=list)

     @property
@@ -49,8 +49,8 @@ class TextArtifact(BaseArtifact):

         return self.embedding

-    def token_count(self, tokenizer: BaseTokenizer) -> int:
-        return tokenizer.count_tokens(str(self.value))
+    def token_count(self) -> int:
+        return self.tokenizer.count_tokens(str(self.value))

     def to_bytes(self) -> bytes:
         return self.value.encode(
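With this change the tokenizer travels with the artifact instead of being passed to `token_count`. A minimal sketch of the new call shape, assuming `value` is the only required field inherited from `BaseArtifact` and that the tokenizer only needs a `count_tokens` method (the interface `BaseTokenizer` used to define):

```python
from swarms.artifacts.text_artifact import TextArtifact


class WordTokenizer:
    """Stand-in tokenizer exposing only count_tokens."""

    def count_tokens(self, text: str) -> int:
        return len(text.split())


artifact = TextArtifact(value="swarms of agents", tokenizer=WordTokenizer())
print(artifact.token_count())  # 3; previously artifact.token_count(tokenizer)
```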

swarms/models/__init__.py

@@ -38,7 +38,7 @@ from swarms.models.popular_llms import (
     ReplicateLLM as Replicate,
 )
 from swarms.models.qwen import QwenVLMultiModal  # noqa: E402
-from swarms.models.sam_supervision import SegmentAnythingMarkGenerator
+# from swarms.models.sam_supervision import SegmentAnythingMarkGenerator
 from swarms.models.sampling_params import SamplingParams, SamplingType
 from swarms.models.together import TogetherLLM  # noqa: E402
 from swarms.models.types import (  # noqa: E402
@@ -79,7 +79,6 @@ __all__ = [
     "Replicate",
     "SamplingParams",
     "SamplingType",
-    "SegmentAnythingMarkGenerator",
     "TextModality",
     "TogetherLLM",
     "Vilt",

swarms/models/base_embedding_model.py

@@ -4,11 +4,8 @@ from abc import ABC, abstractmethod
 from dataclasses import dataclass, field

 import numpy as np
+from typing import Callable

 from swarms.artifacts.text_artifact import TextArtifact
-from swarms.chunkers.base_chunker import BaseChunker
-from swarms.chunkers.text_chunker import TextChunker
-from swarms.tokenizers.base_tokenizer import BaseTokenizer
 from swarms.utils.exponential_backoff import ExponentialBackoffMixin

@@ -25,12 +22,8 @@ class BaseEmbeddingModel(
     """

     model: str = None
-    tokenizer: BaseTokenizer | None = None
-    chunker: BaseChunker = field(init=False)
-
-    def __post_init__(self) -> None:
-        if self.tokenizer:
-            self.chunker = TextChunker(tokenizer=self.tokenizer)
+    tokenizer: Callable = None
+    chunker: Callable = None

     def embed_text_artifact(
         self, artifact: TextArtifact
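This is the same dependency-injection move as in `TextArtifact`: instead of a typed `BaseTokenizer`/`BaseChunker` pair wired together in `__post_init__`, both collaborators are now plain callables the caller supplies. A self-contained sketch of the pattern (names are illustrative, not swarms APIs):

```python
from dataclasses import dataclass
from typing import Callable


def whitespace_tokenizer(text: str) -> list[str]:
    # Stand-in tokenizer: split on whitespace
    return text.split()


def fixed_window_chunker(tokens: list[str], size: int = 4) -> list[list[str]]:
    # Stand-in chunker: fixed-size token windows
    return [tokens[i : i + size] for i in range(0, len(tokens), size)]


@dataclass
class EmbedderDeps:
    # Mirrors the new field shape: plain callables, no __post_init__ wiring
    tokenizer: Callable = whitespace_tokenizer
    chunker: Callable = fixed_window_chunker


deps = EmbedderDeps()
print(deps.chunker(deps.tokenizer("one two three four five six")))
```

One consequence worth noting: the chunker is no longer derived from the tokenizer automatically, so callers that relied on the old `__post_init__` behavior must now pass both.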

swarms/models/sam_supervision.py

@@ -1,5 +1,4 @@
-from typing import Optional
+from typing import Optional, Callable

 import cv2
 import numpy as np
 import supervision as sv
@@ -12,7 +11,6 @@ from transformers import (
 )

 from swarms.models.base_multimodal_model import BaseMultiModalModel
-from swarms.utils.supervision_masking import masks_to_marks


 class SegmentAnythingMarkGenerator(BaseMultiModalModel):
@@ -30,6 +28,7 @@ class SegmentAnythingMarkGenerator(BaseMultiModalModel):
         device: str = "cpu",
         model_name: str = "facebook/sam-vit-huge",
         visualize_marks: bool = False,
+        masks_to_marks: Callable = sv.masks_to_marks,
         *args,
         **kwargs,
     ):
@@ -37,6 +36,7 @@ class SegmentAnythingMarkGenerator(BaseMultiModalModel):
         self.device = device
         self.model_name = model_name
         self.visualize_marks = visualize_marks
+        self.masks_to_marks = masks_to_marks

         self.model = SamModel.from_pretrained(
             model_name, *args, **kwargs
@@ -74,7 +74,7 @@ class SegmentAnythingMarkGenerator(BaseMultiModalModel):
         if mask is None:
             outputs = self.pipeline(image, points_per_batch=64)
             masks = np.array(outputs["masks"])
-            return masks_to_marks(masks=masks)
+            return self.masks_to_marks(masks=masks)
         else:
             inputs = self.processor(image, return_tensors="pt").to(
                 self.device
@@ -112,6 +112,6 @@ class SegmentAnythingMarkGenerator(BaseMultiModalModel):
             )
             masks.append(mask)
         masks = np.array(masks)
-        return masks_to_marks(masks=masks)
+        return self.masks_to_marks(masks=masks)

     # def visualize_img(self):
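The mask-to-marks converter is now injected rather than imported at module level, which makes it swappable in tests. A sketch of overriding it (note that the top-level re-export was commented out of `swarms.models` in this same commit, so the class is imported from its module path; instantiation downloads SAM weights, and the pass-through converter below is hypothetical):

```python
from swarms.models.sam_supervision import SegmentAnythingMarkGenerator


def passthrough_marks(masks):
    # Stand-in converter; the default is sv.masks_to_marks
    return masks


generator = SegmentAnythingMarkGenerator(
    device="cpu",
    masks_to_marks=passthrough_marks,  # injected instead of the module import
)
```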

swarms/structs/__init__.py

@@ -69,7 +69,6 @@ from swarms.structs.task_queue_base import (
     TaskQueueBase,
     synchronized_queue,
 )
-from swarms.structs.tool_json_schema import JSON
 from swarms.structs.utils import (
     detect_markdown,
     distribute_tasks,
@@ -80,39 +79,47 @@ from swarms.structs.utils import (
     parse_tasks,
 )

 __all__ = [
     "Agent",
-    "SequentialWorkflow",
+    "AgentJob",
+    "AgentProcess",
+    "AgentProcessQueue",
+    "AutoSwarm",
+    "AutoSwarmRouter",
     "AutoScaler",
-    "Conversation",
-    "TaskInput",
-    "Artifact",
-    "ArtifactUpload",
-    "StepInput",
-    "SwarmNetwork",
-    "ModelParallelizer",
-    "MultiAgentCollaboration",
+    "BaseStructure",
     "AbstractSwarm",
-    "GroupChat",
-    "GroupChatManager",
-    "parse_tasks",
-    "find_agent_by_id",
-    "distribute_tasks",
-    "find_token_in_text",
-    "extract_key_from_json",
-    "extract_tokens_from_text",
-    "ConcurrentWorkflow",
-    "RecursiveWorkflow",
-    "NonlinearWorkflow",
     "BaseWorkflow",
-    "BaseStructure",
-    "detect_markdown",
-    "Task",
     "block",
+    "ConcurrentWorkflow",
+    "Conversation",
     "GraphWorkflow",
-    "Step",
-    "Plan",
+    "GroupChat",
+    "GroupChatManager",
+    "MajorityVoting",
+    "majority_voting",
+    "most_frequent",
+    "parse_code_completion",
     "Message",
+    "ModelParallelizer",
+    "MultiAgentCollaboration",
+    "MultiProcessWorkflow",
+    "MultiThreadedWorkflow",
+    "NonlinearWorkflow",
+    "Plan",
+    "RecursiveWorkflow",
+    "Artifact",
+    "ArtifactUpload",
+    "StepInput",
+    "StepOutput",
+    "StepRequestBody",
+    "TaskInput",
+    "TaskRequestBody",
+    "SequentialWorkflow",
+    "Step",
+    "SwarmNetwork",
     "broadcast",
     "circular_swarm",
     "exponential_swarm",
@@ -131,21 +138,14 @@ __all__ = [
     "sigmoid_swarm",
     "staircase_swarm",
     "star_swarm",
-    "StepOutput",
-    "StepRequestBody",
-    "TaskRequestBody",
-    "JSON",
-    "most_frequent",
-    "parse_code_completion",
-    "majority_voting",
-    "MajorityVoting",
-    "synchronized_queue",
+    "Task",
     "TaskQueueBase",
-    "MultiProcessWorkflow",
-    "MultiThreadedWorkflow",
-    "AgentJob",
-    "AutoSwarm",
-    "AutoSwarmRouter",
-    "AgentProcess",
-    "AgentProcessQueue",
-]
+    "synchronized_queue",
+    "detect_markdown",
+    "distribute_tasks",
+    "extract_key_from_json",
+    "extract_tokens_from_text",
+    "find_agent_by_id",
+    "find_token_in_text",
+    "parse_tasks",
+]

swarms/structs/agent.py

@@ -19,13 +19,11 @@ from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
 )
 from swarms.prompts.worker_prompt import worker_tools_sop_promp
 from swarms.structs.conversation import Conversation
-from swarms.tokenizers.base_tokenizer import BaseTokenizer
 from swarms.tools.tool import BaseTool
 from swarms.utils.code_interpreter import SubprocessCodeInterpreter
 from swarms.utils.data_to_text import data_to_text
 from swarms.utils.parse_code import extract_code_from_markdown
 from swarms.utils.pdf_to_text import pdf_to_text
-from swarms.utils.token_count_tiktoken import limit_tokens_from_string

 # Utils
@@ -184,7 +182,7 @@ class Agent:
         multi_modal: Optional[bool] = None,
         pdf_path: Optional[str] = None,
         list_of_pdf: Optional[str] = None,
-        tokenizer: Optional[BaseTokenizer] = None,
+        tokenizer: Optional[Any] = None,
         long_term_memory: Optional[AbstractVectorDatabase] = None,
         preset_stopping_token: Optional[bool] = False,
         traceback: Any = None,
@@ -208,6 +206,7 @@ class Agent:
         sentiment_threshold: Optional[float] = None,
         custom_exit_command: Optional[str] = "exit",
         sentiment_analyzer: Optional[Callable] = None,
+        limit_tokens_from_string: Optional[Callable] = None,
         *args,
         **kwargs,
     ):
@@ -267,6 +266,7 @@ class Agent:
         self.sentiment_threshold = sentiment_threshold
         self.custom_exit_command = custom_exit_command
         self.sentiment_analyzer = sentiment_analyzer
+        self.limit_tokens_from_string = limit_tokens_from_string

         # The max_loops will be set dynamically if the dynamic_loop
         if self.dynamic_loops:
@@ -1262,7 +1262,7 @@ class Agent:
             _type_: _description_
         """
         text = text or self.pdf_connector()
-        text = limit_tokens_from_string(text, num_limits)
+        text = self.limit_tokens_from_string(text, num_limits)
         return text

     def ingest_docs(self, docs: List[str], *args, **kwargs):
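`limit_tokens_from_string` follows the same injection pattern: the module-level import is gone, and the truncation helper must now be handed to the constructor, otherwise `self.limit_tokens_from_string` is `None` and the token-limiting path will raise. A sketch of the new wiring (assuming `swarms.utils.token_count_tiktoken` still exists as a module; only its re-export is commented out elsewhere in this commit):

```python
from swarms import Agent, OpenAIChat
from swarms.utils.token_count_tiktoken import limit_tokens_from_string

agent = Agent(
    llm=OpenAIChat(),
    max_loops=1,
    limit_tokens_from_string=limit_tokens_from_string,  # injected helper
)
```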

swarms/structs/agent_process.py

@@ -4,6 +4,7 @@ from pydantic import BaseModel

 from swarms.structs.omni_agent_types import agents
 from swarms.utils.loguru_logger import logger
+from typing import Callable


 class AgentProcess(BaseModel):
@@ -11,10 +12,11 @@ class AgentProcess(BaseModel):
     agent_name: str
     prompt: str
     response: str = None
-    time: callable = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    time: Callable = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     priority: int = 0
     status: str = "Waiting"
     pid: int = None

     def set_pid(self, pid: int):
         self.pid = pid
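Worth noting: the annotation change from `callable` to `Callable` does not alter behavior here. `datetime.now().strftime(...)` runs once at import time and produces a `str`, so the default is a frozen timestamp whose type contradicts the annotation. A sketch of the pattern that avoids both problems (not what the commit does):

```python
from datetime import datetime

from pydantic import BaseModel, Field


class AgentProcessSketch(BaseModel):
    # default_factory re-evaluates per instance, and the type matches the value
    time: str = Field(
        default_factory=lambda: datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    )
```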

swarms/structs/conversation.py

@@ -6,7 +6,7 @@ from termcolor import colored

 from swarms.memory.base_db import AbstractDatabase
 from swarms.structs.base import BaseStructure
-from swarms.tokenizers.base_tokenizer import BaseTokenizer
+from typing import Any


 class Conversation(BaseStructure):
@@ -67,7 +67,7 @@ class Conversation(BaseStructure):
         database: AbstractDatabase = None,
         autosave: bool = False,
         save_filepath: str = None,
-        tokenizer: BaseTokenizer = None,
+        tokenizer: Any = None,
         context_length: int = 8192,
         *args,
         **kwargs,
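With the annotation loosened from `BaseTokenizer` to `Any`, the tokenizer is now purely duck-typed. Judging from the interface it replaced, an object exposing `count_tokens` should still satisfy callers (an assumption; the methods `Conversation` actually invokes are not visible in this hunk):

```python
from swarms.structs.conversation import Conversation


class CharTokenizer:
    def count_tokens(self, text: str) -> int:
        # Crude stand-in: roughly four characters per token
        return max(1, len(text) // 4)


conversation = Conversation(tokenizer=CharTokenizer(), context_length=8192)
```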

swarms/utils/__init__.py

@@ -41,7 +41,6 @@ from swarms.utils.remove_json_whitespace import (
 )
 from swarms.utils.save_logs import parse_log_file
 from swarms.utils.supervision_visualizer import MarkVisualizer
-from swarms.utils.token_count_tiktoken import limit_tokens_from_string
 from swarms.utils.try_except_wrapper import try_except_wrapper
 from swarms.utils.yaml_output_parser import YamlOutputParser
 from swarms.utils.concurrent_utils import execute_concurrently
@@ -79,7 +78,7 @@ __all__ = [
     "remove_whitespace_from_yaml",
     "parse_log_file",
     "MarkVisualizer",
-    "limit_tokens_from_string",
+    # "limit_tokens_from_string",
     "try_except_wrapper",
     "YamlOutputParser",
     "execute_concurrently",
