pull/421/head
Kye 10 months ago
parent 5a60eb4f2d
commit 50215f229f

@ -1,4 +1,4 @@
from swarms import Agent, AnthropicChat
from swarms import Agent, Anthropic
from langchain.tools import tool
@ -13,7 +13,7 @@ def search_api(query: str, max_results: int = 10):
## Initialize the workflow
agent = Agent(
llm=AnthropicChat(),
llm=Anthropic(),
max_loops="auto",
autosave=True,
dashboard=False,

@ -0,0 +1,23 @@
"""
Boss selects what agent to use
B -> W1, W2, W3
"""
from typing import List, Optional
from pydantic import BaseModel, Field
from swarms.utils.json_utils import str_to_json
class HierarchicalSwarm(BaseModel):
    """
    Schema for a boss/worker delegation request.

    Per the module docstring, a boss agent (B) selects which worker
    agents (W1, W2, W3, ...) should handle a task; this model carries
    the candidate agent names and the task description.
    """

    class Config:
        # Permit field values whose types pydantic cannot validate
        # natively (see pydantic's arbitrary_types_allowed setting).
        arbitrary_types_allowed = True

    # Names of the agents available in the hierarchical swarm.
    agents: Optional[List[str]] = Field(
        default=None,
        title="List of agents in the hierarchical swarm",
    )
    # The work the selected agent(s) should carry out.
    task: Optional[str] = Field(
        default=None,
        title="Task to be done by the agents",
    )
# NOTE(review): this instance is constructed but never used below —
# presumably a smoke test that the model instantiates with all-default
# fields; confirm intent before removing.
all_agents = HierarchicalSwarm()

# Emit the model's JSON schema so a boss agent can see the expected
# request structure. Despite its name, str_to_json here serializes the
# schema dict into an indented JSON string (it wraps json.dumps).
agents_schema = HierarchicalSwarm.model_json_schema()
agents_schema = str_to_json(agents_schema)
print(agents_schema)

@ -47,7 +47,7 @@ tiktoken = "0.4.0"
ratelimit = "2.2.1"
loguru = "0.7.2"
huggingface-hub = "*"
pydantic = "*"
pydantic = "2.6.4"
tenacity = "8.2.2"
Pillow = "9.4.0"
chromadb = "*"

@ -1,12 +1,3 @@
from swarms.models.popular_llms import (
AnthropicChat,
CohereChat,
MosaicMLChat,
OpenAILLM,
ReplicateLLM,
AzureOpenAILLM,
OpenAIChatLLM,
)
from swarms.models.base_embedding_model import BaseEmbeddingModel
from swarms.models.base_llm import AbstractLLM # noqa: E402
from swarms.models.base_multimodal_model import BaseMultiModalModel
@ -15,7 +6,6 @@ from swarms.models.clipq import CLIPQ # noqa: E402
from swarms.models.fire_function import FireFunctionCaller
from swarms.models.fuyu import Fuyu # noqa: E402
from swarms.models.gemini import Gemini # noqa: E402
from swarms.models.gigabind import Gigabind # noqa: E402
from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402
from swarms.models.huggingface import HuggingfaceLLM # noqa: E402
from swarms.models.idefics import Idefics # noqa: E402
@ -28,8 +18,30 @@ from swarms.models.mpt import MPT7B # noqa: E402
from swarms.models.nougat import Nougat # noqa: E402
from swarms.models.openai_tts import OpenAITTS # noqa: E402
from swarms.models.petals import Petals # noqa: E402
from swarms.models.popular_llms import (
AnthropicChat as Anthropic,
)
from swarms.models.popular_llms import (
AzureOpenAILLM as AzureOpenAI,
)
from swarms.models.popular_llms import (
CohereChat as Cohere,
)
from swarms.models.popular_llms import (
MosaicMLChat as MosaicML,
)
from swarms.models.popular_llms import (
OpenAIChatLLM as OpenAIChat,
)
from swarms.models.popular_llms import (
OpenAILLM as OpenAI,
)
from swarms.models.popular_llms import (
ReplicateLLM as Replicate,
)
from swarms.models.qwen import QwenVLMultiModal # noqa: E402
from swarms.models.roboflow_model import RoboflowMultiModal
# from swarms.models.roboflow_model import RoboflowMultiModal
from swarms.models.sam_supervision import SegmentAnythingMarkGenerator
from swarms.models.sampling_params import SamplingParams, SamplingType
from swarms.models.timm import TimmModel # noqa: E402
@ -41,57 +53,54 @@ from swarms.models.types import ( # noqa: E402
TextModality,
VideoModality,
)
from swarms.models.ultralytics_model import UltralyticsModel
# from swarms.models.ultralytics_model import UltralyticsModel
from swarms.models.vilt import Vilt # noqa: E402
from swarms.models.wizard_storytelling import WizardLLMStoryTeller
from swarms.models.zephyr import Zephyr # noqa: E402
from swarms.models.zeroscope import ZeroscopeTTV # noqa: E402
# Public API of this package. Deduplicated and limited to names the module
# actually binds: any name listed in __all__ that is not defined makes
# `from swarms.models import *` raise, so stale entries are bugs.
__all__ = [
    "AbstractLLM",
    "Anthropic",
    "AudioModality",
    "AzureOpenAI",
    "BaseEmbeddingModel",
    "BaseMultiModalModel",
    "BioGPT",
    "CLIPQ",
    "Cohere",
    "FireFunctionCaller",
    "Fuyu",
    "GPT4VisionAPI",
    "Gemini",
    "Gigabind",
    "HuggingfaceLLM",
    "Idefics",
    "ImageModality",
    "Kosmos",
    "LavaMultiModal",
    "LayoutLMDocumentQA",
    "MPT7B",
    "Mistral",
    "Mixtral",
    "MosaicML",
    "MultimodalData",
    "Nougat",
    "OpenAI",
    "OpenAIChat",
    "OpenAITTS",
    "Petals",
    "QwenVLMultiModal",
    "Replicate",
    "SamplingParams",
    "SamplingType",
    "SegmentAnythingMarkGenerator",
    "TextModality",
    "TimmModel",
    "TogetherLLM",
    "Vilt",
    "VideoModality",
    "WizardLLMStoryTeller",
    "Zephyr",
    "ZeroscopeTTV",
    # NOTE(review): removed duplicate entries (Gemini, MPT7B, TextModality,
    # VideoModality each appeared twice) and names whose imports were
    # deleted or commented out above (AnthropicChat, CohereChat,
    # MosaicMLChat, OpenAILLM, ReplicateLLM, AzureOpenAILLM, OpenAIChatLLM,
    # RoboflowMultiModal, UltralyticsModel) — they were undefined at import
    # time. Restore an entry only together with its import.
]

@ -0,0 +1,6 @@
from langchain.tools import (
BaseTool,
Tool,
tool,
StructuredTool,
) # noqa

@ -45,7 +45,7 @@ class OmniTool(BaseModel):
Tuple: A tuple containing the arguments and keyword arguments.
"""
try:
try:
self.transform_models_to_tools()
logger.info(f"Number of tools: {len(self.tools)}")
try:
@ -53,9 +53,13 @@ class OmniTool(BaseModel):
logger.info(f"Running tool: {tool}")
tool(*args, **kwargs)
except Exception as e:
logger.error(f"Error occurred while running tools: {e}")
logger.error(
f"Error occurred while running tools: {e}"
)
return args, kwargs
except Exception as error:
logger.error(f"Error occurred while running tools: {error}")
return args, kwargs
logger.error(
f"Error occurred while running tools: {error}"
)
return args, kwargs

@ -3,7 +3,7 @@ import json
from pydantic import BaseModel
def base_model_schema_to_json(model: BaseModel):
def base_model_schema_to_json(model: BaseModel, indent: int = 3):
"""
Converts the JSON schema of a base model to a formatted JSON string.
@ -13,7 +13,7 @@ def base_model_schema_to_json(model: BaseModel):
Returns:
str: The JSON schema of the base model as a formatted JSON string.
"""
return json.dumps(model.model_json_schema(), indent=2)
return json.dumps(model.model_json_schema(), indent=indent)
def extract_json_from_str(response: str):
@ -48,3 +48,19 @@ def base_model_to_json(base_model_instance: BaseModel) -> str:
json_string = json.dumps(model_dict)
return json_string
def str_to_json(response: str, indent: int = 3):
    """
    Serialize a Python object into a pretty-printed JSON string.

    NOTE(review): despite the name and the annotation on ``response``,
    this function does NOT parse a JSON string into an object (that
    would be ``json.loads``); it calls ``json.dumps`` on whatever it is
    given. Its visible caller passes a dict (a model JSON schema) and
    expects a formatted string back, so the behavior is kept as-is and
    only the documentation is corrected here.

    Args:
        response: The object to serialize (dict, list, str, ...).
        indent (int, optional): Number of spaces per indentation level
            in the JSON output. Defaults to 3.

    Returns:
        str: The JSON-formatted string representation of ``response``.
    """
    return json.dumps(response, indent=indent)
Loading…
Cancel
Save