hhcs fix swarms info

pull/812/merge
Kye Gomez 3 days ago
parent d06610c6e5
commit f84a0fe9f7

@@ -121,7 +121,7 @@ class HybridHierarchicalClusterSwarm:
        self.router_agent = Agent(
            agent_name="Router Agent",
            agent_description="A router agent that routes tasks to the appropriate swarms.",
            system_prompt=f"{router_system_prompt}\n\n{get_swarms_info()}",
            system_prompt=f"{router_system_prompt}\n\n{get_swarms_info(swarms=self.swarms)}",
            tools_list_dictionary=tools,
            model_name=router_agent_model_name,
            max_loops=1,
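
The one-line change above passes the cluster's own swarms into get_swarms_info, so the Router Agent's system prompt describes the swarms actually registered with this HybridHierarchicalClusterSwarm rather than whatever the bare get_swarms_info() call falls back to. Below is a minimal sketch of what such a helper might produce; the name get_swarms_info_sketch and the exact formatting are illustrative assumptions, not the library's implementation, and the only attributes it relies on are the name/description fields already referenced in this diff.

# Hedged sketch, not swarms' actual helper: render registered swarms into a
# prompt fragment for the router.
def get_swarms_info_sketch(swarms):
    if not swarms:
        return "No swarms are currently registered."
    lines = ["Available swarms:"]
    for index, swarm in enumerate(swarms, start=1):
        # getattr keeps the sketch tolerant of swarms without a description.
        description = getattr(swarm, "description", "No description provided.")
        lines.append(f"{index}. {swarm.name}: {description}")
    return "\n".join(lines)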

@@ -1404,44 +1404,44 @@ class HuggingFaceModelWrapper(ModelWithCustomRunMethod):
            raise ValueError(f"Unsupported task: {task}")


# Example usage
if __name__ == "__main__":
    # Initialize model manager
    manager = ModelGrid(
        allocation_strategy=GPUAllocationStrategy.MEMORY_OPTIMIZED,
        memory_buffer=0.5,
        max_cpu_models=1,
        use_multiprocessing=True,
        log_level="INFO",
    )

    # # Add models
    model1 = torch.nn.Sequential(
        torch.nn.Linear(10, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 2),
    )
    manager.add_model("small_model", model1, ModelType.PYTORCH)

    # Add more models if available
    if TRANSFORMERS_AVAILABLE:
        manager.add_model(
            "bert_model", "bert-base-uncased", ModelType.HUGGINGFACE
        )

    # Allocate and load models
    manager.load_all_models()

    # Print GPU status
    print("GPU Status:")
    for gpu in manager.get_gpu_status():
        print(
            f"GPU {gpu['id']}: {gpu['available_memory']:.2f} GB / {gpu['total_memory']:.2f} GB"
        )
        print(f" Models: {', '.join(gpu['models'])}")

    # Run a task on all models
    results = manager.run("forward", input_data=torch.randn(1, 10))

    # Unload all models
    manager.unload_all_models()
# # Example usage
# if __name__ == "__main__":
#     # Initialize model manager
#     manager = ModelGrid(
#         allocation_strategy=GPUAllocationStrategy.MEMORY_OPTIMIZED,
#         memory_buffer=0.5,
#         max_cpu_models=1,
#         use_multiprocessing=True,
#         log_level="INFO",
#     )
#     # # Add models
#     model1 = torch.nn.Sequential(
#         torch.nn.Linear(10, 10),
#         torch.nn.ReLU(),
#         torch.nn.Linear(10, 2),
#     )
#     manager.add_model("small_model", model1, ModelType.PYTORCH)
#     # Add more models if available
#     if TRANSFORMERS_AVAILABLE:
#         manager.add_model(
#             "bert_model", "bert-base-uncased", ModelType.HUGGINGFACE
#         )
#     # Allocate and load models
#     manager.load_all_models()
#     # Print GPU status
#     print("GPU Status:")
#     for gpu in manager.get_gpu_status():
#         print(
#             f"GPU {gpu['id']}: {gpu['available_memory']:.2f} GB / {gpu['total_memory']:.2f} GB"
#         )
#         print(f" Models: {', '.join(gpu['models'])}")
#     # Run a task on all models
#     results = manager.run("forward", input_data=torch.randn(1, 10))
#     # Unload all models
#     manager.unload_all_models()

@@ -1,10 +1,13 @@
import threading
import time
import uuid
from typing import Any, Callable, Dict, List, Optional
from swarms.utils.any_to_str import any_to_str
from swarms.utils.loguru_logger import initialize_logger
from swarms.structs.conversation import Conversation
from swarms.utils.history_output_formatter import (
    output_type,
)
logger = initialize_logger(log_folder="swarm_arange")
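
The two new imports above wire SwarmRearrange into the shared Conversation structure and the history output formatting utilities. The output_type name imported here is presumably a type alias over the supported history formats; the only value confirmed by this diff is "dict-all-except-first", which appears as a default further down. A hedged sketch of what such an alias could look like (the other literal values are assumptions for illustration):

from typing import Literal

# Assumed shape only; the real alias lives in
# swarms/utils/history_output_formatter.py and may list different values.
output_type = Literal[
    "dict-all-except-first",  # confirmed by the default used later in this diff
    "list",                   # assumed
    "dict",                   # assumed
    "str",                    # assumed
    "final",                  # assumed
]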
@@ -13,20 +16,6 @@ def swarm_id():
    return uuid.uuid4().hex


class SwarmArrangeInput:
    id: str = uuid.uuid4().hex
    time_stamp: str = time.strftime("%Y-%m-%d %H:%M:%S")
    name: str
    description: str
    swarms: List[Callable] = []
    output_type: str
    flow: str = ""


class SwarmArrangeOutput:
    input_config: SwarmArrangeInput = None


class SwarmRearrange:
    """
    A class representing a swarm of swarms for rearranging tasks.
@@ -69,6 +58,7 @@ class SwarmRearrange:
            Callable[[str], str]
        ] = None,
        return_json: bool = False,
        output_type: output_type = "dict-all-except-first",
        *args,
        **kwargs,
    ):
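
With the new output_type parameter, callers can choose how the rearranged swarm's conversation history is returned without touching return_json. A hedged construction sketch follows; the swarms, flow, and run arguments shown here are assumptions based on the fields and calls visible elsewhere in this file (research_swarm, writing_swarm, and the arrow-style flow string are placeholders, not real objects or confirmed syntax):

# Hypothetical usage sketch of the new parameter.
rearranged = SwarmRearrange(
    swarms=[research_swarm, writing_swarm],  # placeholder swarm instances
    flow="ResearchSwarm -> WritingSwarm",    # placeholder flow string
    output_type="dict-all-except-first",     # new parameter added in this diff
)
result = rearranged.run("Summarize the latest findings.")  # assumes a run(task) entry point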
@@ -96,7 +86,9 @@ class SwarmRearrange:
        self.verbose = verbose
        self.human_in_the_loop = human_in_the_loop
        self.custom_human_in_the_loop = custom_human_in_the_loop
        self.output_type = output_type
        self.return_json = return_json
        self.swarm_history = {swarm.name: [] for swarm in swarms}
        self.lock = threading.Lock()
        self.id = uuid.uuid4().hex if id is None else id
@@ -104,6 +96,9 @@ class SwarmRearrange:
        # Run the reliability checks
        self.reliability_checks()

        # Conversation
        self.conversation = Conversation()

    def reliability_checks(self):
        logger.info("Running reliability checks.")
        if not self.swarms:
@@ -283,6 +278,10 @@ class SwarmRearrange:
                        current_task, img, *args, **kwargs
                    )
                    result = any_to_str(result)

                    self.conversation.add(
                        role=swarm.name, content=result
                    )
                    logger.info(
                        f"Swarm {swarm_name} returned result of type: {type(result)}"
                    )
@@ -325,6 +324,10 @@ class SwarmRearrange:
                        current_task, img, *args, **kwargs
                    )
                    result = any_to_str(result)

                    self.conversation.add(
                        role=swarm.name, content=result
                    )
                    logger.info(
                        f"Swarm {swarm_name} returned result of type: {type(result)}"
                    )
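
Both execution branches now record each swarm's result in the shared Conversation under the swarm's name, which is what makes the new output_type parameter meaningful: the accumulated role/content history can be rendered into whichever format the caller requested. The diff does not show the return path, so the following is only a sketch of how such formatting could work, assuming Conversation keeps its messages as a list of {"role": ..., "content": ...} dicts:

# Hedged sketch (not the library's formatter): turn the accumulated
# conversation into the shape implied by self.output_type.
def format_history_sketch(conversation, output_type="dict-all-except-first"):
    history = conversation.conversation_history  # assumed attribute name
    if output_type == "dict-all-except-first":
        # Assumed meaning: drop the first entry (e.g. the initial task) and
        # return the remaining role/content dicts.
        return history[1:]
    if output_type == "list":
        return [f"{message['role']}: {message['content']}" for message in history]
    # Fallback: a plain string transcript.
    return "\n".join(
        f"{message['role']}: {message['content']}" for message in history
    )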
