[EXAMPLES CLEANUP] [FIX][SwarmMatcher] [Remove old examples]

pull/1051/head
Kye Gomez 1 week ago
parent e8f161beea
commit adb6930439

@@ -1,311 +0,0 @@
import torch
from torch import Tensor
from loguru import logger
from typing import Tuple
import matplotlib.pyplot as plt

try:
    # ipywidgets is available in interactive environments like Jupyter.
    from ipywidgets import interact, IntSlider

    HAS_IPYWIDGETS = True
except ImportError:
    HAS_IPYWIDGETS = False
    logger.warning(
        "ipywidgets not installed. Interactive slicing will be disabled."
    )


class GaussianSplat4DStateSpace:
    """
    4D Gaussian splatting with a state space model in PyTorch.

    Each Gaussian is defined by an 8D state vector:
        [x, y, z, w, vx, vy, vz, vw],
    where the first four dimensions are the spatial coordinates and the last
    four are the velocities. Only the spatial (first four) dimensions are used
    for the 4D Gaussian splat, with a corresponding 4×4 covariance matrix.

    Attributes:
        num_gaussians (int): Number of Gaussians.
        state_dim (int): Dimension of the state vector (should be 8).
        states (Tensor): Current state for each Gaussian of shape (num_gaussians, state_dim).
        covariances (Tensor): Covariance matrices for the spatial dimensions, shape (num_gaussians, 4, 4).
        A (Tensor): State transition matrix of shape (state_dim, state_dim).
        dt (float): Time step for state updates.
    """

    def __init__(
        self,
        num_gaussians: int,
        init_states: Tensor,
        init_covariances: Tensor,
        dt: float = 1.0,
    ) -> None:
        """
        Initialize the 4D Gaussian splat model.

        Args:
            num_gaussians (int): Number of Gaussians.
            init_states (Tensor): Initial states of shape (num_gaussians, 8).
                Each state is assumed to be
                [x, y, z, w, vx, vy, vz, vw].
            init_covariances (Tensor): Initial covariance matrices for the
                spatial dimensions, shape (num_gaussians, 4, 4).
            dt (float): Time step for the state update.
        """
        if init_states.shape[1] != 8:
            raise ValueError(
                "init_states should have shape (N, 8) where 8 = 4 position + 4 velocity."
            )
        if init_covariances.shape[1:] != (4, 4):
            raise ValueError(
                "init_covariances should have shape (N, 4, 4)."
            )

        self.num_gaussians = num_gaussians
        self.states = init_states.clone()  # shape: (N, 8)
        self.covariances = init_covariances.clone()  # shape: (N, 4, 4)
        self.dt = dt
        self.state_dim = init_states.shape[1]

        # Create an 8x8 constant-velocity state transition matrix:
        # new position = position + velocity * dt; velocity remains unchanged.
        I4 = torch.eye(
            4, dtype=init_states.dtype, device=init_states.device
        )
        zeros4 = torch.zeros(
            (4, 4), dtype=init_states.dtype, device=init_states.device
        )
        top = torch.cat([I4, dt * I4], dim=1)
        bottom = torch.cat([zeros4, I4], dim=1)
        self.A = torch.cat([top, bottom], dim=0)  # shape: (8, 8)

        logger.info(
            "Initialized 4D GaussianSplatStateSpace with {} Gaussians.",
            num_gaussians,
        )

    def update_states(self) -> None:
        """
        Update the state of each Gaussian using the constant-velocity state space model.

        Applies:
            state_next = A @ state_current.
        """
        self.states = (
            self.A @ self.states.t()
        ).t()  # shape: (num_gaussians, 8)
        logger.debug("States updated: {}", self.states)

    def _compute_gaussian(
        self, pos: Tensor, cov: Tensor, coords: Tensor
    ) -> Tensor:
        """
        Compute the 4D Gaussian function over a grid of coordinates.

        Args:
            pos (Tensor): The center of the Gaussian (4,).
            cov (Tensor): The 4×4 covariance matrix.
            coords (Tensor): A grid of coordinates of shape (..., 4).

        Returns:
            Tensor: Evaluated Gaussian values on the grid with shape equal to coords.shape[:-1].
        """
        try:
            cov_inv = torch.linalg.inv(cov)
        except RuntimeError as e:
            logger.warning(
                "Covariance inversion failed; using pseudo-inverse. Error: {}",
                e,
            )
            cov_inv = torch.linalg.pinv(cov)

        # Broadcast pos over the grid.
        diff = coords - pos.view(
            *(1 for _ in range(coords.ndim - 1)), 4
        )
        # Batched Mahalanobis term: diff^T @ cov_inv @ diff over the grid.
        mahal = torch.einsum("...i,ij,...j->...", diff, cov_inv, diff)
        gaussian = torch.exp(-0.5 * mahal)
        return gaussian

    def render(
        self,
        canvas_size: Tuple[int, int, int, int],
        sigma_scale: float = 1.0,
        normalize: bool = False,
    ) -> Tensor:
        """
        Render the current 4D Gaussian splats onto a 4D canvas.

        Args:
            canvas_size (Tuple[int, int, int, int]): The size of the canvas (d1, d2, d3, d4).
            sigma_scale (float): Scaling factor for the covariance (affects spread).
            normalize (bool): Whether to normalize the final canvas to [0, 1].

        Returns:
            Tensor: A 4D tensor (canvas) with the accumulated contributions from all Gaussians.
        """
        d1, d2, d3, d4 = canvas_size

        # Create coordinate grids for each dimension.
        grid1 = torch.linspace(0, d1 - 1, d1, device=self.states.device)
        grid2 = torch.linspace(0, d2 - 1, d2, device=self.states.device)
        grid3 = torch.linspace(0, d3 - 1, d3, device=self.states.device)
        grid4 = torch.linspace(0, d4 - 1, d4, device=self.states.device)

        # Create a 4D meshgrid (using indexing "ij").
        grid = torch.stack(
            torch.meshgrid(grid1, grid2, grid3, grid4, indexing="ij"),
            dim=-1,
        )  # shape: (d1, d2, d3, d4, 4)

        # Initialize the canvas.
        canvas = torch.zeros(
            (d1, d2, d3, d4),
            dtype=self.states.dtype,
            device=self.states.device,
        )

        for i in range(self.num_gaussians):
            pos = self.states[i, :4]  # spatial center (4,)
            cov = self.covariances[i] * sigma_scale  # scaled covariance
            gaussian = self._compute_gaussian(pos, cov, grid)
            canvas += gaussian
            logger.debug(
                "Rendered Gaussian {} at position {}", i, pos.tolist()
            )

        if normalize:
            max_val = canvas.max()
            if max_val > 0:
                canvas = canvas / max_val
                logger.debug("Canvas normalized.")

        logger.info("4D Rendering complete.")
        return canvas


def interactive_slice(canvas: Tensor) -> None:
    """
    Display an interactive 2D slice of the 4D canvas using ipywidgets.

    This function fixes two of the four dimensions (d3 and d4) via sliders and
    displays the resulting 2D slice (over dimensions d1 and d2).

    Args:
        canvas (Tensor): A 4D tensor with shape (d1, d2, d3, d4).
    """
    d1, d2, d3, d4 = canvas.shape

    def display_slice(slice_d3: int, slice_d4: int):
        slice_2d = canvas[:, :, slice_d3, slice_d4].cpu().numpy()
        plt.figure(figsize=(6, 6))
        plt.imshow(slice_2d, cmap="hot", origin="lower")
        plt.title(f"2D Slice at d3={slice_d3}, d4={slice_d4}")
        plt.colorbar()
        plt.show()

    interact(
        display_slice,
        slice_d3=IntSlider(min=0, max=d3 - 1, step=1, value=d3 // 2),
        slice_d4=IntSlider(min=0, max=d4 - 1, step=1, value=d4 // 2),
    )


def mip_projection(canvas: Tensor) -> None:
    """
    Render a 2D view of the 4D canvas using maximum intensity projection (MIP)
    along the 3rd and 4th dimensions.

    Args:
        canvas (Tensor): A 4D tensor with shape (d1, d2, d3, d4).
    """
    # MIP along dimension 3.
    mip_3d = canvas.max(dim=2)[0]  # shape: (d1, d2, d4)
    # MIP along dimension 4.
    mip_2d = mip_3d.max(dim=2)[0]  # shape: (d1, d2)

    plt.figure(figsize=(6, 6))
    plt.imshow(mip_2d.cpu().numpy(), cmap="hot", origin="lower")
    plt.title("2D MIP (Projecting dimensions d3 and d4)")
    plt.colorbar()
    plt.show()


def main() -> None:
    """
    Main function that:
      - Creates a 4D Gaussian splat model.
      - Updates the states to simulate motion.
      - Renders a 4D canvas.
      - Visualizes the 4D volume via interactive slicing (if available) or MIP.
    """
    torch.manual_seed(42)

    num_gaussians = 2

    # Define initial states for each Gaussian:
    # each state is [x, y, z, w, vx, vy, vz, vw].
    init_states = torch.tensor(
        [
            [10.0, 15.0, 20.0, 25.0, 0.5, -0.2, 0.3, 0.1],
            [30.0, 35.0, 40.0, 45.0, -0.3, 0.4, -0.1, 0.2],
        ],
        dtype=torch.float32,
    )

    # Define initial 4x4 covariance matrices for the spatial dimensions.
    init_covariances = torch.stack(
        [
            torch.diag(
                torch.tensor([5.0, 5.0, 5.0, 5.0], dtype=torch.float32)
            ),
            torch.diag(
                torch.tensor([3.0, 3.0, 3.0, 3.0], dtype=torch.float32)
            ),
        ]
    )

    # Create the 4D Gaussian splat model.
    model = GaussianSplat4DStateSpace(
        num_gaussians, init_states, init_covariances, dt=1.0
    )

    # Update states to simulate one time step.
    model.update_states()

    # Render the 4D canvas.
    canvas_size = (20, 20, 20, 20)
    canvas = model.render(canvas_size, sigma_scale=1.0, normalize=True)

    # Visualize the 4D data.
    if HAS_IPYWIDGETS:
        logger.info("Launching interactive slicing tool for 4D data.")
        interactive_slice(canvas)
    else:
        logger.info(
            "ipywidgets not available; using maximum intensity projection instead."
        )
        mip_projection(canvas)


if __name__ == "__main__":
    main()
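For reference, the update rule this deleted example relied on is the standard constant-velocity model: positions advance by velocity * dt while velocities stay fixed. A minimal standalone sketch of that transition, mirroring the block matrix built in __init__ above (the variable names here are illustrative, not part of the file):

import torch

dt = 1.0
I4 = torch.eye(4)
# Block transition matrix [[I, dt*I], [0, I]], as constructed in __init__.
A = torch.cat(
    [
        torch.cat([I4, dt * I4], dim=1),
        torch.cat([torch.zeros(4, 4), I4], dim=1),
    ],
    dim=0,
)

# One Gaussian's state: [x, y, z, w, vx, vy, vz, vw].
state = torch.tensor([10.0, 15.0, 20.0, 25.0, 0.5, -0.2, 0.3, 0.1])
next_state = A @ state

# Positions advance by velocity * dt; velocities are unchanged,
# so the first four entries become [10.5, 14.8, 20.3, 25.1].
assert torch.allclose(next_state[:4], state[:4] + dt * state[4:])
assert torch.allclose(next_state[4:], state[4:])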

@@ -1,46 +0,0 @@
from swarms.utils.vllm_wrapper import VLLMWrapper


def main():
    # Initialize the vLLM wrapper with a model.
    # Note: you'll need to have the model downloaded or specify a HuggingFace model ID.
    llm = VLLMWrapper(
        model_name="meta-llama/Llama-2-7b-chat-hf",  # Replace with your model path or HF model ID
        temperature=0.7,
        max_tokens=1000,
    )

    # Example task
    task = "What are the benefits of using vLLM for inference?"

    # Run inference
    response = llm.run(task)
    print("Response:", response)

    # Example with system prompt
    llm_with_system = VLLMWrapper(
        model_name="meta-llama/Llama-2-7b-chat-hf",  # Replace with your model path or HF model ID
        system_prompt="You are a helpful AI assistant that provides concise answers.",
        temperature=0.7,
    )

    # Run inference with system prompt
    response = llm_with_system.run(task)
    print("\nResponse with system prompt:", response)

    # Example with batched inference
    tasks = [
        "What is vLLM?",
        "How does vLLM improve inference speed?",
        "What are the main features of vLLM?",
    ]
    responses = llm.batched_run(tasks, batch_size=2)

    print("\nBatched responses:")
    for task, response in zip(tasks, responses):
        print(f"\nTask: {task}")
        print(f"Response: {response}")


if __name__ == "__main__":
    main()

@@ -1,148 +0,0 @@
import concurrent.futures
import os
from typing import Any

from loguru import logger

try:
    from vllm import LLM, SamplingParams
except ImportError:
    import subprocess
    import sys

    print("Installing vllm")
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "-U", "vllm"]
    )
    print("vllm installed")
    from vllm import LLM, SamplingParams


class VLLMWrapper:
    """
    A wrapper class for vLLM that provides a similar interface to LiteLLM.
    This class handles model initialization and inference using vLLM.
    """

    def __init__(
        self,
        model_name: str = "meta-llama/Llama-2-7b-chat-hf",
        system_prompt: str | None = None,
        stream: bool = False,
        temperature: float = 0.5,
        max_tokens: int = 4000,
        max_completion_tokens: int = 4000,
        tools_list_dictionary: list[dict[str, Any]] | None = None,
        tool_choice: str = "auto",
        parallel_tool_calls: bool = False,
        *args,
        **kwargs,
    ):
        """
        Initialize the vLLM wrapper with the given parameters.

        Args:
            model_name (str): The name of the model to use. Defaults to "meta-llama/Llama-2-7b-chat-hf".
            system_prompt (str, optional): The system prompt to use. Defaults to None.
            stream (bool): Whether to stream the output. Defaults to False.
            temperature (float): The temperature for sampling. Defaults to 0.5.
            max_tokens (int): The maximum number of tokens to generate. Defaults to 4000.
            max_completion_tokens (int): The maximum number of completion tokens. Defaults to 4000.
            tools_list_dictionary (list[dict[str, Any]], optional): List of available tools. Defaults to None.
            tool_choice (str): How to choose tools. Defaults to "auto".
            parallel_tool_calls (bool): Whether to allow parallel tool calls. Defaults to False.
        """
        self.model_name = model_name
        self.system_prompt = system_prompt
        self.stream = stream
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.max_completion_tokens = max_completion_tokens
        self.tools_list_dictionary = tools_list_dictionary
        self.tool_choice = tool_choice
        self.parallel_tool_calls = parallel_tool_calls

        # Initialize vLLM.
        self.llm = LLM(model=model_name, **kwargs)
        self.sampling_params = SamplingParams(
            temperature=temperature,
            max_tokens=max_tokens,
        )

    def _prepare_prompt(self, task: str) -> str:
        """
        Prepare the prompt for the given task.

        Args:
            task (str): The task to prepare the prompt for.

        Returns:
            str: The prepared prompt.
        """
        if self.system_prompt:
            return f"{self.system_prompt}\n\nUser: {task}\nAssistant:"
        return f"User: {task}\nAssistant:"

    def run(self, task: str, *args, **kwargs) -> str:
        """
        Run the model for the given task.

        Args:
            task (str): The task to run the model for.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            str: The model's response.
        """
        try:
            prompt = self._prepare_prompt(task)
            outputs = self.llm.generate(prompt, self.sampling_params)
            response = outputs[0].outputs[0].text.strip()
            return response
        except Exception as error:
            logger.error(f"Error in VLLMWrapper: {error}")
            raise error

    def __call__(self, task: str, *args, **kwargs) -> str:
        """
        Call the model for the given task.

        Args:
            task (str): The task to run the model for.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            str: The model's response.
        """
        return self.run(task, *args, **kwargs)

    def batched_run(
        self, tasks: list[str], batch_size: int = 10
    ) -> list[str]:
        """
        Run the model for multiple tasks in batches.

        Args:
            tasks (list[str]): List of tasks to run.
            batch_size (int): Size of each batch. Defaults to 10.
                (Accepted for API compatibility but currently unused;
                concurrency is bounded by the thread pool size instead.)

        Returns:
            list[str]: List of model responses, in the same order as tasks.
        """
        # Calculate the worker count based on 95% of available CPU cores.
        num_workers = max(1, int((os.cpu_count() or 1) * 0.95))
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=num_workers
        ) as executor:
            futures = [
                executor.submit(self.run, task) for task in tasks
            ]
            # Collect results in submission order so responses line up with
            # tasks; iterating as_completed() would yield them in completion
            # order and scramble the task-response pairing.
            return [future.result() for future in futures]
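One caveat worth flagging for anyone reusing this wrapper: _prepare_prompt builds a generic User:/Assistant: transcript rather than the model's native chat template (Llama-2 chat checkpoints, for instance, normally expect the [INST] format). Below is a standalone copy of that logic so it can be sanity-checked without installing vLLM; prepare_prompt here is an illustrative stub, not part of the module:

# Standalone sketch of VLLMWrapper._prepare_prompt, for inspection only.
def prepare_prompt(task: str, system_prompt: str | None = None) -> str:
    if system_prompt:
        return f"{system_prompt}\n\nUser: {task}\nAssistant:"
    return f"User: {task}\nAssistant:"


print(prepare_prompt("What is vLLM?"))
# User: What is vLLM?
# Assistant:

print(prepare_prompt("What is vLLM?", "You are a helpful AI assistant."))
# You are a helpful AI assistant.
#
# User: What is vLLM?
# Assistant: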

@@ -7,15 +7,6 @@ of the Talk Structurally, Act Hierarchically framework.
All components are now in one file: hierarchical_structured_communication_framework.py
"""
import os
import sys

# Add the project root to the Python path
project_root = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", "..")
)
sys.path.insert(0, project_root)

from dotenv import load_dotenv

# Import everything from the single file

@@ -11,9 +11,6 @@ import sys
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union, Any
import warnings

warnings.filterwarnings("ignore")

import cv2
import numpy as np
import torch
@@ -22,6 +19,10 @@ from PIL import Image
import open3d as o3d
from loguru import logger

warnings.filterwarnings("ignore")

# Third-party model imports
try:
    import timm

@@ -1,5 +1,6 @@
from swarms.structs.agent import Agent
from swarms.structs.agent_builder import AgentsBuilder
from swarms.structs.agent_loader import AgentLoader
from swarms.structs.agent_rearrange import AgentRearrange, rearrange
from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
from swarms.structs.base_structure import BaseStructure
@@ -103,7 +104,6 @@ from swarms.structs.swarming_architectures import (
    staircase_swarm,
    star_swarm,
)
from swarms.structs.agent_loader import AgentLoader

__all__ = [
    "Agent",

@@ -1026,16 +1026,16 @@ class Agent:
            self.short_memory.add(
                role="system",
                content=(
                    f"🔍 [RAG Query Initiated]\n"
                    f"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n"
                    f"📝 Query:\n{query}\n\n"
                    f"📚 Retrieved Knowledge (RAG Output):\n{output}\n"
                    f"━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n"
                    f"💡 The above information was retrieved from the agent's long-term memory using Retrieval-Augmented Generation (RAG). "
                    f"Use this context to inform your next response or reasoning step."
                    "[RAG Query Initiated]\n"
                    "----------------------------------\n"
                    f"Query:\n{query}\n\n"
                    f"Retrieved Knowledge (RAG Output):\n{output}\n"
                    "----------------------------------\n"
                    "The above information was retrieved from the agent's long-term memory using Retrieval-Augmented Generation (RAG). "
                    "Use this context to inform your next response or reasoning step."
                ),
            )
        except Exception as e:
        except AgentMemoryError as e:
            logger.error(
                f"Agent: {self.agent_name} Error handling RAG query: {e} Traceback: {traceback.format_exc()}"
            )
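The hunk above narrows the RAG error handler from a blanket except Exception to the domain-specific AgentMemoryError, so unexpected bugs now propagate instead of being logged and swallowed. A minimal sketch of that pattern, assuming a local exception class (only the name AgentMemoryError appears in the diff; the definition and helper below are illustrative):

class AgentMemoryError(Exception):
    """Raised when a long-term memory (RAG) operation fails."""


def fetch_from_memory(query: str) -> str:
    # Stand-in for a real vector-store lookup; fails like a network call might.
    raise ConnectionError("vector store unreachable")


def query_long_term_memory(query: str) -> str:
    try:
        return fetch_from_memory(query)
    except ConnectionError as e:
        # Wrap expected infrastructure failures in the domain error.
        # Unexpected exceptions (e.g. a TypeError from a bug) propagate
        # instead of being silently logged, which is the point of the change.
        raise AgentMemoryError(f"RAG query failed: {e}") from e


try:
    query_long_term_memory("What did the user ask yesterday?")
except AgentMemoryError as e:
    print(f"handled: {e}")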

File diff suppressed because it is too large.

@@ -5,7 +5,7 @@ import sys
import traceback
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from typing import Any, Dict, List, Optional

import psutil
import requests
@@ -95,22 +95,8 @@ class SwarmsIssueReporter:
        except:
            return "Unknown"

    def _get_gpu_info(self) -> Tuple[bool, Optional[str]]:
        """Get GPU information and CUDA availability."""
        try:
            import torch

            cuda_available = torch.cuda.is_available()
            if cuda_available:
                gpu_info = torch.cuda.get_device_name(0)
                return cuda_available, gpu_info
            return False, None
        except:
            return False, None

    def _get_system_info(self) -> SwarmSystemInfo:
        """Collect system and Swarms-specific information."""
        cuda_available, gpu_info = self._get_gpu_info()

        return SwarmSystemInfo(
            os_name=platform.system(),
@@ -120,8 +106,6 @@ class SwarmsIssueReporter:
            memory_usage=psutil.virtual_memory().percent,
            disk_usage=psutil.disk_usage("/").percent,
            swarms_version=self._get_swarms_version(),
            cuda_available=cuda_available,
            gpu_info=gpu_info,
        )

    def _categorize_error(