[NEW][Examples] [simulations.senator_assembly -> swarms.sims]

pull/1018/head · Kye Gomez · 3 weeks ago
parent ee458a0af5 · commit 63a998bb1f

.gitignore (+1)

@@ -18,6 +18,7 @@ next_swarms_update.txt
 runs
 Financial-Analysis-Agent_state.json
 conversations/
+models/
 evolved_gpt2_models/
 experimental
 ffn_alternatives

@@ -0,0 +1,36 @@
import os
from swarms_client import SwarmsClient
from dotenv import load_dotenv
import json
load_dotenv()
client = SwarmsClient(
api_key=os.getenv("SWARMS_API_KEY"),
)
result = client.agent.run(
agent_config={
"agent_name": "Bloodwork Diagnosis Expert",
"description": "An expert doctor specializing in interpreting and diagnosing blood work results.",
"system_prompt": (
"You are an expert medical doctor specializing in the interpretation and diagnosis of blood work. "
"Your expertise includes analyzing laboratory results, identifying abnormal values, "
"explaining their clinical significance, and recommending next diagnostic or treatment steps. "
"Provide clear, evidence-based explanations and consider differential diagnoses based on blood test findings."
),
"model_name": "groq/moonshotai/kimi-k2-instruct",
"max_loops": 1,
"max_tokens": 1000,
"temperature": 0.5,
},
task=(
"A patient presents with the following blood work results: "
"Hemoglobin: 10.2 g/dL (low), WBC: 13,000 /µL (high), Platelets: 180,000 /µL (normal), "
"ALT: 65 U/L (high), AST: 70 U/L (high). "
"Please provide a detailed interpretation, possible diagnoses, and recommended next steps."
),
)
print(json.dumps(result, indent=4))
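
The examples in this commit assume SWARMS_API_KEY is available in the environment (load_dotenv() reads it from a local .env file). A minimal fail-fast guard, sketched here and not part of the commit, avoids a less obvious authentication error on the first API call:

import os
from dotenv import load_dotenv
from swarms_client import SwarmsClient

load_dotenv()

# Sketch: validate the key before constructing the client.
api_key = os.getenv("SWARMS_API_KEY")
if not api_key:
    raise RuntimeError("SWARMS_API_KEY is not set; add it to your .env file")

client = SwarmsClient(api_key=api_key)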

@@ -0,0 +1,50 @@
import os
from swarms_client import SwarmsClient
from dotenv import load_dotenv
import json
load_dotenv()
client = SwarmsClient(
api_key=os.getenv("SWARMS_API_KEY"),
)
batch_requests = [
{
"agent_config": {
"agent_name": "Bloodwork Diagnosis Expert",
"description": "Expert in blood work interpretation.",
"system_prompt": (
"You are a doctor who interprets blood work. Give concise, clear explanations and possible diagnoses."
),
"model_name": "claude-sonnet-4-20250514",
"max_loops": 1,
"max_tokens": 1000,
"temperature": 0.5,
},
"task": (
"Blood work: Hemoglobin 10.2 (low), WBC 13,000 (high), Platelets 180,000 (normal), "
"ALT 65 (high), AST 70 (high). Interpret and suggest diagnoses."
),
},
{
"agent_config": {
"agent_name": "Radiology Report Summarizer",
"description": "Expert in summarizing radiology reports.",
"system_prompt": (
"You are a radiologist. Summarize the findings of radiology reports in clear, patient-friendly language."
),
"model_name": "claude-sonnet-4-20250514",
"max_loops": 1,
"max_tokens": 1000,
"temperature": 0.5,
},
"task": (
"Radiology report: Chest X-ray shows mild cardiomegaly, no infiltrates, no effusion. Summarize the findings."
),
},
]
result = client.agent.batch.run(body=batch_requests)
print(json.dumps(result, indent=4))
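
A sketch of consuming the batch response, assuming (not verified here) that client.agent.batch.run returns one result per request in submission order:

# Sketch: pair each request with its result. Check the swarms_client
# docs for your version before relying on this response shape.
for request, item in zip(batch_requests, result):
    print(f"--- {request['agent_config']['agent_name']} ---")
    print(json.dumps(item, indent=4))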

@@ -0,0 +1,105 @@
import json
import os
from swarms_client import SwarmsClient
from dotenv import load_dotenv
load_dotenv()
client = SwarmsClient(
api_key=os.getenv("SWARMS_API_KEY"),
)
def create_medical_unit_swarm(client, patient_info):
"""
Creates and runs a simulated medical unit swarm with a doctor (leader), nurses, and a medical assistant.
Args:
client (SwarmsClient): The SwarmsClient instance.
patient_info (str): The patient symptoms and information.
Returns:
dict: The output from the swarm run.
"""
return client.swarms.run(
name="Hospital Medical Unit",
description="A simulated hospital unit with a doctor (leader), nurses, and a medical assistant collaborating on patient care.",
swarm_type="HiearchicalSwarm",
task=patient_info,
agents=[
{
"agent_name": "Dr. Smith - Attending Physician",
"description": "The lead doctor responsible for diagnosis, treatment planning, and team coordination.",
"system_prompt": (
"You are Dr. Smith, the attending physician and leader of the medical unit. "
"You review all information, make final decisions, and coordinate the team. "
"Provide a diagnosis, recommend next steps, and delegate tasks to the nurses and assistant."
),
"model_name": "gpt-4.1",
"role": "leader",
"max_loops": 1,
"max_tokens": 8192,
"temperature": 0.5,
},
{
"agent_name": "Nurse Alice",
"description": "A registered nurse responsible for patient assessment, vital signs, and reporting findings to the doctor.",
"system_prompt": (
"You are Nurse Alice, a registered nurse. "
"Assess the patient's symptoms, record vital signs, and report your findings to Dr. Smith. "
"Suggest any immediate nursing interventions if needed."
),
"model_name": "gpt-4.1",
"role": "worker",
"max_loops": 1,
"max_tokens": 4096,
"temperature": 0.5,
},
{
"agent_name": "Nurse Bob",
"description": "A registered nurse assisting with patient care, medication administration, and monitoring.",
"system_prompt": (
"You are Nurse Bob, a registered nurse. "
"Assist with patient care, administer medications as ordered, and monitor the patient's response. "
"Communicate any changes to Dr. Smith."
),
"model_name": "gpt-4.1",
"role": "worker",
"max_loops": 1,
"max_tokens": 4096,
"temperature": 0.5,
},
{
"agent_name": "Medical Assistant Jane",
"description": "A medical assistant supporting the team with administrative tasks and basic patient care.",
"system_prompt": (
"You are Medical Assistant Jane. "
"Support the team by preparing the patient, collecting samples, and handling administrative tasks. "
"Report any relevant observations to the nurses or Dr. Smith."
),
"model_name": "claude-sonnet-4-20250514",
"role": "worker",
"max_loops": 1,
"max_tokens": 2048,
"temperature": 0.5,
},
],
)
if __name__ == "__main__":
patient_symptoms = """
Patient: 45-year-old female
Chief Complaint: Chest pain and shortness of breath for 2 days
Symptoms:
- Sharp chest pain that worsens with deep breathing
- Shortness of breath, especially when lying down
- Mild fever (100.2°F)
- Dry cough
- Fatigue
"""
out = create_medical_unit_swarm(client, patient_symptoms)
print(json.dumps(out, indent=4))
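
Because the swarm is wrapped in a factory function, the same team can be reused across cases. A short sketch, where the second patient write-up is purely illustrative:

# Sketch: run the same medical unit over several patient write-ups.
cases = [
    patient_symptoms,
    "Patient: 62-year-old male\nChief Complaint: Sudden left-sided weakness",  # hypothetical
]
for case in cases:
    print(json.dumps(create_medical_unit_swarm(client, case), indent=4))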

@@ -0,0 +1,63 @@
import json
import os
from swarms_client import SwarmsClient
from dotenv import load_dotenv
load_dotenv()
client = SwarmsClient(
api_key=os.getenv("SWARMS_API_KEY"),
)
patient_symptoms = """
Patient: 45-year-old female
Chief Complaint: Chest pain and shortness of breath for 2 days
Symptoms:
- Sharp chest pain that worsens with deep breathing
- Shortness of breath, especially when lying down
- Mild fever (100.2°F)
- Dry cough
- Fatigue
"""
out = client.swarms.run(
name="ICD Analysis Swarm",
description="A swarm that analyzes ICD codes",
swarm_type="ConcurrentWorkflow",
task=patient_symptoms,
agents=[
{
"agent_name": "ICD-Analyzer",
"description": "An agent that analyzes ICD codes",
"system_prompt": "You are an expert ICD code analyzer. Your task is to analyze the ICD codes and provide a detailed explanation of the codes.",
"model_name": "groq/openai/gpt-oss-120b",
"role": "worker",
"max_loops": 1,
"max_tokens": 8192,
"temperature": 0.5,
},
{
"agent_name": "ICD-Code-Explainer-Primary",
"description": "An agent that provides primary explanations for ICD codes",
"system_prompt": "You are an expert ICD code explainer. Your task is to provide a clear and thorough explanation of the ICD codes to the user, focusing on primary meanings and clinical context.",
"model_name": "groq/openai/gpt-oss-120b",
"role": "worker",
"max_loops": 1,
"max_tokens": 8192,
"temperature": 0.5,
},
{
"agent_name": "ICD-Code-Explainer-Secondary",
"description": "An agent that provides additional context and secondary explanations for ICD codes",
"system_prompt": "You are an expert ICD code explainer. Your task is to provide additional context, nuances, and secondary explanations for the ICD codes, including possible differential diagnoses and related codes.",
"model_name": "groq/openai/gpt-oss-120b",
"role": "worker",
"max_loops": 1,
"max_tokens": 8192,
"temperature": 0.5,
},
],
)
print(json.dumps(out, indent=4))

@@ -0,0 +1,19 @@
from swarms.sims.senator_assembly import SenatorAssembly
def main():
"""
Simulate a Senate vote on a bill to broadly deregulate the U.S. IPO market.
This function initializes the SenatorAssembly and runs a concurrent vote simulation
on the specified bill.
"""
senator_simulation = SenatorAssembly()
# senator_simulation.simulate_vote_concurrent(
# "A bill proposing to deregulate the IPO (Initial Public Offering) market in the United States as extensively as possible. The bill seeks to remove or significantly reduce existing regulatory requirements and oversight for companies seeking to go public, with the aim of increasing market efficiency and access to capital. Senators must consider the potential economic, legal, and ethical consequences of such broad deregulation, and cast their votes accordingly.",
# batch_size=10,
# )
if __name__ == "__main__":
main()
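
The vote itself is left commented out above. Uncommented, and with the result fields that the concurrent-voting test script later in this commit relies on, the call looks like this sketch:

# Sketch: run the concurrent vote and read the outcome. The result keys
# ("results" -> "yea"/"nay"/"present"/"outcome") are the ones consumed by
# the test script in this commit.
vote = senator_simulation.simulate_vote_concurrent(
    "A bill proposing to deregulate the IPO market as extensively as possible.",
    batch_size=10,
)
print(vote["results"]["outcome"])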

@@ -0,0 +1,662 @@
"""
Production-grade AI Vision Pipeline for depth estimation, segmentation, object detection,
and 3D point cloud generation.
This module provides a comprehensive pipeline that combines MiDaS for depth estimation,
SAM (Segment Anything Model) for semantic segmentation, YOLOv8 for object detection,
and Open3D for 3D point cloud generation.
"""
import sys
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union, Any
import warnings
warnings.filterwarnings("ignore")
import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from PIL import Image
import open3d as o3d
from loguru import logger
# Third-party model imports
try:
import timm
from segment_anything import (
SamAutomaticMaskGenerator,
sam_model_registry,
)
from ultralytics import YOLO
except ImportError as e:
logger.error(f"Missing required dependencies: {e}")
sys.exit(1)
class AIVisionPipeline:
"""
A comprehensive AI vision pipeline that performs depth estimation, semantic segmentation,
object detection, and 3D point cloud generation from input images.
This class integrates multiple state-of-the-art models:
- MiDaS for monocular depth estimation
- SAM (Segment Anything Model) for semantic segmentation
- YOLOv8 for object detection
- Open3D for 3D point cloud generation
Attributes:
model_dir (Path): Directory where models are stored
device (torch.device): Computing device (CPU/CUDA)
midas_model: Loaded MiDaS depth estimation model
midas_transform: MiDaS preprocessing transforms
sam_generator: SAM automatic mask generator
yolo_model: YOLOv8 object detection model
Example:
>>> pipeline = AIVisionPipeline()
>>> results = pipeline.process_image("path/to/image.jpg")
>>> point_cloud = results["point_cloud"]
"""
def __init__(
self,
model_dir: str = "./models",
device: Optional[str] = None,
midas_model_type: str = "MiDaS",
sam_model_type: str = "vit_b",
yolo_model_path: str = "yolov8n.pt",
log_level: str = "INFO",
) -> None:
"""
Initialize the AI Vision Pipeline.
Args:
model_dir: Directory to store downloaded models
device: Computing device ('cpu', 'cuda', or None for auto-detection)
midas_model_type: MiDaS model variant ('MiDaS', 'MiDaS_small', 'DPT_Large', etc.)
sam_model_type: SAM model type ('vit_b', 'vit_l', 'vit_h')
yolo_model_path: Path to YOLOv8 model weights
log_level: Logging level ('DEBUG', 'INFO', 'WARNING', 'ERROR')
Raises:
RuntimeError: If required models cannot be loaded
FileNotFoundError: If model files are not found
"""
# Setup logging
logger.remove()
logger.add(
sys.stdout,
level=log_level,
format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
)
# Initialize attributes
self.model_dir = Path(model_dir)
self.model_dir.mkdir(parents=True, exist_ok=True)
# Device setup
if device is None:
self.device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
)
else:
self.device = torch.device(device)
logger.info(f"Using device: {self.device}")
# Model configuration
self.midas_model_type = midas_model_type
self.sam_model_type = sam_model_type
self.yolo_model_path = yolo_model_path
# Initialize model placeholders
self.midas_model: Optional[torch.nn.Module] = None
self.midas_transform: Optional[transforms.Compose] = None
self.sam_generator: Optional[SamAutomaticMaskGenerator] = None
self.yolo_model: Optional[YOLO] = None
# Load all models
self._setup_models()
logger.success("AI Vision Pipeline initialized successfully")
def _setup_models(self) -> None:
"""
Load and initialize all AI models with proper error handling.
Raises:
RuntimeError: If any model fails to load
"""
try:
self._load_midas_model()
self._load_sam_model()
self._load_yolo_model()
except Exception as e:
logger.error(f"Failed to setup models: {e}")
raise RuntimeError(f"Model initialization failed: {e}")
def _load_midas_model(self) -> None:
"""Load MiDaS depth estimation model."""
try:
logger.info(
f"Loading MiDaS model: {self.midas_model_type}"
)
# Load MiDaS model from torch hub
self.midas_model = torch.hub.load(
"intel-isl/MiDaS",
self.midas_model_type,
pretrained=True,
)
self.midas_model.to(self.device)
self.midas_model.eval()
# Load corresponding transforms
midas_transforms = torch.hub.load(
"intel-isl/MiDaS", "transforms"
)
if self.midas_model_type in ["DPT_Large", "DPT_Hybrid"]:
self.midas_transform = midas_transforms.dpt_transform
else:
self.midas_transform = (
midas_transforms.default_transform
)
logger.success("MiDaS model loaded successfully")
except Exception as e:
logger.error(f"Failed to load MiDaS model: {e}")
raise
def _load_sam_model(self) -> None:
"""Load SAM (Segment Anything Model) for semantic segmentation."""
try:
logger.info(f"Loading SAM model: {self.sam_model_type}")
# SAM model checkpoints mapping
sam_checkpoint_urls = {
"vit_b": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth",
"vit_l": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth",
"vit_h": "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth",
}
checkpoint_path = (
self.model_dir / f"sam_{self.sam_model_type}.pth"
)
# Download checkpoint if not exists
if not checkpoint_path.exists():
logger.info(
f"Downloading SAM checkpoint to {checkpoint_path}"
)
import urllib.request
urllib.request.urlretrieve(
sam_checkpoint_urls[self.sam_model_type],
checkpoint_path,
)
# Load SAM model
sam = sam_model_registry[self.sam_model_type](
checkpoint=str(checkpoint_path)
)
sam.to(self.device)
# Create automatic mask generator
self.sam_generator = SamAutomaticMaskGenerator(
model=sam,
points_per_side=32,
pred_iou_thresh=0.86,
stability_score_thresh=0.92,
crop_n_layers=1,
crop_n_points_downscale_factor=2,
min_mask_region_area=100,
)
logger.success("SAM model loaded successfully")
except Exception as e:
logger.error(f"Failed to load SAM model: {e}")
raise
def _load_yolo_model(self) -> None:
"""Load YOLOv8 object detection model."""
try:
logger.info(
f"Loading YOLOv8 model: {self.yolo_model_path}"
)
self.yolo_model = YOLO(self.yolo_model_path)
# Move to appropriate device
if self.device.type == "cuda":
self.yolo_model.to(self.device)
logger.success("YOLOv8 model loaded successfully")
except Exception as e:
logger.error(f"Failed to load YOLOv8 model: {e}")
raise
def _load_and_preprocess_image(
self, image_path: Union[str, Path]
) -> Tuple[np.ndarray, Image.Image]:
"""
Load and preprocess input image.
Args:
image_path: Path to the input image (JPG or PNG)
Returns:
Tuple of (opencv_image, pil_image)
Raises:
FileNotFoundError: If image file doesn't exist
ValueError: If image format is not supported
"""
image_path = Path(image_path)
if not image_path.exists():
raise FileNotFoundError(f"Image not found: {image_path}")
if image_path.suffix.lower() not in [".jpg", ".jpeg", ".png"]:
raise ValueError(
f"Unsupported image format: {image_path.suffix}"
)
try:
# Load with OpenCV (BGR format)
cv_image = cv2.imread(str(image_path))
if cv_image is None:
raise ValueError(
f"Could not load image: {image_path}"
)
# Convert BGR to RGB for PIL
rgb_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
pil_image = Image.fromarray(rgb_image)
logger.debug(
f"Loaded image: {image_path} ({rgb_image.shape})"
)
return rgb_image, pil_image
except Exception as e:
logger.error(f"Failed to load image {image_path}: {e}")
raise
def estimate_depth(self, image: np.ndarray) -> np.ndarray:
"""
Generate depth map using MiDaS model.
Args:
image: Input image as numpy array (H, W, 3) in RGB format
Returns:
Depth map as numpy array (H, W)
Raises:
RuntimeError: If depth estimation fails
"""
try:
logger.debug("Estimating depth with MiDaS")
# Preprocess image for MiDaS
input_tensor = self.midas_transform(image).to(self.device)
# Perform inference
with torch.no_grad():
depth_map = self.midas_model(input_tensor)
depth_map = torch.nn.functional.interpolate(
depth_map.unsqueeze(1),
size=image.shape[:2],
mode="bicubic",
align_corners=False,
).squeeze()
# Convert to numpy
depth_numpy = depth_map.cpu().numpy()
# Normalize depth values
depth_numpy = (depth_numpy - depth_numpy.min()) / (
depth_numpy.max() - depth_numpy.min()
)
logger.debug(
f"Depth estimation completed. Shape: {depth_numpy.shape}"
)
return depth_numpy
except Exception as e:
logger.error(f"Depth estimation failed: {e}")
raise RuntimeError(f"Depth estimation error: {e}")
def segment_image(
self, image: np.ndarray
) -> List[Dict[str, Any]]:
"""
Perform semantic segmentation using SAM.
Args:
image: Input image as numpy array (H, W, 3) in RGB format
Returns:
List of segmentation masks with metadata
Raises:
RuntimeError: If segmentation fails
"""
try:
logger.debug("Performing segmentation with SAM")
# Generate masks
masks = self.sam_generator.generate(image)
logger.debug(f"Generated {len(masks)} segmentation masks")
return masks
except Exception as e:
logger.error(f"Segmentation failed: {e}")
raise RuntimeError(f"Segmentation error: {e}")
def detect_objects(
self, image: np.ndarray
) -> List[Dict[str, Any]]:
"""
Perform object detection using YOLOv8.
Args:
image: Input image as numpy array (H, W, 3) in RGB format
Returns:
List of detected objects with bounding boxes and confidence scores
Raises:
RuntimeError: If object detection fails
"""
try:
logger.debug("Performing object detection with YOLOv8")
# Run inference
results = self.yolo_model(image, verbose=False)
# Extract detections
detections = []
for result in results:
boxes = result.boxes
if boxes is not None:
for i in range(len(boxes)):
detection = {
"bbox": boxes.xyxy[i]
.cpu()
.numpy(), # [x1, y1, x2, y2]
"confidence": float(
boxes.conf[i].cpu().numpy()
),
"class_id": int(
boxes.cls[i].cpu().numpy()
),
"class_name": result.names[
int(boxes.cls[i].cpu().numpy())
],
}
detections.append(detection)
logger.debug(f"Detected {len(detections)} objects")
return detections
except Exception as e:
logger.error(f"Object detection failed: {e}")
raise RuntimeError(f"Object detection error: {e}")
def generate_point_cloud(
self,
image: np.ndarray,
depth_map: np.ndarray,
masks: Optional[List[Dict[str, Any]]] = None,
) -> o3d.geometry.PointCloud:
"""
Generate 3D point cloud from image and depth data.
Args:
image: RGB image array (H, W, 3)
depth_map: Depth map array (H, W)
masks: Optional segmentation masks for point cloud filtering
Returns:
Open3D PointCloud object
Raises:
ValueError: If input dimensions don't match
RuntimeError: If point cloud generation fails
"""
try:
logger.debug("Generating 3D point cloud")
if image.shape[:2] != depth_map.shape:
raise ValueError(
"Image and depth map dimensions must match"
)
height, width = depth_map.shape
# Create intrinsic camera parameters (assuming standard camera)
fx = fy = width # Focal length approximation
cx, cy = (
width / 2,
height / 2,
) # Principal point at image center
# Create coordinate grids
u, v = np.meshgrid(np.arange(width), np.arange(height))
# Convert depth to actual distances (inverse depth)
# MiDaS outputs inverse depth, so we invert it
z = 1.0 / (
depth_map + 1e-6
) # Add small epsilon to avoid division by zero
# Back-project to 3D coordinates
x = (u - cx) * z / fx
y = (v - cy) * z / fy
# Create point cloud
points = np.stack(
[x.flatten(), y.flatten(), z.flatten()], axis=1
)
colors = (
image.reshape(-1, 3) / 255.0
) # Normalize colors to [0, 1]
# Filter out invalid points
valid_mask = np.isfinite(points).all(axis=1) & (
z.flatten() > 0
)
points = points[valid_mask]
colors = colors[valid_mask]
# Create Open3D point cloud
point_cloud = o3d.geometry.PointCloud()
point_cloud.points = o3d.utility.Vector3dVector(points)
point_cloud.colors = o3d.utility.Vector3dVector(colors)
# Optional: Filter by segmentation masks
if masks and len(masks) > 0:
# Use the largest mask for filtering
largest_mask = max(masks, key=lambda x: x["area"])
mask_2d = largest_mask["segmentation"]
mask_1d = mask_2d.flatten()[valid_mask]
filtered_points = points[mask_1d]
filtered_colors = colors[mask_1d]
point_cloud.points = o3d.utility.Vector3dVector(
filtered_points
)
point_cloud.colors = o3d.utility.Vector3dVector(
filtered_colors
)
# Remove statistical outliers
point_cloud, _ = point_cloud.remove_statistical_outlier(
nb_neighbors=20, std_ratio=2.0
)
logger.debug(
f"Generated point cloud with {len(point_cloud.points)} points"
)
return point_cloud
except Exception as e:
logger.error(f"Point cloud generation failed: {e}")
raise RuntimeError(f"Point cloud generation error: {e}")
def process_image(
self, image_path: Union[str, Path]
) -> Dict[str, Any]:
"""
Process a single image through the complete AI vision pipeline.
Args:
image_path: Path to input image (JPG or PNG)
Returns:
Dictionary containing all processing results:
- 'image': Original RGB image
- 'depth_map': Depth estimation result
- 'segmentation_masks': SAM segmentation results
- 'detections': YOLO object detection results
- 'point_cloud': Open3D point cloud object
Raises:
FileNotFoundError: If image file doesn't exist
RuntimeError: If any processing step fails
"""
try:
logger.info(f"Processing image: {image_path}")
# Load and preprocess image
rgb_image, pil_image = self._load_and_preprocess_image(
image_path
)
# Depth estimation
depth_map = self.estimate_depth(rgb_image)
# Semantic segmentation
segmentation_masks = self.segment_image(rgb_image)
# Object detection
detections = self.detect_objects(rgb_image)
# 3D point cloud generation
point_cloud = self.generate_point_cloud(
rgb_image, depth_map, segmentation_masks
)
# Compile results
results = {
"image": rgb_image,
"depth_map": depth_map,
"segmentation_masks": segmentation_masks,
"detections": detections,
"point_cloud": point_cloud,
"metadata": {
"image_shape": rgb_image.shape,
"num_segments": len(segmentation_masks),
"num_detections": len(detections),
"num_points": len(point_cloud.points),
},
}
logger.success("Image processing completed successfully")
logger.info(f"Results: {results['metadata']}")
return results
except Exception as e:
logger.error(f"Image processing failed: {e}")
raise
def save_point_cloud(
self,
point_cloud: o3d.geometry.PointCloud,
output_path: Union[str, Path],
) -> None:
"""
Save point cloud to file.
Args:
point_cloud: Open3D PointCloud object
output_path: Output file path (.ply, .pcd, .xyz)
Raises:
RuntimeError: If saving fails
"""
try:
output_path = Path(output_path)
output_path.parent.mkdir(parents=True, exist_ok=True)
success = o3d.io.write_point_cloud(
str(output_path), point_cloud
)
if not success:
raise RuntimeError("Failed to write point cloud file")
logger.success(f"Point cloud saved to: {output_path}")
except Exception as e:
logger.error(f"Failed to save point cloud: {e}")
raise RuntimeError(f"Point cloud save error: {e}")
def visualize_point_cloud(
self, point_cloud: o3d.geometry.PointCloud
) -> None:
"""
Visualize point cloud using Open3D viewer.
Args:
point_cloud: Open3D PointCloud object to visualize
"""
try:
logger.info("Opening point cloud visualization")
o3d.visualization.draw_geometries([point_cloud])
except Exception as e:
logger.warning(f"Visualization failed: {e}")
# Example usage and testing
if __name__ == "__main__":
# Example usage
try:
# Initialize pipeline
pipeline = AIVisionPipeline(
model_dir="./models", log_level="INFO"
)
# Process an image (replace with actual image path)
image_path = "map_two.png" # Replace with your image path
if Path(image_path).exists():
results = pipeline.process_image(image_path)
# Save point cloud
pipeline.save_point_cloud(
results["point_cloud"], "output_point_cloud.ply"
)
# Optional: Visualize point cloud
pipeline.visualize_point_cloud(results["point_cloud"])
print(
f"Processing completed! Generated {results['metadata']['num_points']} 3D points"
)
else:
logger.warning(f"Example image not found: {image_path}")
except Exception as e:
logger.error(f"Example execution failed: {e}")

Binary file not shown (new image, 943 KiB).

Binary file not shown (new image, 1.0 MiB).

File diff suppressed because it is too large.

@@ -5,8 +5,8 @@ This script demonstrates various scenarios and use cases for the senator simulation,
 including debates, votes, committee hearings, and individual senator interactions.
 """
-from simulations.senator_assembly.senator_simulation import (
-    SenatorSimulation,
+from swarms.sims.senator_assembly import (
+    SenatorAssembly,
 )
 import json
 import time

@@ -18,7 +18,7 @@ def demonstrate_individual_senators():
     print("🎭 INDIVIDUAL SENATOR DEMONSTRATIONS")
     print("=" * 80)
-    senate = SenatorSimulation()
+    senate = SenatorAssembly()
     # Test different types of senators with various questions
     test_senators = [

@@ -85,7 +85,7 @@ def demonstrate_senate_debates():
     print("💬 SENATE DEBATE SIMULATIONS")
     print("=" * 80)
-    senate = SenatorSimulation()
+    senate = SenatorAssembly()
     debate_topics = [
         {

@@ -153,7 +153,7 @@ def demonstrate_senate_votes():
     print("🗳️ SENATE VOTING SIMULATIONS")
     print("=" * 80)
-    senate = SenatorSimulation()
+    senate = SenatorAssembly()
     bills = [
         {

@@ -244,7 +244,7 @@ def demonstrate_committee_hearings():
     print("🏛️ COMMITTEE HEARING SIMULATIONS")
     print("=" * 80)
-    senate = SenatorSimulation()
+    senate = SenatorAssembly()
     hearings = [
         {

@@ -320,7 +320,7 @@ def demonstrate_party_analysis():
     print("📊 PARTY ANALYSIS AND COMPARISONS")
     print("=" * 80)
-    senate = SenatorSimulation()
+    senate = SenatorAssembly()
     # Get party breakdown
     composition = senate.get_senate_composition()

@@ -372,7 +372,7 @@ def demonstrate_interactive_scenarios():
     print("🎮 INTERACTIVE SCENARIOS")
     print("=" * 80)
-    senate = SenatorSimulation()
+    senate = SenatorAssembly()
     scenarios = [
         {

@@ -492,7 +492,7 @@ def main():
     print("• Party-based analysis and comparisons")
     print("• Interactive scenarios and what-if situations")
     print(
-        "\nYou can now use the SenatorSimulation class to create your own scenarios!"
+        "\nYou can now use the SenatorAssembly class to create your own scenarios!"
     )
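
For downstream code that still imports the pre-migration path, a temporary alias is one way to bridge the rename (illustrative only, not part of this commit):

# Sketch: transitional alias so old call sites keep working while the
# simulations.senator_assembly.senator_simulation import path is retired.
from swarms.sims.senator_assembly import SenatorAssembly

SenatorSimulation = SenatorAssembly  # old class name -> new class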

@@ -0,0 +1,135 @@
#!/usr/bin/env python3
"""
Test script for the new concurrent voting functionality in the Senate simulation.
"""
from swarms.sims.senator_assembly import SenatorAssembly
def test_concurrent_voting():
"""
Test the new concurrent voting functionality.
"""
print("🏛️ Testing Concurrent Senate Voting...")
# Create the simulation
senate = SenatorAssembly()
print("\n📊 Senate Composition:")
composition = senate.get_senate_composition()
print(f" Total Senators: {composition['total_senators']}")
print(f" Party Breakdown: {composition['party_breakdown']}")
# Test concurrent voting on a bill
bill_description = "A comprehensive infrastructure bill including roads, bridges, broadband expansion, and clean energy projects with a total cost of $1.2 trillion"
print("\n🗳️ Running Concurrent Vote on Infrastructure Bill")
print(f" Bill: {bill_description[:100]}...")
# Run the concurrent vote with batch size of 10
vote_results = senate.simulate_vote_concurrent(
bill_description=bill_description,
batch_size=10, # Process 10 senators concurrently in each batch
)
# Display results
print("\n📊 Final Vote Results:")
print(f" Total Votes: {vote_results['results']['total_votes']}")
print(f" YEA: {vote_results['results']['yea']}")
print(f" NAY: {vote_results['results']['nay']}")
print(f" PRESENT: {vote_results['results']['present']}")
print(f" OUTCOME: {vote_results['results']['outcome']}")
print("\n📈 Party Breakdown:")
for party, votes in vote_results["party_breakdown"].items():
total_party_votes = sum(votes.values())
if total_party_votes > 0:
print(
f" {party}: YEA={votes['yea']}, NAY={votes['nay']}, PRESENT={votes['present']}"
)
print("\n📋 Sample Individual Votes (first 10):")
for i, (senator, vote) in enumerate(
vote_results["votes"].items()
):
if i >= 10: # Only show first 10
break
party = senate._get_senator_party(senator)
print(f" {senator} ({party}): {vote}")
if len(vote_results["votes"]) > 10:
print(
f" ... and {len(vote_results['votes']) - 10} more votes"
)
print("\n⚡ Performance Info:")
print(f" Batch Size: {vote_results['batch_size']}")
print(f" Total Batches: {vote_results['total_batches']}")
return vote_results
def test_concurrent_voting_with_subset():
"""
Test concurrent voting with a subset of senators.
"""
print("\n" + "=" * 60)
print("🏛️ Testing Concurrent Voting with Subset of Senators...")
# Create the simulation
senate = SenatorAssembly()
# Select a subset of senators for testing
test_senators = [
"Katie Britt",
"Mark Kelly",
"Lisa Murkowski",
"Alex Padilla",
"Tom Cotton",
"Kyrsten Sinema",
"John Barrasso",
"Tammy Duckworth",
"Ted Cruz",
"Amy Klobuchar",
]
bill_description = (
"A bill to increase the federal minimum wage to $15 per hour"
)
print("\n🗳️ Running Concurrent Vote on Minimum Wage Bill")
print(f" Bill: {bill_description}")
print(f" Participants: {len(test_senators)} senators")
# Run the concurrent vote
vote_results = senate.simulate_vote_concurrent(
bill_description=bill_description,
participants=test_senators,
batch_size=5, # Smaller batch size for testing
)
# Display results
print("\n📊 Vote Results:")
print(f" YEA: {vote_results['results']['yea']}")
print(f" NAY: {vote_results['results']['nay']}")
print(f" PRESENT: {vote_results['results']['present']}")
print(f" OUTCOME: {vote_results['results']['outcome']}")
print("\n📋 All Individual Votes:")
for senator, vote in vote_results["votes"].items():
party = senate._get_senator_party(senator)
print(f" {senator} ({party}): {vote}")
return vote_results
if __name__ == "__main__":
# Test full senate concurrent voting
full_results = test_concurrent_voting()
# Test subset concurrent voting
subset_results = test_concurrent_voting_with_subset()
print("\n✅ Concurrent voting tests completed successfully!")
print(f" Full Senate: {full_results['results']['outcome']}")
print(f" Subset: {subset_results['results']['outcome']}")

File diff suppressed because it is too large.

@@ -99,14 +99,14 @@ models = [
     "anthropic/claude-3-sonnet-20240229",
     "openai/gpt-4o-mini",
     "openai/gpt-4o",
-    "deepseek/deepseek-chat",
-    "deepseek/deepseek-reasoner",
     "groq/deepseek-r1-distill-qwen-32b",
     "groq/deepseek-r1-distill-qwen-32b",
     # "gemini/gemini-pro",
     # "gemini/gemini-1.5-pro",
+    "groq/moonshotai/kimi-k2-instruct",
     "openai/03-mini",
     "o4-mini",
+    "claude-sonnet-4-20250514",
     "o3",
     "gpt-4.1",
     "groq/llama-3.1-8b-instant",
