from datetime import datetime

import cv2
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from Neko.RealSense import RealSenseController
from ultralytics import YOLO


class CameraProcessor:
    def __init__(self, model_path='./models/yolov9e_object_classification.pt'):
        self.controller = RealSenseController()
        self.model = YOLO(model_path)

    def inline_detection(self):
        """Run YOLO on the current color frame and attach a mean depth to each detection."""
        depth_matrix = self.controller.acquisition.get_depth_image()
        color_matrix = self.controller.acquisition.get_color_image()

        if depth_matrix is None or color_matrix is None:
            print("Failed to acquire images. Check the camera connection.")
            return [], None

        results = self.model(color_matrix)
        annotated_image = results[0].plot()

        detections_info = []
        for detection in results[0].boxes:
            x1, y1, x2, y2 = map(int, detection.xyxy[0])
            class_id = int(detection.cls[0])
            class_name = self.model.names[class_id]

            # Mean depth over the bounding box. Zero-valued pixels typically mark
            # missing data on RealSense sensors and are excluded; the ".2f}m" label
            # assumes the controller returns depth in meters.
            depth_values = depth_matrix[y1:y2, x1:x2]
            valid_depth = depth_values[depth_values > 0]
            mean_depth = float(np.mean(valid_depth)) if valid_depth.size else 0.0

            detections_info.append({
                'class_id': class_id,
                'class_name': class_name,
                'bbox': [x1, y1, x2, y2],
                'mean_depth': mean_depth
            })

            cv2.putText(annotated_image, f'{class_name} {mean_depth:.2f}m',
                        (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            cv2.rectangle(annotated_image, (x1, y1), (x2, y2), (0, 255, 0), 2)

        return detections_info, annotated_image

    def plot_3d_bounding_boxes(self, detections_info):
        """Draw each detection as a thin 3D box placed at its mean depth."""
        if not detections_info:
            print("No detections to plot.")
            return

        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        for detection in detections_info:
            x1, y1, x2, y2 = detection['bbox']
            mean_depth = detection['mean_depth']
            class_name = detection['class_name']

            depth = 0.05  # box thickness along the depth axis

            # Eight corners: front face at mean_depth, back face offset by `depth`.
            box_points = [
                [x1, y1, mean_depth], [x2, y1, mean_depth],
                [x2, y2, mean_depth], [x1, y2, mean_depth],
                [x1, y1, mean_depth + depth], [x2, y1, mean_depth + depth],
                [x2, y2, mean_depth + depth], [x1, y2, mean_depth + depth]
            ]

            faces = [
                [box_points[0], box_points[1], box_points[5], box_points[4]],
                [box_points[3], box_points[2], box_points[6], box_points[7]],
                [box_points[0], box_points[3], box_points[7], box_points[4]],
                [box_points[1], box_points[2], box_points[6], box_points[5]],
                [box_points[0], box_points[1], box_points[2], box_points[3]],
                [box_points[4], box_points[5], box_points[6], box_points[7]]
            ]

            box = Poly3DCollection(faces, facecolors='cyan', linewidths=1,
                                   edgecolors='r', alpha=0.25)
            ax.add_collection3d(box)
            ax.text((x1 + x2) / 2, (y1 + y2) / 2, mean_depth + depth,
                    f'{class_name} {mean_depth:.2f}m', color='blue', fontsize=8)

        ax.set_xlabel("X")
        ax.set_ylabel("Y")
        ax.set_zlabel("Depth (m)")
        ax.set_title("3D Bounding Boxes with Depth Information")

        # Axis limits with a fixed margin around all boxes (margins share the data units).
        all_x = [d['bbox'][0] for d in detections_info] + [d['bbox'][2] for d in detections_info]
        all_y = [d['bbox'][1] for d in detections_info] + [d['bbox'][3] for d in detections_info]
        all_z = [d['mean_depth'] for d in detections_info] + [d['mean_depth'] + 0.05 for d in detections_info]

        ax.set_xlim(min(all_x) - 50, max(all_x) + 50)
        ax.set_ylim(min(all_y) - 50, max(all_y) + 50)
        ax.set_zlim(min(all_z) - 50, max(all_z) + 50)

        plt.show()

    def test_time_delta(self):
        """Measure how long a single detection pass takes."""
        start_time = datetime.now()
        detections_info, _ = self.inline_detection()
        end_time = datetime.now()
        delta_time = end_time - start_time
        print(f"Execution time: {delta_time}")
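

# --- Usage sketch (not part of the original module; assumes a connected camera) ---
# A minimal driver showing how CameraProcessor might be exercised: run one
# detection pass, show the annotated frame, then plot the 3D boxes and time a pass.
# The cv2 window handling here is illustrative only.
if __name__ == '__main__':
    processor = CameraProcessor()
    detections, annotated = processor.inline_detection()

    if annotated is not None:
        cv2.imshow('Detections', annotated)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    if detections:
        processor.plot_3d_bounding_boxes(detections)

    processor.test_time_delta()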