feat: Написан alpha version сервис реализизующий обработку вектора следования за ближайшим человеком в кадре RealSense
parent
732c4fb7f7
commit
6cfbece237
@ -1,9 +1,6 @@
|
|||||||
# producer.Dockerfile
FROM python:3.9

WORKDIR /app

# Copy the dependency manifest alone and install first: this layer is
# cached across source-code changes, so pip only reruns when
# requirements.txt itself changes.  (Fix: `COPY . .` before `pip install`
# invalidated the install layer on every code edit.)
COPY requirements.txt .
RUN pip install -r requirements.txt

# Now bring in the application source (includes AgenticInterfaces/).
COPY . .

CMD ["python", "test_show_responses_from_llm.py"]
|
||||||
|
@ -1,8 +1,5 @@
|
|||||||
FROM python:3.9

WORKDIR /app

# Install dependencies before copying the source so the pip layer is
# cached across code changes.  (Fix: `COPY . .` before `pip install`
# invalidated the install layer on every code edit.)
COPY requirements.txt .
RUN pip install -r requirements.txt

# Application source (includes AgenticInterfaces/).
COPY . .

CMD ["python", "llm_worker.py"]
|
@ -1,9 +1,6 @@
|
|||||||
# producer.Dockerfile
FROM python:3.9

WORKDIR /app

# Dependencies first for layer caching: pip only reruns when
# requirements.txt changes, not on every source edit.
COPY requirements.txt .
RUN pip install -r requirements.txt

# Application source (includes AgenticInterfaces/).
COPY . .

CMD ["python", "test_send_llm_requests.py"]
|
||||||
|
@ -0,0 +1,78 @@
|
|||||||
|
import numpy as np
|
||||||
|
import cv2
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
|
||||||
|
from Interfaces.RealSense import RealSenseController
|
||||||
|
from Interfaces.CameraProcessor import CameraProcessor
|
||||||
|
from Algorithms.TargetFollower import TargetFollower
|
||||||
|
from ultralytics import YOLO
|
||||||
|
from datetime import datetime
|
||||||
|
|
||||||
|
|
||||||
|
class Visualization3D:
    """Render detections and an optional follow vector as a 3D matplotlib scene.

    The plot mixes image-pixel coordinates (x, y) with metric depth (z), so it
    is a visual debugging aid, not a metric 3D reconstruction.
    """

    def __init__(self, camera_position):
        # camera_position: 3-element vector (x, y, z) used as the origin of
        # the follow-vector arrow; must support element-wise `+` with the
        # follow vector (e.g. a numpy array).
        self.camera_position = camera_position

    def plot_scene(self, detections_info, follow_vector=None):
        """Draw each detection as a shallow 3D box, plus the follow vector.

        detections_info: iterable of dicts with keys 'bbox' (x1, y1, x2, y2),
            'mean_depth' (meters) and 'class_name'.
        follow_vector: optional 3-element vector drawn from camera_position.
        Blocks until the matplotlib window is closed.
        """
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')

        # Thickness of each detection box along the depth axis (meters).
        box_depth = 0.05

        for detection in detections_info:
            x1, y1, x2, y2 = detection['bbox']
            mean_depth = detection['mean_depth']
            class_name = detection['class_name']

            # Eight corners of a shallow cuboid placed at the detection's
            # mean depth.
            corners = [
                [x1, y1, mean_depth],
                [x2, y1, mean_depth],
                [x2, y2, mean_depth],
                [x1, y2, mean_depth],
                [x1, y1, mean_depth + box_depth],
                [x2, y1, mean_depth + box_depth],
                [x2, y2, mean_depth + box_depth],
                [x1, y2, mean_depth + box_depth],
            ]

            faces = [
                [corners[0], corners[1], corners[5], corners[4]],
                [corners[3], corners[2], corners[6], corners[7]],
                [corners[0], corners[3], corners[7], corners[4]],
                [corners[1], corners[2], corners[6], corners[5]],
                [corners[0], corners[1], corners[2], corners[3]],
                [corners[4], corners[5], corners[6], corners[7]],
            ]

            box = Poly3DCollection(faces, facecolors='cyan', linewidths=1,
                                   edgecolors='r', alpha=0.25)
            ax.add_collection3d(box)

            # Label the box at its top face center with class and depth.
            ax.text((x1 + x2) / 2, (y1 + y2) / 2, mean_depth + box_depth,
                    f'{class_name} {mean_depth:.2f}m', color='blue', fontsize=8)

        if follow_vector is not None:
            target_position = self.camera_position + follow_vector
            ax.quiver(
                self.camera_position[0], self.camera_position[1], self.camera_position[2],
                follow_vector[0], follow_vector[1], follow_vector[2],
                color='red', arrow_length_ratio=0.1
            )
            ax.text(target_position[0], target_position[1], target_position[2],
                    "Target", color='red', fontsize=10)

        ax.set_xlabel("X")
        ax.set_ylabel("Y")
        ax.set_zlabel("Depth (m)")
        ax.set_title("3D Scene with Follow Vector")

        # Fix: min()/max() raise ValueError on empty sequences, so only set
        # explicit axis limits when there is at least one detection.
        if detections_info:
            all_x = [d['bbox'][0] for d in detections_info] + [d['bbox'][2] for d in detections_info]
            all_y = [d['bbox'][1] for d in detections_info] + [d['bbox'][3] for d in detections_info]
            all_z = [d['mean_depth'] for d in detections_info] + [d['mean_depth'] + box_depth for d in detections_info]

            ax.set_xlim(min(all_x) - 50, max(all_x) + 50)
            ax.set_ylim(min(all_y) - 50, max(all_y) + 50)
            ax.set_zlim(min(all_z) - 0.1, max(all_z) + 0.1)

        plt.show()
|
@ -0,0 +1,13 @@
|
|||||||
|
# Slim base image keeps the service container small.
FROM python:3.9-slim

WORKDIR /app

# Dependencies first: this layer is cached until requirements.txt changes.
COPY requirements.txt .

RUN pip install --no-cache-dir -r requirements.txt

# Application source copied after the install so code edits rebuild fast.
COPY . .

# Flask listens on 5000 inside the container (mapped by docker-compose).
EXPOSE 5000

CMD ["python", "main.py"]
|
@ -0,0 +1,88 @@
|
|||||||
|
import pyrealsense2 as rs
|
||||||
|
import numpy as np
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from matplotlib.animation import FuncAnimation
|
||||||
|
import cv2
|
||||||
|
|
||||||
|
# RS_WIDTH = 1280
|
||||||
|
# RS_HEIGHT = 720
|
||||||
|
|
||||||
|
RS_WIDTH = 640
|
||||||
|
RS_HEIGHT = 480
|
||||||
|
|
||||||
|
|
||||||
|
class RealSenseInterface:
    """Base class for RealSense helpers; provides shared error reporting."""

    def noCameraFound(self):
        """Report that no RealSense device could be reached."""
        message = "RealSense camera not found. Check the connection."
        print(message)
|
||||||
|
|
||||||
|
|
||||||
|
class ImageAcquisition(RealSenseInterface):
    """Owns the RealSense pipeline and exposes the latest depth/color frames."""

    def __init__(self):
        """Start depth and color streams at RS_WIDTH x RS_HEIGHT @ 30 FPS.

        Exits the process after printing a diagnostic if the camera cannot
        be started.
        """
        try:
            self.__pipeline = rs.pipeline()
            config = rs.config()
            config.enable_stream(rs.stream.depth, RS_WIDTH, RS_HEIGHT, rs.format.z16, 30)
            config.enable_stream(rs.stream.color, RS_WIDTH, RS_HEIGHT, rs.format.bgr8, 30)
            self.__pipeline.start(config)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.  Catch only genuine errors before giving up.
            self.noCameraFound()
            exit()

    def get_depth_image(self):
        """Return the next depth frame as a numpy array, or None if absent.

        NOTE(review): each getter does its own wait_for_frames(), so a depth
        frame and a color frame fetched back-to-back come from different
        frame sets — confirm whether aligned pairs are required by callers.
        """
        frames = self.__pipeline.wait_for_frames()
        depth_frame = frames.get_depth_frame()
        return np.asanyarray(depth_frame.get_data()) if depth_frame else None

    def get_color_image(self):
        """Return the next color (BGR) frame as a numpy array, or None if absent."""
        frames = self.__pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()
        return np.asanyarray(color_frame.get_data()) if color_frame else None

    def stop(self):
        """Stop the pipeline and release the device."""
        self.__pipeline.stop()
|
||||||
|
|
||||||
|
|
||||||
|
class ImageDisplay(RealSenseInterface):
    """Shows a depth/color frame pair side by side in a matplotlib window."""

    def display_images(self, depth_image, color_image):
        """Render the depth map (JET colormap) next to the color frame.

        Blocks until the window is closed.
        """
        fig, (ax1, ax2) = plt.subplots(1, 2)
        colored_depth = cv2.applyColorMap(
            cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET
        )
        ax1.imshow(colored_depth, cmap='jet')
        ax2.imshow(color_image)
        plt.show()
|
||||||
|
|
||||||
|
|
||||||
|
class ImageSaver(RealSenseInterface):
    """Persists frames to disk."""

    def save_image(self, image, filename):
        """Write `image` to `filename`; return True on success.

        Fix: cv2.imwrite fails silently (returns False) on bad paths or
        unsupported extensions — surface that flag to the caller instead of
        discarding it.  Callers that ignored the old None return are
        unaffected.
        """
        return cv2.imwrite(filename, image)
|
||||||
|
|
||||||
|
|
||||||
|
class RealSenseController:
    # Facade wiring frame acquisition, display, and saving together.

    def __init__(self):
        # Starts the camera pipeline immediately (ImageAcquisition exits the
        # process if no device is found).
        self.acquisition = ImageAcquisition()
        self.display = ImageDisplay()
        self.saver = ImageSaver()

    def start(self):
        """Open a live matplotlib view of the depth and color streams.

        Blocks until the window is closed, then stops the pipeline.
        """
        print("RealSense 3D camera detected.")
        fig, (ax1, ax2) = plt.subplots(1, 2)
        # Black placeholder images so FuncAnimation has artists to update.
        depth_im = ax1.imshow(np.zeros((RS_HEIGHT, RS_WIDTH, 3)), cmap='jet')
        color_im = ax2.imshow(np.zeros((RS_HEIGHT, RS_WIDTH, 3)), cmap='jet')

        def update(frame):
            # NOTE(review): depth and color come from two separate
            # wait_for_frames() calls inside the acquisition getters, so the
            # pair may be slightly out of sync — confirm whether aligned
            # frames are required here.
            depth_image = self.acquisition.get_depth_image()
            color_image = self.acquisition.get_color_image()
            if depth_image is not None and color_image is not None:
                depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
                depth_im.set_array(depth_colormap)
                color_im.set_array(color_image)
                # NOTE(review): rewrites this file on every animation tick —
                # looks like a debug aid; confirm it is intentional.
                self.saver.save_image(color_image, "color_image.png")

        ani = FuncAnimation(fig, update, blit=False, cache_frame_data=False)
        # wm_geometry is Tk-backend specific; pins the window to top-left.
        fig.canvas.manager.window.wm_geometry("+0+0")
        plt.show()
        self.acquisition.stop()
|
@ -0,0 +1,12 @@
|
|||||||
|
# Object detection and track service
|
||||||
|
|
||||||
|
Этот сервис также регистрирует объекты в Kafka для работы ассоциативной памяти
|
||||||
|
|
||||||
|
### Input data:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"rgb": "<base64_encoded_rgb_image>",
|
||||||
|
"depth": "<base64_encoded_depth_image>"
|
||||||
|
}
|
||||||
|
```
|
@ -0,0 +1,12 @@
|
|||||||
|
# Compose file for the RealSense object/target detection service.
version: '3.8'  # NOTE(review): the top-level version key is ignored by Compose v2+

services:
  robot-rs-object-target-detection:
    build: .
    container_name: robot_realsense_object_target_detection
    ports:
      - "7780:5000"  # host 7780 -> container 5000 (the port EXPOSEd by the Dockerfile)
    environment:
      - FLASK_ENV=development
    volumes:
      - .:/app  # live-mount the source tree for development
|
@ -0,0 +1,47 @@
|
|||||||
|
from flask import Flask, request, jsonify
|
||||||
|
from flask_cors import CORS
|
||||||
|
from Interfaces.CameraProcessor import CameraProcessor
|
||||||
|
from Algorithms.TargetFollower import TargetFollower
|
||||||
|
from Interfaces.DataProcessor import DataProcessor
|
||||||
|
import numpy as np
|
||||||
|
import cv2
|
||||||
|
import base64
|
||||||
|
|
||||||
|
# Flask application with CORS enabled so browser clients from other
# origins can call the API.
app = Flask(__name__)
CORS(app)

# Module-level singletons shared by all requests.
# NOTE(review): presumed from the names — CameraProcessor loads the
# detection model, DataProcessor decodes the incoming base64 frames;
# confirm against their definitions.
camera_processor = CameraProcessor()
data_processor = DataProcessor()
|
||||||
|
|
||||||
|
|
||||||
|
@app.route('/follow-vector', methods=['POST'])
def follow_vector():
    """Compute a follow vector toward the nearest detected person.

    Expects a JSON body with base64-encoded "rgb" and "depth" images
    (decoded by DataProcessor.inline_detection).  Returns 200 with the
    detections and the follow vector, 400 when nothing usable was
    detected, or 500 on unexpected failure.
    """
    try:
        data = request.json
        detections_info, annotated_image = data_processor.inline_detection(data)

        # Fix: was `and`, which let an empty detection list through when an
        # annotated image existed, so target selection below failed and the
        # client got a 500 instead of this intended 400.
        if not detections_info or annotated_image is None:
            return jsonify({'error': 'No detections found.'}), 400

        # Camera origin: bottom-center of the image plane at zero depth.
        image_height, image_width, _ = annotated_image.shape
        camera_position = np.array([image_width / 2, image_height, 0])

        # todo: add detections_info to the kafka topic

        # Pick the nearest person and compute the vector toward it.
        follower = TargetFollower(detections_info, annotated_image, camera_position=camera_position)
        follower.select_target('person', nearest=True)
        follow_vector = follower.calculate_follow_vector()

        return jsonify({
            'detections_info': detections_info,
            'follow_vector': follow_vector.tolist()
        })

    except Exception as e:
        # Service boundary: surface unexpected failures as JSON 500s.
        return jsonify({'error': str(e)}), 500
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Fix: the default bind is 127.0.0.1, which is unreachable through the
    # Docker port mapping (compose maps host 7780 -> container 5000).  Bind
    # 0.0.0.0:5000 to match the EXPOSEd port.  debug=True matches the
    # FLASK_ENV=development setting — disable it in production.
    app.run(host='0.0.0.0', port=5000, debug=True)
|
@ -0,0 +1,6 @@
|
|||||||
|
flask
|
||||||
|
flask-cors
|
||||||
|
opencv-python
|
||||||
|
ultralytics
|
||||||
|
matplotlib
|
||||||
|
numpy
|
Loading…
Reference in new issue