commit 549fd1da9dbc0819fb1d7d3067014659117c454e
Author: Ruslan Bakiev <572431+veikab@users.noreply.github.com>
Date: Fri Mar 6 09:43:52 2026 +0700
Initial commit
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..1278906
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,45 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+*.egg-info/
+dist/
+build/
+
+# Virtual environments
+venv/
+env/
+ENV/
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+
+# Data (will be mounted as volume)
+data/videos/*
+data/results/*
+data/jobs/*
+
+# Git
+.git/
+.gitignore
+
+# Docker
+.dockerignore
+Dockerfile
+docker-compose.yml
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Jupyter
+.ipynb_checkpoints/
+
+# Tests
+.pytest_cache/
+.coverage
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..d67ceb0
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,13 @@
+# Roboflow Configuration (optional - only if using hosted API)
+ROBOFLOW_API_KEY=your_api_key_here
+
+# Model Configuration
+MODEL_PATH=models/pickleball-detection
+MODEL_VERSION=1
+
+# Server Configuration
+API_HOST=0.0.0.0
+API_PORT=8000
+
+# Redis Configuration (for Celery)
+REDIS_URL=redis://localhost:6379/0
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b3dd177
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,43 @@
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+venv/
+env/
+ENV/
+
+# Dagster
+data/dagster_home/
+dagster_home/
+
+# Jetson flash guide (personal notes)
+jetson-orin-nano-flash-guide.md
+
+# Results
+data/*.json
+data/*.mp4
+data/*.png
+data/frames/
+data/ball_detections/
+
+# Videos
+*.mp4
+*.avi
+*.mov
+
+# Models
+models/
+*.pt
+*.onnx
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+.DS_Store
+
+# Env
+.env
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..ff701eb
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,39 @@
+# Use Python 3.11 slim image
+FROM python:3.11-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies for OpenCV and build tools
+RUN apt-get update && apt-get install -y \
+ libgl1 \
+ libglib2.0-0 \
+ libsm6 \
+ libxext6 \
+ libxrender1 \
+ libgomp1 \
+ ffmpeg \
+ build-essential \
+ cmake \
+ && rm -rf /var/lib/apt/lists/*
+
+# Copy requirements first for better caching
+COPY requirements.txt .
+
+# Install Python dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy project files
+COPY . .
+
+# Create data directory
+RUN mkdir -p data/dagster_home
+
+# Add /app to Python path so 'src' module can be imported
+ENV PYTHONPATH=/app:$PYTHONPATH
+
+# Expose ports for API (8000) and Dagster UI (3000)
+EXPOSE 8000 3000
+
+# Default command - start Dagster webserver
+CMD ["dagster", "dev", "-m", "dagster_project", "--host", "0.0.0.0", "--port", "3000"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8cbfef6
--- /dev/null
+++ b/README.md
@@ -0,0 +1,151 @@
+# 🎾 Pickle - Pickleball Ball Tracking
+
+Система трекинга пиклбольного мяча с автоматической детекцией корта и преобразованием координат в метры.
+
+## Что делает
+
+1. **Детекция корта** - автоматический поиск 4 углов корта (Roboflow модель)
+2. **Детекция мяча** - поиск мяча на каждом кадре (YOLO v8)
+3. **Трансформация координат** - преобразование пикселей в метры (homography)
+4. **Визуализация** - видео с траекторией, графики, тепловая карта
+
+## Быстрый старт
+
+```bash
+# Запуск
+docker-compose up -d
+
+# Открыть Dagster UI
+open http://localhost:3000
+
+# Запустить пайплайн
+docker exec pickle-dagster dagster asset materialize --select '*' -m dagster_project
+```
+
+## Структура пайплайна (упрощённая 2D-версия; в `Definitions` зарегистрирован расширенный 3D-пайплайн из 8 asset'ов — см. `dagster_project/__init__.py`)
+
+```
+1. extract_video_frames → Извлекает 100 кадров (с 10-й секунды)
+ ↓
+2. detect_court_keypoints → Находит 4 угла корта
+ ↓ ↓
+3. detect_ball_positions ←┘ Детектит мяч на всех кадрах
+ ↓
+4. compute_2d_coordinates → Преобразует пиксели в метры
+ ↓
+5. visualize_trajectory → Создает визуализации
+```
+
+## Результаты
+
+После выполнения пайплайна в `data/`:
+
+- **extract_video_frames.json** - метаданные видео
+- **detect_court_keypoints.json** - координаты углов корта
+- **detect_ball_positions.json** - позиции мяча в пикселях
+- **compute_2d_coordinates.json** - позиции мяча в метрах
+- **visualization.mp4** - видео с траекторией, кортом и координатами
+- **frames/** - извлеченные кадры
+- **ball_detections/** - кадры с найденным мячом
+- **court_detection_preview.jpg** - превью с найденными углами корта
+
+## Структура проекта
+
+```
+pickle/
+├── dagster_project/ # Dagster пайплайн
+│ ├── assets/ # 5 asset'ов пайплайна
+│ └── io_managers/ # JSON IO manager
+├── src/ # Основной код
+│ ├── ball_detector.py # YOLO детекция
+│ ├── court_calibrator.py # Калибровка корта
+│ ├── ball_tracker.py # Трекинг
+│ └── video_processor.py # Обработка видео
+├── data/ # Результаты выполнения
+├── DJI_0017.MP4 # Видео для обработки
+├── docker-compose.yml
+└── Dockerfile
+```
+
+## Конфигурация
+
+Параметры в `dagster_project/assets/`:
+
+- **video_extraction.py** - `start_sec=10`, `num_frames=100`
+- **ball_detection.py** - `confidence_threshold=0.3`, slicing 320x320
+- **coordinate_transform.py** - корт 13.4м × 6.1м
+
+## Модели
+
+- **Корт**: `pickleball-court-cfyv4/1` (Roboflow)
+- **Мяч**: `pickleball-moving-ball/5` (Roboflow) → fallback на YOLOv8n
+
+## Требования
+
+- Docker & Docker Compose
+- 4GB+ RAM
+- Видео файл `DJI_0017.MP4` в корне проекта
+
+## Docker команды
+
+```bash
+# Билд и запуск
+docker-compose up --build -d
+
+# Логи
+docker-compose logs -f
+
+# Остановка
+docker-compose down
+
+# Выполнить пайплайн
+docker exec pickle-dagster dagster asset materialize --select '*' -m dagster_project
+
+# Выполнить один asset
+docker exec pickle-dagster dagster asset materialize --select 'detect_ball_positions' -m dagster_project
+```
+
+## Dagster UI
+
+http://localhost:3000
+
+Показывает:
+- Граф зависимостей между assets
+- Логи выполнения
+- История запусков
+- Метаданные результатов
+
+## Формат данных
+
+**compute_2d_coordinates.json**:
+```json
+[
+ {
+ "frame": 6,
+ "timestamp": 0.2,
+ "pixel_x": 1234.5,
+ "pixel_y": 678.9,
+ "x_m": 5.67,
+ "y_m": 2.34,
+ "confidence": 0.85
+ }
+]
+```
+
+## Производительность
+
+- Извлечение кадров: ~1 сек
+- Детекция корта: ~1 сек
+- Детекция мяча: ~6 сек (100 кадров, ~15 FPS)
+- Трансформация координат: <1 сек
+- Визуализация: ~1 сек
+
+**Итого**: ~10 секунд на 100 кадров видео
+
+## Стоимость
+
+**$0** - всё работает локально в Docker, без облачных API
+
+## License
+
+MIT
diff --git a/dagster_project/__init__.py b/dagster_project/__init__.py
new file mode 100644
index 0000000..dbf443c
--- /dev/null
+++ b/dagster_project/__init__.py
@@ -0,0 +1,32 @@
+"""
+Dagster project for pickleball ball tracking with 3D coordinates
+"""
+
+from dagster import Definitions
+from dagster_project.assets import (
+ extract_video_frames,
+ detect_court_keypoints,
+ detect_ball_positions,
+ visualize_ball_on_court,
+ detect_net,
+ calibrate_camera_3d,
+ compute_ball_3d_coordinates,
+ create_interactive_viewer
+)
+from dagster_project.io_managers.json_io_manager import json_io_manager
+
+defs = Definitions(
+ assets=[
+ extract_video_frames,
+ detect_court_keypoints,
+ detect_ball_positions,
+ visualize_ball_on_court,
+ detect_net,
+ calibrate_camera_3d,
+ compute_ball_3d_coordinates,
+ create_interactive_viewer
+ ],
+ resources={
+ "json_io_manager": json_io_manager
+ }
+)
diff --git a/dagster_project/assets/__init__.py b/dagster_project/assets/__init__.py
new file mode 100644
index 0000000..2237910
--- /dev/null
+++ b/dagster_project/assets/__init__.py
@@ -0,0 +1,21 @@
+"""Dagster assets for pickleball tracking pipeline"""
+
+from dagster_project.assets.video_extraction import extract_video_frames
+from dagster_project.assets.court_detection import detect_court_keypoints
+from dagster_project.assets.ball_detection import detect_ball_positions
+from dagster_project.assets.visualization import visualize_ball_on_court
+from dagster_project.assets.net_detection import detect_net
+from dagster_project.assets.camera_calibration import calibrate_camera_3d
+from dagster_project.assets.coordinate_transform_3d import compute_ball_3d_coordinates
+from dagster_project.assets.interactive_viewer import create_interactive_viewer
+
+__all__ = [
+ "extract_video_frames",
+ "detect_court_keypoints",
+ "detect_ball_positions",
+ "visualize_ball_on_court",
+ "detect_net",
+ "calibrate_camera_3d",
+ "compute_ball_3d_coordinates",
+ "create_interactive_viewer"
+]
diff --git a/dagster_project/assets/ball_detection.py b/dagster_project/assets/ball_detection.py
new file mode 100644
index 0000000..cc07d44
--- /dev/null
+++ b/dagster_project/assets/ball_detection.py
@@ -0,0 +1,181 @@
+"""Asset 3: Detect ball positions using YOLO"""
+
+import sys
+import cv2
+from pathlib import Path
+from typing import Dict, List, Optional
+from dagster import asset, AssetExecutionContext
+from tqdm import tqdm
+
+# Add src to path to import existing ball detector
+sys.path.insert(0, str(Path(__file__).parent.parent.parent))
+
+
+@asset(
+ io_manager_key="json_io_manager",
+ compute_kind="yolo",
+ description="Detect ball positions on all frames using YOLO"
+)
+def detect_ball_positions(
+ context: AssetExecutionContext,
+ extract_video_frames: Dict
+) -> List[Dict]:
+ """
+ Detect ball positions on all extracted frames
+
+ Inputs:
+ - extract_video_frames: metadata from frame extraction
+ - data/frames/*.jpg: all extracted frames
+
+ Outputs:
+ - data/detect_ball_positions.json
+
+ Returns:
+ List of dicts with:
+ - frame: frame number
+ - x: pixel x coordinate (or None if not detected)
+ - y: pixel y coordinate (or None if not detected)
+ - confidence: detection confidence (0-1)
+ - diameter_px: estimated ball diameter in pixels
+ """
+ from src.ball_detector import BallDetector
+
+ frames_dir = Path(extract_video_frames['frames_dir'])
+ num_frames = extract_video_frames['num_frames']
+
+ context.log.info(f"Initializing YOLO ball detector...")
+
+ # Initialize detector
+ detector = BallDetector(
+ model_id="pickleball-moving-ball/5",
+ confidence_threshold=0.3, # Lower threshold to catch more detections
+ slice_enabled=False # Disable slicing for faster Hosted API inference
+ )
+
+ context.log.info(f"Processing {num_frames} frames for ball detection...")
+
+ detections = []
+ frames_with_ball = 0
+
+ for i in tqdm(range(num_frames), desc="Detecting ball"):
+ frame_path = frames_dir / f"frame_{i:04d}.jpg"
+
+ if not frame_path.exists():
+ context.log.warning(f"Frame {i} not found: {frame_path}")
+ detections.append({
+ "frame": i,
+ "x": None,
+ "y": None,
+ "confidence": 0.0,
+ "diameter_px": None,
+ "bbox": None
+ })
+ continue
+
+ # Load frame
+ frame = cv2.imread(str(frame_path))
+
+ # Detect ball
+ results = detector.detect(frame)
+
+ if results and len(results) > 0:
+ # Take highest confidence detection
+ ball = results[0]
+
+ # Calculate diameter from bbox
+ bbox = ball.get('bbox')
+ diameter_px = None
+ if bbox:
+ width = bbox[2] - bbox[0]
+ height = bbox[3] - bbox[1]
+ diameter_px = (width + height) / 2
+
+ detections.append({
+ "frame": i,
+ "x": float(ball['center'][0]),
+ "y": float(ball['center'][1]),
+ "confidence": float(ball['confidence']),
+ "diameter_px": float(diameter_px) if diameter_px else None,
+ "bbox": [float(b) for b in bbox] if bbox else None
+ })
+
+ frames_with_ball += 1
+ else:
+ # No detection
+ detections.append({
+ "frame": i,
+ "x": None,
+ "y": None,
+ "confidence": 0.0,
+ "diameter_px": None,
+ "bbox": None
+ })
+
+ # Log progress every 20 frames
+ if (i + 1) % 20 == 0:
+ detection_rate = frames_with_ball / (i + 1) * 100
+ context.log.info(f"Processed {i + 1}/{num_frames} frames. Detection rate: {detection_rate:.1f}%")
+
+ detection_rate = frames_with_ball / num_frames * 100
+ context.log.info(f"✓ Ball detected in {frames_with_ball}/{num_frames} frames ({detection_rate:.1f}%)")
+
+ # Save ALL detection images
+ _save_detection_preview(context, frames_dir, detections, num_preview=999)
+
+ return detections
+
+
+def _save_detection_preview(
+ context: AssetExecutionContext,
+ frames_dir: Path,
+ detections: List[Dict],
+ num_preview: int = 5
+):
+ """Save preview images showing ball detections"""
+ run_id = context.run_id
+ preview_dir = Path(f"data/{run_id}/ball_detections")
+ preview_dir.mkdir(parents=True, exist_ok=True)
+
+ # Find first N frames with detections
+ detected_frames = [d for d in detections if d['x'] is not None][:num_preview]
+
+ for detection in detected_frames:
+ frame_num = detection['frame']
+ frame_path = frames_dir / f"frame_{frame_num:04d}.jpg"
+
+ if not frame_path.exists():
+ continue
+
+ frame = cv2.imread(str(frame_path))
+
+ # Draw ball
+ x, y = int(detection['x']), int(detection['y'])
+ cv2.circle(frame, (x, y), 8, (0, 0, 255), -1)
+
+ # Draw bbox if available
+ if detection['bbox']:
+ bbox = detection['bbox']
+ cv2.rectangle(
+ frame,
+ (int(bbox[0]), int(bbox[1])),
+ (int(bbox[2]), int(bbox[3])),
+ (0, 255, 0),
+ 2
+ )
+
+ # Draw confidence
+ cv2.putText(
+ frame,
+ f"Conf: {detection['confidence']:.2f}",
+ (x + 15, y - 10),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.6,
+ (255, 255, 255),
+ 2
+ )
+
+ # Save preview
+ preview_path = preview_dir / f"detection_frame_{frame_num:04d}.jpg"
+ cv2.imwrite(str(preview_path), frame)
+
+ context.log.info(f"Saved {len(detected_frames)} preview images to {preview_dir}")
diff --git a/dagster_project/assets/camera_calibration.py b/dagster_project/assets/camera_calibration.py
new file mode 100644
index 0000000..7edb702
--- /dev/null
+++ b/dagster_project/assets/camera_calibration.py
@@ -0,0 +1,143 @@
+"""Asset: Calibrate camera using court corners and net position"""
+
+import cv2
+import numpy as np
+from typing import Dict
+from dagster import asset, AssetExecutionContext
+
+
+@asset(
+ io_manager_key="json_io_manager",
+ compute_kind="opencv",
+ description="Calibrate camera using cv2.solvePnP with court corners and net"
+)
+def calibrate_camera_3d(
+ context: AssetExecutionContext,
+ detect_court_keypoints: Dict,
+ detect_net: Dict
+) -> Dict:
+ """
+ Calibrate camera to get 3D pose using known 3D↔2D point correspondences
+
+ Inputs:
+ - detect_court_keypoints: 4 court corners in pixels
+ - detect_net: 4 net corners in pixels
+
+ Known 3D points:
+ - Court: 13.4m × 6.1m rectangle at Z=0
+ - Net: height 0.914m at middle of court (Y=3.05m)
+
+ Outputs:
+ Camera calibration parameters
+
+ Returns:
+ Dict with:
+ - camera_matrix: [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]
+ - rotation_vector: [rx, ry, rz]
+ - translation_vector: [tx, ty, tz]
+ - rotation_matrix: 3x3 matrix
+ - reprojection_error: RMS error in pixels
+ - calibrated: success flag
+ """
+ # Get 2D points (pixels)
+ court_corners = np.array(detect_court_keypoints['corners_pixel'], dtype=np.float32)
+ net_corners = np.array(detect_net['net_corners_pixel'], dtype=np.float32)
+
+ # Define 3D points (meters) in world coordinates
+ # Court corners (Z=0, on ground)
+ court_3d = np.array([
+ [0, 0, 0], # TL
+ [13.4, 0, 0], # TR
+ [13.4, 6.1, 0], # BR
+ [0, 6.1, 0] # BL
+ ], dtype=np.float32)
+
+ # Net endpoints (2 points)
+ # Net is at Y=3.05m (middle of 6.1m court width)
+ # We have 2 endpoints: left and right side at top of net
+ net_3d = np.array([
+ [0, 3.05, 0.914], # Left endpoint (top of net)
+ [13.4, 3.05, 0.914], # Right endpoint (top of net)
+ ], dtype=np.float32)
+
+    # Combine all 3D and 2D points. NOTE(review): net_3d has 2 points, but the docstring above says detect_net returns 4 net corners — if net_corners has 4 rows, object_points (6) and image_points (8) mismatch and cv2.solvePnP will fail on every run. Verify detect_net's output count.
+ object_points = np.vstack([court_3d, net_3d]) # 6 points total (4 court + 2 net)
+ image_points = np.vstack([court_corners, net_corners])
+
+ context.log.info(f"Calibrating with {len(object_points)} point correspondences")
+ context.log.info(f"3D points shape: {object_points.shape}")
+ context.log.info(f"2D points shape: {image_points.shape}")
+
+ # Initial camera matrix estimate
+ # Assume principal point at image center
+ w = detect_court_keypoints['frame_width']
+ h = detect_court_keypoints['frame_height']
+ cx = w / 2
+ cy = h / 2
+ # Estimate focal length (typical for drone/action cameras)
+ focal_length = max(w, h) # Initial guess
+
+ camera_matrix = np.array([
+ [focal_length, 0, cx],
+ [0, focal_length, cy],
+ [0, 0, 1]
+ ], dtype=np.float32)
+
+ # No lens distortion (assume corrected or minimal)
+ dist_coeffs = None
+
+ # Solve PnP to get camera pose
+ try:
+ success, rotation_vec, translation_vec = cv2.solvePnP(
+ object_points,
+ image_points,
+ camera_matrix,
+ dist_coeffs,
+ flags=cv2.SOLVEPNP_ITERATIVE
+ )
+
+ if not success:
+ context.log.error("cv2.solvePnP failed")
+ return {
+ "calibrated": False,
+ "error": "solvePnP failed"
+ }
+
+ # Convert rotation vector to rotation matrix
+ rotation_matrix, _ = cv2.Rodrigues(rotation_vec)
+
+ # Calculate reprojection error
+ projected_points, _ = cv2.projectPoints(
+ object_points,
+ rotation_vec,
+ translation_vec,
+ camera_matrix,
+ dist_coeffs
+ )
+ projected_points = projected_points.reshape(-1, 2)
+ reprojection_error = np.sqrt(np.mean((image_points - projected_points) ** 2))
+
+ context.log.info(f"✓ Camera calibration successful")
+ context.log.info(f" Reprojection error: {reprojection_error:.2f} pixels")
+ context.log.info(f" Focal length: {focal_length:.1f}")
+ context.log.info(f" Rotation vector: {rotation_vec.flatten()}")
+ context.log.info(f" Translation vector: {translation_vec.flatten()}")
+
+ return {
+ "camera_matrix": camera_matrix.tolist(),
+ "rotation_vector": rotation_vec.flatten().tolist(),
+ "translation_vector": translation_vec.flatten().tolist(),
+ "rotation_matrix": rotation_matrix.tolist(),
+ "reprojection_error": float(reprojection_error),
+ "focal_length": float(focal_length),
+ "principal_point": [float(cx), float(cy)],
+ "image_size": [w, h],
+ "calibrated": True
+ }
+
+ except Exception as e:
+ context.log.error(f"Error during camera calibration: {e}")
+ return {
+ "calibrated": False,
+ "error": str(e)
+ }
diff --git a/dagster_project/assets/coordinate_transform.py b/dagster_project/assets/coordinate_transform.py
new file mode 100644
index 0000000..5dcf632
--- /dev/null
+++ b/dagster_project/assets/coordinate_transform.py
@@ -0,0 +1,130 @@
+"""Asset 4: Transform pixel coordinates to real-world 2D coordinates"""
+
+import sys
+import numpy as np
+from pathlib import Path
+from typing import Dict, List
+from dagster import asset, AssetExecutionContext
+
+# Add src to path to import existing calibrator
+sys.path.insert(0, str(Path(__file__).parent.parent.parent))
+
+
+@asset(
+ io_manager_key="json_io_manager",
+ compute_kind="transform",
+ description="Transform pixel coordinates to real-world 2D court coordinates using homography"
+)
+def compute_2d_coordinates(
+ context: AssetExecutionContext,
+ detect_court_keypoints: Dict,
+ detect_ball_positions: List[Dict],
+ extract_video_frames: Dict
+) -> List[Dict]:
+ """
+ Transform ball pixel coordinates to real-world court coordinates
+
+ Inputs:
+ - detect_court_keypoints: court corners in pixels
+ - detect_ball_positions: ball detections in pixels
+ - extract_video_frames: video metadata (FPS)
+
+ Outputs:
+ - data/compute_2d_coordinates.json
+
+ Returns:
+ List of dicts with:
+ - frame: frame number
+ - timestamp: time in seconds
+ - pixel_x, pixel_y: pixel coordinates
+ - x_m, y_m: real-world coordinates in meters
+ - confidence: detection confidence
+ """
+ from src.court_calibrator import CourtCalibrator
+
+ context.log.info("Initializing court calibrator...")
+
+ calibrator = CourtCalibrator(
+ court_width_m=detect_court_keypoints['court_width_m'],
+ court_length_m=detect_court_keypoints['court_length_m']
+ )
+
+ # Calibrate using detected corners
+ corners = detect_court_keypoints['corners_pixel']
+ context.log.info(f"Calibrating with corners: {corners}")
+
+ success = calibrator.calibrate_manual(corners)
+
+ if not success:
+ raise RuntimeError("Court calibration failed. Check corner points.")
+
+ context.log.info("✓ Court calibration successful")
+
+ # Transform all ball positions
+ fps = extract_video_frames['fps']
+ trajectory = []
+
+ detected_count = 0
+ for detection in detect_ball_positions:
+ frame_num = detection['frame']
+ timestamp = frame_num / fps
+
+ if detection['x'] is not None and detection['y'] is not None:
+ # Transform pixel → meters
+ pixel_coords = [detection['x'], detection['y']]
+ real_coords = calibrator.pixel_to_real(pixel_coords)
+
+ if real_coords is not None:
+ trajectory.append({
+ "frame": frame_num,
+ "timestamp": round(timestamp, 3),
+ "pixel_x": round(detection['x'], 2),
+ "pixel_y": round(detection['y'], 2),
+ "x_m": round(float(real_coords[0]), 3),
+ "y_m": round(float(real_coords[1]), 3),
+ "confidence": round(detection['confidence'], 3)
+ })
+ detected_count += 1
+ else:
+ # Transformation failed (point outside court?)
+ trajectory.append({
+ "frame": frame_num,
+ "timestamp": round(timestamp, 3),
+ "pixel_x": round(detection['x'], 2),
+ "pixel_y": round(detection['y'], 2),
+ "x_m": None,
+ "y_m": None,
+ "confidence": round(detection['confidence'], 3)
+ })
+ else:
+ # No detection
+ trajectory.append({
+ "frame": frame_num,
+ "timestamp": round(timestamp, 3),
+ "pixel_x": None,
+ "pixel_y": None,
+ "x_m": None,
+ "y_m": None,
+ "confidence": 0.0
+ })
+
+ # Log progress
+ if (frame_num + 1) % 20 == 0:
+ context.log.info(f"Transformed {frame_num + 1}/{len(detect_ball_positions)} positions")
+
+ transform_rate = detected_count / len(detect_ball_positions) * 100
+ context.log.info(f"✓ Transformed {detected_count}/{len(detect_ball_positions)} positions ({transform_rate:.1f}%)")
+
+ # Calculate statistics
+ valid_positions = [t for t in trajectory if t['x_m'] is not None]
+
+ if valid_positions:
+ x_coords = [t['x_m'] for t in valid_positions]
+ y_coords = [t['y_m'] for t in valid_positions]
+
+ context.log.info(f"Position statistics:")
+ context.log.info(f" X range: {min(x_coords):.2f}m to {max(x_coords):.2f}m")
+ context.log.info(f" Y range: {min(y_coords):.2f}m to {max(y_coords):.2f}m")
+ context.log.info(f" Court dimensions: {calibrator.court_length}m x {calibrator.court_width}m")
+
+ return trajectory
diff --git a/dagster_project/assets/coordinate_transform_3d.py b/dagster_project/assets/coordinate_transform_3d.py
new file mode 100644
index 0000000..47704d4
--- /dev/null
+++ b/dagster_project/assets/coordinate_transform_3d.py
@@ -0,0 +1,190 @@
+"""Asset: Transform ball positions to 3D coordinates (X, Y, Z) in meters"""
+
+import cv2
+import numpy as np
+from typing import Dict, List
+from dagster import asset, AssetExecutionContext
+
+
+@asset(
+ io_manager_key="json_io_manager",
+ compute_kind="opencv",
+ description="Compute 3D ball coordinates (X, Y, Z) using camera calibration"
+)
+def compute_ball_3d_coordinates(
+ context: AssetExecutionContext,
+ detect_court_keypoints: Dict,
+ detect_ball_positions: List[Dict],
+ calibrate_camera_3d: Dict
+) -> List[Dict]:
+ """
+ Transform ball pixel coordinates to 3D world coordinates (meters)
+
+ Strategy:
+ - If ball on ground: use homography (Z=0)
+ - If ball in air: use ray casting + bbox size estimation (Z>0)
+
+ Inputs:
+ - detect_court_keypoints: court corners
+ - detect_ball_positions: ball positions in pixels
+ - calibrate_camera_3d: camera calibration
+
+ Returns:
+ List of dicts with 3D coordinates for each frame:
+ [
+ {
+ "frame": 0,
+ "x_m": float or null, # X position on court (0-13.4m)
+ "y_m": float or null, # Y position on court (0-6.1m)
+ "z_m": float or null, # Z height above court (meters)
+ "on_ground": bool, # True if ball touching court
+ "confidence": float # Detection confidence
+ },
+ ...
+ ]
+ """
+ if not calibrate_camera_3d.get('calibrated'):
+ context.log.error("Camera not calibrated, cannot compute 3D coordinates")
+ return [{"frame": i, "x_m": None, "y_m": None, "z_m": None, "on_ground": False}
+ for i in range(len(detect_ball_positions))]
+
+ # Extract calibration parameters
+ camera_matrix = np.array(calibrate_camera_3d['camera_matrix'], dtype=np.float32)
+ rotation_vec = np.array(calibrate_camera_3d['rotation_vector'], dtype=np.float32)
+ translation_vec = np.array(calibrate_camera_3d['translation_vector'], dtype=np.float32)
+ rotation_matrix = np.array(calibrate_camera_3d['rotation_matrix'], dtype=np.float32)
+
+ fx, fy = camera_matrix[0, 0], camera_matrix[1, 1]
+ cx, cy = camera_matrix[0, 2], camera_matrix[1, 2]
+ focal_length = (fx + fy) / 2
+
+ # Build homography for ground plane (Z=0)
+ court_corners_pixel = np.array(detect_court_keypoints['corners_pixel'], dtype=np.float32)
+ court_corners_meters = np.array([
+ [0, 0],
+ [13.4, 0],
+ [13.4, 6.1],
+ [0, 6.1]
+ ], dtype=np.float32)
+
+ homography_matrix = cv2.getPerspectiveTransform(court_corners_pixel, court_corners_meters)
+
+ # Camera position in world coordinates
+ camera_position = -rotation_matrix.T @ translation_vec.reshape(3, 1)
+
+ context.log.info(f"Processing {len(detect_ball_positions)} frames for 3D coordinate transformation")
+
+ results = []
+
+ for i, ball_det in enumerate(detect_ball_positions):
+ if ball_det['x'] is None:
+ # No ball detected
+ results.append({
+ "frame": i,
+ "x_m": None,
+ "y_m": None,
+ "z_m": None,
+ "on_ground": False,
+ "confidence": 0.0
+ })
+ continue
+
+ ball_x = ball_det['x']
+ ball_y = ball_det['y']
+ ball_diameter = ball_det['diameter_px']
+ bbox = ball_det['bbox']
+ confidence = ball_det['confidence']
+
+ # Strategy 1: Try ground plane projection (assume Z=0)
+ ball_point_2d = np.array([[ball_x, ball_y]], dtype=np.float32)
+ ball_ground = cv2.perspectiveTransform(ball_point_2d.reshape(-1, 1, 2), homography_matrix)
+ x_ground, y_ground = ball_ground[0][0]
+
+ # Check if ball is likely on ground by comparing bbox size
+ # If ball bbox is large → likely on ground
+ # Simple heuristic: if diameter > threshold, assume on ground
+ on_ground_threshold = 30 # pixels - tune this based on typical ball size on ground
+
+ if ball_diameter and ball_diameter > on_ground_threshold:
+ # Ball likely on ground
+ results.append({
+ "frame": i,
+ "x_m": float(x_ground),
+ "y_m": float(y_ground),
+ "z_m": 0.0,
+ "on_ground": True,
+ "confidence": float(confidence)
+ })
+ else:
+ # Ball likely in air - use ray casting
+ try:
+ # Unproject 2D point to 3D ray
+ point_2d_normalized = np.array([
+ (ball_x - cx) / fx,
+ (ball_y - cy) / fy,
+ 1.0
+ ])
+
+ # Ray direction in camera coordinates
+ ray_camera = point_2d_normalized / np.linalg.norm(point_2d_normalized)
+
+ # Transform ray to world coordinates
+ ray_world = rotation_matrix.T @ ray_camera
+
+ # Estimate distance using ball size
+ # Real pickleball diameter: 74mm = 0.074m
+ ball_diameter_real = 0.074 # meters
+ ball_diameter_pixels = ball_diameter if ball_diameter else 20 # fallback to 20px
+
+ # Distance formula: D = (real_size * focal_length) / pixel_size
+ distance = (ball_diameter_real * focal_length) / ball_diameter_pixels
+
+ # Compute 3D point along ray
+ ball_3d = camera_position.flatten() + ray_world * distance
+
+ x_3d, y_3d, z_3d = ball_3d
+
+ # Sanity checks
+ if z_3d < 0 or z_3d > 10: # Height should be 0-10m
+ # Invalid, fallback to ground projection
+ results.append({
+ "frame": i,
+ "x_m": float(x_ground),
+ "y_m": float(y_ground),
+ "z_m": 0.0,
+ "on_ground": True,
+ "confidence": float(confidence)
+ })
+ else:
+ results.append({
+ "frame": i,
+ "x_m": float(x_3d),
+ "y_m": float(y_3d),
+ "z_m": float(z_3d),
+ "on_ground": False,
+ "confidence": float(confidence)
+ })
+
+ except Exception as e:
+ context.log.warning(f"Frame {i}: Error computing 3D coords, using ground projection: {e}")
+ results.append({
+ "frame": i,
+ "x_m": float(x_ground),
+ "y_m": float(y_ground),
+ "z_m": 0.0,
+ "on_ground": True,
+ "confidence": float(confidence)
+ })
+
+ if (i + 1) % 20 == 0:
+ context.log.info(f"Processed {i + 1}/{len(detect_ball_positions)} frames")
+
+ # Statistics
+ detected_count = sum(1 for r in results if r['x_m'] is not None)
+ on_ground_count = sum(1 for r in results if r.get('on_ground', False))
+ in_air_count = detected_count - on_ground_count
+
+ context.log.info(f"✓ Computed 3D coordinates: {detected_count} detections")
+ context.log.info(f" On ground: {on_ground_count}, In air: {in_air_count}")
+
+ return results
diff --git a/dagster_project/assets/court_detection.py b/dagster_project/assets/court_detection.py
new file mode 100644
index 0000000..7923457
--- /dev/null
+++ b/dagster_project/assets/court_detection.py
@@ -0,0 +1,277 @@
+"""Asset 2: Detect court keypoints using Roboflow Hosted API"""
+
+import os
+import cv2
+import numpy as np
+from pathlib import Path
+from typing import Dict, List
+from dagster import asset, AssetExecutionContext
+from inference_sdk import InferenceHTTPClient
+
+
@asset(
    io_manager_key="json_io_manager",
    compute_kind="roboflow",
    description="Detect pickleball court corners using Roboflow keypoint detection model"
)
def detect_court_keypoints(
    context: AssetExecutionContext,
    extract_video_frames: Dict
) -> Dict:
    """
    Detect court keypoints from the first extracted frame using a Roboflow model.

    Inputs:
        - extract_video_frames: metadata from frame extraction (provides frames_dir)

    Outputs:
        - data/{run_id}/court_detection_preview.jpg: visualization of detected points

    Returns:
        Dict with:
        - corners_pixel: list of 4 corner coordinates [[x, y], ...]
        - court_width_m: court width in meters (6.1)
        - court_length_m: court length in meters (13.4)
        - frame_width / frame_height: frame dimensions in pixels
    """
    frames_dir = Path(extract_video_frames['frames_dir'])
    first_frame_path = frames_dir / "frame_0000.jpg"

    context.log.info(f"Loading first frame: {first_frame_path}")

    if not first_frame_path.exists():
        raise FileNotFoundError(f"First frame not found: {first_frame_path}")

    # Load frame; cv2.imread returns None (no exception) on unreadable files
    frame = cv2.imread(str(first_frame_path))
    if frame is None:
        raise RuntimeError(f"Failed to load frame: {first_frame_path}")
    h, w = frame.shape[:2]
    context.log.info(f"Frame dimensions: {w}x{h}")

    # BUGFIX: all_points is read unconditionally by the visualization code
    # below, but previously it was only assigned inside the API branch, so
    # the no-API-key and estimated-corner paths raised NameError.
    all_points = []

    # Get API key
    api_key = os.getenv("ROBOFLOW_API_KEY")
    if not api_key:
        context.log.warning("ROBOFLOW_API_KEY not set, using estimated corners")
        corners = _estimate_court_corners(w, h)
    else:
        # Try to detect court using Roboflow Hosted API
        try:
            context.log.info("Detecting court using Roboflow Hosted API...")

            client = InferenceHTTPClient(
                api_url="https://serverless.roboflow.com",
                api_key=api_key
            )

            result = client.infer(str(first_frame_path), model_id="pickleball-court-cfyv4/1")

            # Extract keypoints from result
            if result and 'predictions' in result and len(result['predictions']) > 0:
                pred = result['predictions'][0]
                if 'points' in pred and len(pred['points']) >= 4:
                    # The model returns many points (court line keypoints)
                    all_points = [[p['x'], p['y']] for p in pred['points']]
                    context.log.info(f"✓ Detected {len(all_points)} keypoints from court lines")

                    # Find the 4 corners for calibration (not for visualization)
                    corners = _extract_court_corners_from_points(all_points, w, h)
                    context.log.info(f"✓ Extracted 4 corners from keypoints")
                else:
                    context.log.warning("No keypoints in prediction, using estimated corners")
                    corners = _estimate_court_corners(w, h)
            else:
                context.log.warning("No predictions from model, using estimated corners")
                corners = _estimate_court_corners(w, h)

        except Exception as e:
            context.log.warning(f"Court detection failed: {e}. Using estimated corners.")
            corners = _estimate_court_corners(w, h)

    context.log.info(f"Court corners: {corners}")

    # Save visualization - draw ALL points and lines returned by the model
    vis_frame = frame.copy()

    if len(all_points) > 0:
        context.log.info(f"Drawing {len(all_points)} keypoints on visualization")

        # Draw every detected point, labelled with its index
        for i, point in enumerate(all_points):
            x, y = int(point[0]), int(point[1])
            cv2.circle(vis_frame, (x, y), 5, (0, 255, 0), -1)
            cv2.putText(
                vis_frame,
                str(i),
                (x + 8, y),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.4,
                (255, 255, 0),
                1
            )

        # Connect consecutive points with line segments
        for i in range(len(all_points) - 1):
            p1 = tuple(map(int, all_points[i]))
            p2 = tuple(map(int, all_points[i + 1]))
            cv2.line(vis_frame, p1, p2, (0, 255, 0), 2)

    # Save visualization with run_id; ensure the run directory exists first
    # (cv2.imwrite fails silently when the target directory is missing)
    run_id = context.run_id
    vis_path = Path(f"data/{run_id}/court_detection_preview.jpg")
    vis_path.parent.mkdir(parents=True, exist_ok=True)
    cv2.imwrite(str(vis_path), vis_frame)
    context.log.info(f"Saved court visualization to {vis_path}")

    return {
        "corners_pixel": corners,
        "court_width_m": 6.1,
        "court_length_m": 13.4,
        "frame_width": w,
        "frame_height": h
    }
+
+
+def _estimate_court_corners(width: int, height: int) -> List[List[float]]:
+ """
+ Estimate court corners based on typical DJI camera position
+ (camera in corner at angle)
+
+ Returns corners in order: [TL, TR, BR, BL]
+ """
+ # Assume court takes up ~80% of frame with perspective
+ margin_x = width * 0.05
+ margin_y = height * 0.1
+
+ # Perspective: far edge narrower than near edge
+ return [
+ [margin_x + width * 0.1, margin_y], # Top-left (far)
+ [width - margin_x - width * 0.1, margin_y], # Top-right (far)
+ [width - margin_x, height - margin_y], # Bottom-right (near)
+ [margin_x, height - margin_y] # Bottom-left (near)
+ ]
+
+
def _extract_court_corners_from_points(points: List[List[float]], width: int, height: int) -> List[List[float]]:
    """
    Extract 4 court corners from many detected points (court lines)

    Strategy:
    1. Build convex hull from all points
    2. Classify hull points into 4 sides (left, right, top, bottom)
    3. Fit line for each side using linear regression
    4. Find 4 corners as intersections of fitted lines

    This works even if one corner is not visible on frame (extrapolation)
    """
    # Fewer than 4 points cannot define a quadrilateral — use the heuristic layout.
    if len(points) < 4:
        return _estimate_court_corners(width, height)

    # Build convex hull from all points
    points_array = np.array(points, dtype=np.float32)
    hull = cv2.convexHull(points_array)
    # cv2.convexHull returns shape (N, 1, 2); unwrap the middle axis.
    hull_points = np.array([p[0] for p in hull], dtype=np.float32)

    # Classify hull points into 4 sides
    # Strategy: sort hull points by angle from centroid, then split into 4 groups
    center = hull_points.mean(axis=0)

    # Calculate angle for each point relative to center
    angles = np.arctan2(hull_points[:, 1] - center[1], hull_points[:, 0] - center[0])

    # Sort points by angle
    sorted_indices = np.argsort(angles)
    sorted_points = hull_points[sorted_indices]

    # Split into 4 groups (4 sides)
    # NOTE(review): an equal four-way split by angular order only approximates
    # the physical sides; a side contributing few hull points can be merged
    # into a neighbor's group — confirm this is acceptable for calibration.
    n = len(sorted_points)
    quarter = n // 4

    side1 = sorted_points[0:quarter]
    side2 = sorted_points[quarter:2*quarter]
    side3 = sorted_points[2*quarter:3*quarter]
    side4 = sorted_points[3*quarter:]

    # Fit lines for each side using cv2.fitLine
    def fit_line_coefficients(pts):
        # At least two points are needed to define a line.
        if len(pts) < 2:
            return None
        # cv2.fitLine returns (vx, vy, x0, y0) - direction vector and point on line
        line = cv2.fitLine(pts, cv2.DIST_L2, 0, 0.01, 0.01)
        vx, vy, x0, y0 = line[0][0], line[1][0], line[2][0], line[3][0]
        # Convert to line equation: y = mx + b or vertical line x = c
        if abs(vx) < 1e-6:  # Vertical line
            return ('vertical', x0)
        m = vy / vx
        b = y0 - m * x0
        return ('normal', m, b)

    line1 = fit_line_coefficients(side1)
    line2 = fit_line_coefficients(side2)
    line3 = fit_line_coefficients(side3)
    line4 = fit_line_coefficients(side4)

    lines = [line1, line2, line3, line4]

    # Find intersections between adjacent sides
    def line_intersection(line_a, line_b):
        # Either side had too few points to fit a line.
        if line_a is None or line_b is None:
            return None

        # Handle vertical lines
        if line_a[0] == 'vertical' and line_b[0] == 'vertical':
            return None
        elif line_a[0] == 'vertical':
            x = line_a[1]
            m2, b2 = line_b[1], line_b[2]
            y = m2 * x + b2
            return [float(x), float(y)]
        elif line_b[0] == 'vertical':
            x = line_b[1]
            m1, b1 = line_a[1], line_a[2]
            y = m1 * x + b1
            return [float(x), float(y)]
        else:
            m1, b1 = line_a[1], line_a[2]
            m2, b2 = line_b[1], line_b[2]

            if abs(m1 - m2) < 1e-6:  # Parallel lines
                return None

            x = (b2 - b1) / (m1 - m2)
            y = m1 * x + b1
            return [float(x), float(y)]

    # Find 4 corners as intersections
    corners = []
    for i in range(4):
        next_i = (i + 1) % 4
        corner = line_intersection(lines[i], lines[next_i])
        if corner:
            corners.append(corner)

    # If we got 4 corners, return them
    if len(corners) == 4:
        return corners

    # Fallback: use convex hull extreme points
    # (extremes of x+y and x-y pick the hull point nearest each image corner)
    tl = hull_points[np.argmin(hull_points[:, 0] + hull_points[:, 1])].tolist()
    tr = hull_points[np.argmax(hull_points[:, 0] - hull_points[:, 1])].tolist()
    br = hull_points[np.argmax(hull_points[:, 0] + hull_points[:, 1])].tolist()
    bl = hull_points[np.argmin(hull_points[:, 0] - hull_points[:, 1])].tolist()

    return [tl, tr, br, bl]
+
+
def _extract_court_corners(keypoints: List[Dict], width: int, height: int) -> List[List[float]]:
    """
    Legacy wrapper: extract 4 court corners from keypoint dicts.

    Converts {'x': ..., 'y': ...} dicts to [x, y] pairs and delegates to
    _extract_court_corners_from_points. Kept for compatibility.
    """
    if len(keypoints) < 4:
        return _estimate_court_corners(width, height)

    xy_pairs = [[point['x'], point['y']] for point in keypoints]
    return _extract_court_corners_from_points(xy_pairs, width, height)
diff --git a/dagster_project/assets/interactive_viewer.py b/dagster_project/assets/interactive_viewer.py
new file mode 100644
index 0000000..449d5d5
--- /dev/null
+++ b/dagster_project/assets/interactive_viewer.py
@@ -0,0 +1,665 @@
+"""Asset: Create interactive HTML viewer for frame-by-frame ball tracking"""
+
+from pathlib import Path
+from typing import Dict, List
+from dagster import asset, AssetExecutionContext
+
+
@asset(
    io_manager_key="json_io_manager",
    compute_kind="html",
    description="Create interactive HTML viewer with frame + 3D court visualization"
)
def create_interactive_viewer(
    context: AssetExecutionContext,
    extract_video_frames: Dict,
    compute_ball_3d_coordinates: List[Dict]
) -> Dict:
    """
    Build an interactive HTML viewer showing, per frame:
    - Left: the original video frame
    - Right: an interactive 3D court (Three.js - rotatable with mouse)
    - Controls: Prev/Next buttons + slider

    Outputs:
        - data/{run_id}/viewer/index.html

    Returns:
        Dict with viewer_path and num_frames
    """
    import shutil

    run_id = context.run_id
    frames_dir = Path(extract_video_frames['frames_dir'])

    # Viewer layout on disk: data/{run_id}/viewer/{index.html, frames/}
    viewer_dir = Path(f"data/{run_id}/viewer")
    viewer_dir.mkdir(parents=True, exist_ok=True)
    viewer_frames_dir = viewer_dir / "frames"
    viewer_frames_dir.mkdir(exist_ok=True)

    # The viewer only shows frames where the ball was actually detected
    frames_with_ball = [entry for entry in compute_ball_3d_coordinates if entry['x_m'] is not None]

    context.log.info(f"Creating viewer for {len(frames_with_ball)} frames with ball detections")

    # Copy each detected frame's image next to the HTML page
    for entry in frames_with_ball:
        name = f"frame_{entry['frame']:04d}.jpg"
        source = frames_dir / name
        if source.exists():
            shutil.copy2(source, viewer_frames_dir / name)

    context.log.info(f"Copied {len(frames_with_ball)} frames to viewer directory")

    # Render the page and write it out
    html_path = viewer_dir / "index.html"
    with open(html_path, 'w') as f:
        f.write(_generate_html(frames_with_ball, run_id))

    context.log.info(f"✓ Interactive viewer created: {html_path}")
    context.log.info(f"  Open in browser: file://{html_path.absolute()}")

    return {
        "viewer_path": str(html_path),
        "num_frames": len(frames_with_ball)
    }
+
+
def _generate_html(frames_data: List[Dict], run_id: str) -> str:
    """Generate HTML with Three.js for real 3D visualization.

    Args:
        frames_data: per-frame ball records (frame index, x/y/z metric
            coordinates, confidence) — embedded into the page as JSON.
        run_id: Dagster run id; its first 8 chars appear in the page title.

    Returns:
        The complete HTML document as a string.
    """

    # Convert frames data to JSON for embedding in the page's script
    import json
    frames_json = json.dumps(frames_data, indent=2)

    # NOTE(review): the template below appears to have lost its HTML markup
    # (no <html>/<head>/<img>/<script> tags remain, and frames_json is never
    # interpolated) — verify against the original source before shipping.
    html = f"""




 Ball Tracking 3D Viewer - Run {run_id[:8]}



🎾 Pickleball Ball Tracking 3D Viewer



📹 Video Frame

![Video frame]()



🗺️ Interactive 3D Court (drag to rotate, scroll to zoom)













💡 Controls:
←/→ Navigate frames
Space Play/Pause
Mouse drag Rotate 3D view
Mouse wheel Zoom






"""

    return html
diff --git a/dagster_project/assets/net_detection.py b/dagster_project/assets/net_detection.py
new file mode 100644
index 0000000..39e043b
--- /dev/null
+++ b/dagster_project/assets/net_detection.py
@@ -0,0 +1,72 @@
+"""Asset: Detect tennis/pickleball net using Roboflow"""
+
+import os
+import cv2
+import numpy as np
+from pathlib import Path
+from typing import Dict
+from dagster import asset, AssetExecutionContext
+from inference_sdk import InferenceHTTPClient
+
+
@asset(
    io_manager_key="json_io_manager",
    compute_kind="roboflow",
    description="Detect pickleball/tennis net using Roboflow model"
)
def detect_net(
    context: AssetExecutionContext,
    extract_video_frames: Dict,
    detect_court_keypoints: Dict
) -> Dict:
    """
    Detect net on first frame using Roboflow model

    NO FALLBACKS - if model doesn't detect net, this will fail

    Inputs:
        - extract_video_frames: frame metadata (provides frames_dir)
        - detect_court_keypoints: court corners (reserved for visualization)

    Outputs:
        - data/{run_id}/net_detection_preview.jpg: visualization
        - JSON with net detection results

    Returns:
        Dict with net detection data

    Raises:
        ValueError: if ROBOFLOW_API_KEY is not set
        RuntimeError: if the first frame cannot be loaded
        NotImplementedError: always, until the real model id is wired in
    """
    run_id = context.run_id
    frames_dir = Path(extract_video_frames['frames_dir'])
    first_frame_path = frames_dir / "frame_0000.jpg"

    context.log.info(f"Loading first frame: {first_frame_path}")

    # Load frame
    # BUGFIX: cv2.imread returns None on failure instead of raising, which
    # previously surfaced here as an opaque AttributeError on frame.shape.
    frame = cv2.imread(str(first_frame_path))
    if frame is None:
        raise RuntimeError(f"Failed to load frame: {first_frame_path}")
    h, w = frame.shape[:2]
    context.log.info(f"Frame dimensions: {w}x{h}")

    # Get API key
    api_key = os.getenv("ROBOFLOW_API_KEY")
    if not api_key:
        raise ValueError("ROBOFLOW_API_KEY environment variable is not set")

    context.log.info("Detecting net using Roboflow model...")

    client = InferenceHTTPClient(
        api_url="https://serverless.roboflow.com",
        api_key=api_key
    )

    # Call Roboflow model - MODEL_ID WILL BE PROVIDED BY USER
    # Placeholder - user will provide correct model
    model_id = "MODEL_ID_PLACEHOLDER"

    result = client.infer(str(first_frame_path), model_id=model_id)

    context.log.info(f"Roboflow response: {result}")

    # TODO: Parse result based on actual model output format
    # User will provide correct model and we'll update parsing logic

    raise NotImplementedError("Waiting for correct Roboflow model from user")
diff --git a/dagster_project/assets/video_extraction.py b/dagster_project/assets/video_extraction.py
new file mode 100644
index 0000000..2f06de5
--- /dev/null
+++ b/dagster_project/assets/video_extraction.py
@@ -0,0 +1,83 @@
+"""Asset 1: Extract frames from video"""
+
+import cv2
+from pathlib import Path
+from typing import Dict
+from dagster import asset, AssetExecutionContext
+
+
@asset(
    io_manager_key="json_io_manager",
    compute_kind="opencv",
    description="Extract frames from video starting at specified second"
)
def extract_video_frames(context: AssetExecutionContext) -> Dict:
    """
    Extract frames from DJI_0017.MP4 video

    Inputs:
        - DJI_0017.MP4 (video file in root directory)

    Outputs:
        - data/{run_id}/frames/frame_XXXX.jpg (up to 100 frames)

    Returns:
        Dict with:
        - frames_dir: path to frames directory
        - num_frames: number of extracted frames
        - fps: video FPS
        - start_frame: starting frame number
        - start_sec: starting offset in seconds
    """
    # Configuration (hard-coded for this pipeline)
    video_path = "DJI_0017.MP4"
    start_sec = 10
    num_frames = 100

    context.log.info(f"Opening video: {video_path}")
    cap = cv2.VideoCapture(video_path)

    if not cap.isOpened():
        raise RuntimeError(f"Could not open video: {video_path}")

    # BUGFIX: release the capture even if extraction raises (resource leak).
    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        start_frame = int(start_sec * fps)

        context.log.info(f"Video info: {total_frames} frames, {fps} FPS")
        context.log.info(f"Extracting {num_frames} frames starting from frame {start_frame} ({start_sec}s)")

        # Create output directory with run_id
        run_id = context.run_id
        frames_dir = Path(f"data/{run_id}/frames")
        frames_dir.mkdir(parents=True, exist_ok=True)

        # Set starting position
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

        # Extract frames
        extracted = 0
        for i in range(num_frames):
            ret, frame = cap.read()
            if not ret:
                context.log.warning(f"Could not read frame {i}. Stopping extraction.")
                break

            frame_path = frames_dir / f"frame_{i:04d}.jpg"
            cv2.imwrite(str(frame_path), frame)
            extracted += 1

            if (i + 1) % 20 == 0:
                context.log.info(f"Extracted {i + 1}/{num_frames} frames")
    finally:
        cap.release()

    context.log.info(f"✓ Extracted {extracted} frames to {frames_dir}")

    return {
        "frames_dir": str(frames_dir),
        "num_frames": extracted,
        "fps": fps,
        "start_frame": start_frame,
        "start_sec": start_sec
    }
diff --git a/dagster_project/assets/visualization.py b/dagster_project/assets/visualization.py
new file mode 100644
index 0000000..68b6847
--- /dev/null
+++ b/dagster_project/assets/visualization.py
@@ -0,0 +1,90 @@
+"""Asset: Draw court polygon with 4 corners"""
+
+import cv2
+import numpy as np
+from pathlib import Path
+from typing import Dict
+from dagster import asset, AssetExecutionContext
+
+
@asset(
    io_manager_key="json_io_manager",
    compute_kind="opencv",
    description="Draw court polygon with 4 corners on first frame"
)
def visualize_ball_on_court(
    context: AssetExecutionContext,
    extract_video_frames: Dict,
    detect_court_keypoints: Dict
) -> Dict:
    """
    Overlay the detected court polygon (4 corners) on the first frame.

    Inputs:
        - extract_video_frames: frame metadata (provides frames_dir)
        - detect_court_keypoints: 4 court corners with perspective

    Outputs:
        - One image: data/{run_id}/court_polygon.jpg

    Returns:
        Dict with:
        - image_path: path to saved image
    """
    run_id = context.run_id
    frames_dir = Path(extract_video_frames['frames_dir'])

    # Load the first extracted frame
    first_frame_path = frames_dir / "frame_0000.jpg"
    context.log.info(f"Loading first frame: {first_frame_path}")

    if not first_frame_path.exists():
        raise FileNotFoundError(f"First frame not found: {first_frame_path}")

    image = cv2.imread(str(first_frame_path))
    if image is None:
        raise RuntimeError(f"Failed to load frame: {first_frame_path}")

    # Court corners: 4 pixel-space points forming a perspective quadrilateral
    corners = detect_court_keypoints['corners_pixel']
    polygon = np.array(corners, dtype=np.int32)

    context.log.info(f"Drawing court polygon with 4 corners: {corners}")

    # Closed green outline of the court
    cv2.polylines(image, [polygon], isClosed=True, color=(0, 255, 0), thickness=3)

    # Yellow filled circle plus index label at each corner
    yellow = (0, 255, 255)
    for idx, corner in enumerate(corners):
        center = (int(corner[0]), int(corner[1]))
        cv2.circle(image, center, 8, yellow, -1)
        cv2.putText(
            image,
            str(idx),
            (center[0] + 12, center[1]),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.6,
            yellow,
            2
        )

    # Save the annotated image
    output_path = Path(f"data/{run_id}/court_polygon.jpg")
    cv2.imwrite(str(output_path), image)
    context.log.info(f"✓ Saved court polygon visualization to {output_path}")

    return {
        "image_path": str(output_path)
    }
diff --git a/dagster_project/io_managers/__init__.py b/dagster_project/io_managers/__init__.py
new file mode 100644
index 0000000..19951b8
--- /dev/null
+++ b/dagster_project/io_managers/__init__.py
@@ -0,0 +1 @@
+"""IO Managers for Dagster assets"""
diff --git a/dagster_project/io_managers/json_io_manager.py b/dagster_project/io_managers/json_io_manager.py
new file mode 100644
index 0000000..9cad2a2
--- /dev/null
+++ b/dagster_project/io_managers/json_io_manager.py
@@ -0,0 +1,70 @@
+"""JSON IO Manager for storing asset outputs as JSON files"""
+
+import json
+from pathlib import Path
+from typing import Any
+from dagster import IOManager, io_manager, OutputContext, InputContext
+
+
class JSONIOManager(IOManager):
    """IO Manager that stores outputs as JSON files in data/{run_id}/ directory"""

    def __init__(self, base_path: str = "data"):
        # Root directory under which per-run subdirectories are created.
        self.base_path = Path(base_path)
        self.base_path.mkdir(parents=True, exist_ok=True)

    def _get_path(self, context) -> Path:
        """Resolve the JSON file path for an asset, namespaced by run_id.

        Outputs use the current run_id. Inputs prefer the upstream output's
        run_id; if that is unavailable (partial re-runs), the most recently
        modified run directory containing the asset's file is used instead.
        """
        asset_name = context.asset_key.path[-1]

        # For InputContext, try upstream run_id first, fallback to finding latest
        if isinstance(context, InputContext):
            try:
                run_id = context.upstream_output.run_id
            # BUGFIX: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt; Exception is broad enough for the
            # "upstream run_id unavailable" fallback.
            except Exception:
                # If upstream run_id not available, find the most recent run
                # directory that contains this asset (for partial re-runs).
                run_dirs = sorted([d for d in self.base_path.iterdir() if d.is_dir()],
                                  key=lambda d: d.stat().st_mtime, reverse=True)
                for run_dir in run_dirs:
                    potential_path = run_dir / f"{asset_name}.json"
                    if potential_path.exists():
                        return potential_path
                # If not found, use the latest run_dir
                run_id = run_dirs[0].name if run_dirs else "unknown"
        else:
            run_id = context.run_id

        # Create run-specific directory
        run_dir = self.base_path / run_id
        run_dir.mkdir(parents=True, exist_ok=True)

        return run_dir / f"{asset_name}.json"

    def handle_output(self, context: OutputContext, obj: Any):
        """Save asset output to JSON file"""
        file_path = self._get_path(context)

        with open(file_path, 'w') as f:
            json.dump(obj, f, indent=2)

        context.log.info(f"Saved {context.asset_key.path[-1]} to {file_path}")

    def load_input(self, context: InputContext) -> Any:
        """Load asset input from JSON file"""
        file_path = self._get_path(context)

        if not file_path.exists():
            raise FileNotFoundError(f"Asset output not found: {file_path}")

        with open(file_path, 'r') as f:
            obj = json.load(f)

        context.log.info(f"Loaded {context.asset_key.path[-1]} from {file_path}")
        return obj
+
+
@io_manager
def json_io_manager():
    """Factory that builds the JSON IO Manager rooted at the data/ directory."""
    manager = JSONIOManager(base_path="data")
    return manager
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_3d_heatmap.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_3d_heatmap.jpg
new file mode 100644
index 0000000..c3002e9
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_3d_heatmap.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0003.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0003.jpg
new file mode 100644
index 0000000..0e05244
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0003.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0004.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0004.jpg
new file mode 100644
index 0000000..79e1efd
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0004.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0006.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0006.jpg
new file mode 100644
index 0000000..51763a8
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0006.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0007.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0007.jpg
new file mode 100644
index 0000000..b4b5ef2
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0007.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0008.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0008.jpg
new file mode 100644
index 0000000..f292856
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0008.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0009.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0009.jpg
new file mode 100644
index 0000000..88caa36
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0009.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0010.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0010.jpg
new file mode 100644
index 0000000..4fe364a
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0010.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0011.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0011.jpg
new file mode 100644
index 0000000..73b4b8a
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0011.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0014.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0014.jpg
new file mode 100644
index 0000000..1c720bc
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0014.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0017.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0017.jpg
new file mode 100644
index 0000000..234b1a0
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0017.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0018.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0018.jpg
new file mode 100644
index 0000000..545b094
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0018.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0019.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0019.jpg
new file mode 100644
index 0000000..3116f4a
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0019.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0021.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0021.jpg
new file mode 100644
index 0000000..404a03a
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0021.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0022.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0022.jpg
new file mode 100644
index 0000000..cd4d557
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0022.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0023.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0023.jpg
new file mode 100644
index 0000000..464b3a7
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0023.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0029.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0029.jpg
new file mode 100644
index 0000000..4ed78d4
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0029.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0030.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0030.jpg
new file mode 100644
index 0000000..d00c9b8
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0030.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0042.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0042.jpg
new file mode 100644
index 0000000..60ab810
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0042.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0062.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0062.jpg
new file mode 100644
index 0000000..a75c5bf
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0062.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0065.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0065.jpg
new file mode 100644
index 0000000..5fae845
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0065.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0082.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0082.jpg
new file mode 100644
index 0000000..5d6b9c3
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0082.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0083.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0083.jpg
new file mode 100644
index 0000000..1a63d26
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0083.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0084.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0084.jpg
new file mode 100644
index 0000000..e15c3dc
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0084.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0085.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0085.jpg
new file mode 100644
index 0000000..e41d912
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0085.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0087.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0087.jpg
new file mode 100644
index 0000000..5257883
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0087.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0088.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0088.jpg
new file mode 100644
index 0000000..99c1409
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0088.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0092.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0092.jpg
new file mode 100644
index 0000000..9a6d551
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0092.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0093.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0093.jpg
new file mode 100644
index 0000000..cf2fdfd
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0093.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0096.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0096.jpg
new file mode 100644
index 0000000..e3df131
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0096.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0097.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0097.jpg
new file mode 100644
index 0000000..a2f740e
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0097.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0098.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0098.jpg
new file mode 100644
index 0000000..84d6b74
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_detections/detection_frame_0098.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_trajectory_side_view.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_trajectory_side_view.jpg
new file mode 100644
index 0000000..aabc1aa
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_trajectory_side_view.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_trajectory_top_view.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_trajectory_top_view.jpg
new file mode 100644
index 0000000..ba53920
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_trajectory_top_view.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/calibrate_camera_3d.json b/data/20602718-5870-4419-9fa3-3a067ff0ad00/calibrate_camera_3d.json
new file mode 100644
index 0000000..e686030
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/calibrate_camera_3d.json
@@ -0,0 +1,57 @@
+{
+ "camera_matrix": [
+ [
+ 1920.0,
+ 0.0,
+ 960.0
+ ],
+ [
+ 0.0,
+ 1920.0,
+ 540.0
+ ],
+ [
+ 0.0,
+ 0.0,
+ 1.0
+ ]
+ ],
+ "rotation_vector": [
+ 0.9480162063192903,
+ -1.0991197215888209,
+ 0.8109145675124131
+ ],
+ "translation_vector": [
+ 2.0009117491752173,
+ -1.3630239735893306,
+ 9.994446229895267
+ ],
+ "rotation_matrix": [
+ [
+ 0.26321344952133247,
+ -0.8971735762272555,
+ -0.35468049581373706
+ ],
+ [
+ 0.07416745228464969,
+ 0.3853747246273644,
+ -0.9197747064580476
+ ],
+ [
+ 0.9618824611212565,
+ 0.21579132451973237,
+ 0.16797688903338565
+ ]
+ ],
+ "reprojection_error": 200.16749572753906,
+ "focal_length": 1920.0,
+ "principal_point": [
+ 960.0,
+ 540.0
+ ],
+ "image_size": [
+ 1920,
+ 1080
+ ],
+ "calibrated": true
+}
\ No newline at end of file
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/compute_ball_3d_coordinates.json b/data/20602718-5870-4419-9fa3-3a067ff0ad00/compute_ball_3d_coordinates.json
new file mode 100644
index 0000000..14fc7cf
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/compute_ball_3d_coordinates.json
@@ -0,0 +1,802 @@
+[
+ {
+ "frame": 0,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 1,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 2,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 3,
+ "x_m": 10.044878959655762,
+ "y_m": 3.3315815925598145,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.4595355987548828
+ },
+ {
+ "frame": 4,
+ "x_m": 9.925751686096191,
+ "y_m": 3.282414674758911,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7499630451202393
+ },
+ {
+ "frame": 5,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 6,
+ "x_m": 9.522378921508789,
+ "y_m": 3.081491708755493,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.4438209533691406
+ },
+ {
+ "frame": 7,
+ "x_m": 9.406031608581543,
+ "y_m": 3.0407073497772217,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.8662319779396057
+ },
+ {
+ "frame": 8,
+ "x_m": 9.371339797973633,
+ "y_m": 3.0464587211608887,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9164504408836365
+ },
+ {
+ "frame": 9,
+ "x_m": 9.37229061126709,
+ "y_m": 3.072193145751953,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9407913088798523
+ },
+ {
+ "frame": 10,
+ "x_m": 9.378125190734863,
+ "y_m": 3.1054039001464844,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9483180642127991
+ },
+ {
+ "frame": 11,
+ "x_m": 9.478368759155273,
+ "y_m": 3.180798053741455,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9082649350166321
+ },
+ {
+ "frame": 12,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 13,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 14,
+ "x_m": 9.910148620605469,
+ "y_m": 3.5509445667266846,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.667772114276886
+ },
+ {
+ "frame": 15,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 16,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 17,
+ "x_m": 8.93185043334961,
+ "y_m": 2.7611701488494873,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.5858595967292786
+ },
+ {
+ "frame": 18,
+ "x_m": 8.352518081665039,
+ "y_m": 2.2731122970581055,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.8277773857116699
+ },
+ {
+ "frame": 19,
+ "x_m": 7.649472713470459,
+ "y_m": 1.729779601097107,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.6525294780731201
+ },
+ {
+ "frame": 20,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 21,
+ "x_m": 6.449870586395264,
+ "y_m": 0.7403887510299683,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9178251624107361
+ },
+ {
+ "frame": 22,
+ "x_m": 5.954407215118408,
+ "y_m": 0.2702276408672333,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.6851440668106079
+ },
+ {
+ "frame": 23,
+ "x_m": 5.351879596710205,
+ "y_m": -0.2799437344074249,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.8174329400062561
+ },
+ {
+ "frame": 24,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 25,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 26,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 27,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 28,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 29,
+ "x_m": 1.9875693942279895,
+ "y_m": -0.4494613476800051,
+ "z_m": 0.538501512841481,
+ "on_ground": false,
+ "confidence": 0.7200624942779541
+ },
+ {
+ "frame": 30,
+ "x_m": 3.134514534829931,
+ "y_m": -0.5216126547913007,
+ "z_m": 0.7916199045450409,
+ "on_ground": false,
+ "confidence": 0.4647325277328491
+ },
+ {
+ "frame": 31,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 32,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 33,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 34,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 35,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 36,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 37,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 38,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 39,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 40,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 41,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 42,
+ "x_m": 3.9051076612543536,
+ "y_m": -0.5035200640235366,
+ "z_m": 0.4195843244793487,
+ "on_ground": false,
+ "confidence": 0.4238705635070801
+ },
+ {
+ "frame": 43,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 44,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 45,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 46,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 47,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 48,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 49,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 50,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 51,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 52,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 53,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 54,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 55,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 56,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 57,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 58,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 59,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 60,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 61,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 62,
+ "x_m": 1.9072337782891502,
+ "y_m": -0.2804220005117505,
+ "z_m": 0.8963949565054601,
+ "on_ground": false,
+ "confidence": 0.46716606616973877
+ },
+ {
+ "frame": 63,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 64,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 65,
+ "x_m": 3.017917751921617,
+ "y_m": -0.062196873493954974,
+ "z_m": 1.3217371998457894,
+ "on_ground": false,
+ "confidence": 0.7788172364234924
+ },
+ {
+ "frame": 66,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 67,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 68,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 69,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 70,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 71,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 72,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 73,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 74,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 75,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 76,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 77,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 78,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 79,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 80,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 81,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 82,
+ "x_m": 5.462012767791748,
+ "y_m": 4.150640964508057,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7371496558189392
+ },
+ {
+ "frame": 83,
+ "x_m": 6.035140037536621,
+ "y_m": 4.453850746154785,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.47047895193099976
+ },
+ {
+ "frame": 84,
+ "x_m": 6.361359596252441,
+ "y_m": 4.682921409606934,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7571220993995667
+ },
+ {
+ "frame": 85,
+ "x_m": 5.944571495056152,
+ "y_m": 4.653173923492432,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.6384866237640381
+ },
+ {
+ "frame": 86,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 87,
+ "x_m": 5.069350242614746,
+ "y_m": 4.607361316680908,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.924823522567749
+ },
+ {
+ "frame": 88,
+ "x_m": 4.626520156860352,
+ "y_m": 4.583075046539307,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.6589019298553467
+ },
+ {
+ "frame": 89,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 90,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 91,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 92,
+ "x_m": 3.593766450881958,
+ "y_m": 4.720729351043701,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.616001307964325
+ },
+ {
+ "frame": 93,
+ "x_m": 3.4283807277679443,
+ "y_m": 4.76817512512207,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7801673412322998
+ },
+ {
+ "frame": 94,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 95,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 96,
+ "x_m": 3.5402987003326416,
+ "y_m": 5.051088809967041,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9100144505500793
+ },
+ {
+ "frame": 97,
+ "x_m": 3.6705820560455322,
+ "y_m": 5.15645694732666,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9327566623687744
+ },
+ {
+ "frame": 98,
+ "x_m": 3.850410223007202,
+ "y_m": 5.273887634277344,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7828439474105835
+ },
+ {
+ "frame": 99,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ }
+]
\ No newline at end of file
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/court_detection_preview.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/court_detection_preview.jpg
new file mode 100644
index 0000000..395571a
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/court_detection_preview.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/court_polygon.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/court_polygon.jpg
new file mode 100644
index 0000000..a98a927
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/court_polygon.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/create_interactive_viewer.json b/data/20602718-5870-4419-9fa3-3a067ff0ad00/create_interactive_viewer.json
new file mode 100644
index 0000000..caab0c9
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/create_interactive_viewer.json
@@ -0,0 +1,4 @@
+{
+ "viewer_path": "data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/index.html",
+ "num_frames": 31
+}
\ No newline at end of file
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/detect_ball_positions.json b/data/20602718-5870-4419-9fa3-3a067ff0ad00/detect_ball_positions.json
new file mode 100644
index 0000000..a10e339
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/detect_ball_positions.json
@@ -0,0 +1,957 @@
+[
+ {
+ "frame": 0,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 1,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 2,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 3,
+ "x": 1321.0,
+ "y": 425.0,
+ "confidence": 0.4595355987548828,
+ "diameter_px": 14.0,
+ "bbox": [
+ 1315.0,
+ 417.0,
+ 1327.0,
+ 433.0
+ ]
+ },
+ {
+ "frame": 4,
+ "x": 1319.0,
+ "y": 420.5,
+ "confidence": 0.7499630451202393,
+ "diameter_px": 14.5,
+ "bbox": [
+ 1313.0,
+ 412.0,
+ 1325.0,
+ 429.0
+ ]
+ },
+ {
+ "frame": 5,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 6,
+ "x": 1316.0,
+ "y": 404.0,
+ "confidence": 0.4438209533691406,
+ "diameter_px": 20.0,
+ "bbox": [
+ 1307.0,
+ 393.0,
+ 1325.0,
+ 415.0
+ ]
+ },
+ {
+ "frame": 7,
+ "x": 1313.5,
+ "y": 400.5,
+ "confidence": 0.8662319779396057,
+ "diameter_px": 19.0,
+ "bbox": [
+ 1305.0,
+ 390.0,
+ 1322.0,
+ 411.0
+ ]
+ },
+ {
+ "frame": 8,
+ "x": 1311.0,
+ "y": 400.5,
+ "confidence": 0.9164504408836365,
+ "diameter_px": 17.5,
+ "bbox": [
+ 1303.0,
+ 391.0,
+ 1319.0,
+ 410.0
+ ]
+ },
+ {
+ "frame": 9,
+ "x": 1308.5,
+ "y": 402.0,
+ "confidence": 0.9407913088798523,
+ "diameter_px": 17.5,
+ "bbox": [
+ 1300.0,
+ 393.0,
+ 1317.0,
+ 411.0
+ ]
+ },
+ {
+ "frame": 10,
+ "x": 1305.5,
+ "y": 404.0,
+ "confidence": 0.9483180642127991,
+ "diameter_px": 18.5,
+ "bbox": [
+ 1297.0,
+ 394.0,
+ 1314.0,
+ 414.0
+ ]
+ },
+ {
+ "frame": 11,
+ "x": 1303.5,
+ "y": 409.5,
+ "confidence": 0.9082649350166321,
+ "diameter_px": 16.0,
+ "bbox": [
+ 1296.0,
+ 401.0,
+ 1311.0,
+ 418.0
+ ]
+ },
+ {
+ "frame": 12,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 13,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 14,
+ "x": 1289.0,
+ "y": 438.5,
+ "confidence": 0.667772114276886,
+ "diameter_px": 17.5,
+ "bbox": [
+ 1282.0,
+ 428.0,
+ 1296.0,
+ 449.0
+ ]
+ },
+ {
+ "frame": 15,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 16,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 17,
+ "x": 1314.5,
+ "y": 381.0,
+ "confidence": 0.5858595967292786,
+ "diameter_px": 18.5,
+ "bbox": [
+ 1306.0,
+ 371.0,
+ 1323.0,
+ 391.0
+ ]
+ },
+ {
+ "frame": 18,
+ "x": 1328.0,
+ "y": 353.5,
+ "confidence": 0.8277773857116699,
+ "diameter_px": 21.5,
+ "bbox": [
+ 1318.0,
+ 342.0,
+ 1338.0,
+ 365.0
+ ]
+ },
+ {
+ "frame": 19,
+ "x": 1338.0,
+ "y": 327.5,
+ "confidence": 0.6525294780731201,
+ "diameter_px": 19.5,
+ "bbox": [
+ 1329.0,
+ 317.0,
+ 1347.0,
+ 338.0
+ ]
+ },
+ {
+ "frame": 20,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 21,
+ "x": 1355.5,
+ "y": 290.5,
+ "confidence": 0.9178251624107361,
+ "diameter_px": 16.0,
+ "bbox": [
+ 1348.0,
+ 282.0,
+ 1363.0,
+ 299.0
+ ]
+ },
+ {
+ "frame": 22,
+ "x": 1365.0,
+ "y": 276.5,
+ "confidence": 0.6851440668106079,
+ "diameter_px": 14.5,
+ "bbox": [
+ 1359.0,
+ 268.0,
+ 1371.0,
+ 285.0
+ ]
+ },
+ {
+ "frame": 23,
+ "x": 1374.0,
+ "y": 262.0,
+ "confidence": 0.8174329400062561,
+ "diameter_px": 19.0,
+ "bbox": [
+ 1365.0,
+ 252.0,
+ 1383.0,
+ 272.0
+ ]
+ },
+ {
+ "frame": 24,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 25,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 26,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 27,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 28,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 29,
+ "x": 1401.5,
+ "y": 236.0,
+ "confidence": 0.7200624942779541,
+ "diameter_px": 11.5,
+ "bbox": [
+ 1396.0,
+ 230.0,
+ 1407.0,
+ 242.0
+ ]
+ },
+ {
+ "frame": 30,
+ "x": 1404.0,
+ "y": 236.5,
+ "confidence": 0.4647325277328491,
+ "diameter_px": 10.5,
+ "bbox": [
+ 1399.0,
+ 231.0,
+ 1409.0,
+ 242.0
+ ]
+ },
+ {
+ "frame": 31,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 32,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 33,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 34,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 35,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 36,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 37,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 38,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 39,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 40,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 41,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 42,
+ "x": 1426.5,
+ "y": 308.5,
+ "confidence": 0.4238705635070801,
+ "diameter_px": 10.0,
+ "bbox": [
+ 1422.0,
+ 303.0,
+ 1431.0,
+ 314.0
+ ]
+ },
+ {
+ "frame": 43,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 44,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 45,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 46,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 47,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 48,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 49,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 50,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 51,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 52,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 53,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 54,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 55,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 56,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 57,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 58,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 59,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 60,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 61,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 62,
+ "x": 1352.5,
+ "y": 193.0,
+ "confidence": 0.46716606616973877,
+ "diameter_px": 11.5,
+ "bbox": [
+ 1347.0,
+ 187.0,
+ 1358.0,
+ 199.0
+ ]
+ },
+ {
+ "frame": 63,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 64,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 65,
+ "x": 1309.0,
+ "y": 191.5,
+ "confidence": 0.7788172364234924,
+ "diameter_px": 10.5,
+ "bbox": [
+ 1304.0,
+ 186.0,
+ 1314.0,
+ 197.0
+ ]
+ },
+ {
+ "frame": 66,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 67,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 68,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 69,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 70,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 71,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 72,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 73,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 74,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 75,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 76,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 77,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 78,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 79,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 80,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 81,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 82,
+ "x": 957.0,
+ "y": 429.0,
+ "confidence": 0.7371496558189392,
+ "diameter_px": 24.0,
+ "bbox": [
+ 946.0,
+ 416.0,
+ 968.0,
+ 442.0
+ ]
+ },
+ {
+ "frame": 83,
+ "x": 932.0,
+ "y": 458.0,
+ "confidence": 0.47047895193099976,
+ "diameter_px": 22.0,
+ "bbox": [
+ 922.0,
+ 446.0,
+ 942.0,
+ 470.0
+ ]
+ },
+ {
+ "frame": 84,
+ "x": 904.5,
+ "y": 481.5,
+ "confidence": 0.7571220993995667,
+ "diameter_px": 15.0,
+ "bbox": [
+ 898.0,
+ 473.0,
+ 911.0,
+ 490.0
+ ]
+ },
+ {
+ "frame": 85,
+ "x": 888.0,
+ "y": 473.0,
+ "confidence": 0.6384866237640381,
+ "diameter_px": 17.0,
+ "bbox": [
+ 880.0,
+ 464.0,
+ 896.0,
+ 482.0
+ ]
+ },
+ {
+ "frame": 86,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 87,
+ "x": 852.0,
+ "y": 457.5,
+ "confidence": 0.924823522567749,
+ "diameter_px": 17.5,
+ "bbox": [
+ 844.0,
+ 448.0,
+ 860.0,
+ 467.0
+ ]
+ },
+ {
+ "frame": 88,
+ "x": 835.0,
+ "y": 450.0,
+ "confidence": 0.6589019298553467,
+ "diameter_px": 19.0,
+ "bbox": [
+ 826.0,
+ 440.0,
+ 844.0,
+ 460.0
+ ]
+ },
+ {
+ "frame": 89,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 90,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 91,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 92,
+ "x": 757.0,
+ "y": 447.5,
+ "confidence": 0.616001307964325,
+ "diameter_px": 20.5,
+ "bbox": [
+ 747.0,
+ 437.0,
+ 767.0,
+ 458.0
+ ]
+ },
+ {
+ "frame": 93,
+ "x": 739.0,
+ "y": 449.0,
+ "confidence": 0.7801673412322998,
+ "diameter_px": 20.0,
+ "bbox": [
+ 729.0,
+ 439.0,
+ 749.0,
+ 459.0
+ ]
+ },
+ {
+ "frame": 94,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 95,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ },
+ {
+ "frame": 96,
+ "x": 678.0,
+ "y": 473.0,
+ "confidence": 0.9100144505500793,
+ "diameter_px": 22.0,
+ "bbox": [
+ 667.0,
+ 462.0,
+ 689.0,
+ 484.0
+ ]
+ },
+ {
+ "frame": 97,
+ "x": 657.5,
+ "y": 484.0,
+ "confidence": 0.9327566623687744,
+ "diameter_px": 18.5,
+ "bbox": [
+ 649.0,
+ 474.0,
+ 666.0,
+ 494.0
+ ]
+ },
+ {
+ "frame": 98,
+ "x": 635.0,
+ "y": 497.5,
+ "confidence": 0.7828439474105835,
+ "diameter_px": 18.5,
+ "bbox": [
+ 626.0,
+ 488.0,
+ 644.0,
+ 507.0
+ ]
+ },
+ {
+ "frame": 99,
+ "x": null,
+ "y": null,
+ "confidence": 0.0,
+ "diameter_px": null,
+ "bbox": null
+ }
+]
\ No newline at end of file
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/detect_court_keypoints.json b/data/20602718-5870-4419-9fa3-3a067ff0ad00/detect_court_keypoints.json
new file mode 100644
index 0000000..aac4955
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/detect_court_keypoints.json
@@ -0,0 +1,24 @@
+{
+ "corners_pixel": [
+ [
+ 1185.6519775390625,
+ 249.94744873046875
+ ],
+ [
+ 1687.109375,
+ 302.2617492675781
+ ],
+ [
+ 1108.75732421875,
+ 962.1505126953125
+ ],
+ [
+ 210.10595703125,
+ 516.1638793945312
+ ]
+ ],
+ "court_width_m": 6.1,
+ "court_length_m": 13.4,
+ "frame_width": 1920,
+ "frame_height": 1080
+}
\ No newline at end of file
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/detect_net.json b/data/20602718-5870-4419-9fa3-3a067ff0ad00/detect_net.json
new file mode 100644
index 0000000..0b2ede8
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/detect_net.json
@@ -0,0 +1,24 @@
+{
+ "net_corners_pixel": [
+ [
+ 960,
+ 270
+ ],
+ [
+ 970,
+ 270
+ ],
+ [
+ 970,
+ 810
+ ],
+ [
+ 960,
+ 810
+ ]
+ ],
+ "net_height_m": 0.914,
+ "detection_confidence": 0.5,
+ "frame_width": 1920,
+ "frame_height": 1080
+}
\ No newline at end of file
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/extract_video_frames.json b/data/20602718-5870-4419-9fa3-3a067ff0ad00/extract_video_frames.json
new file mode 100644
index 0000000..63b4c91
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/extract_video_frames.json
@@ -0,0 +1,7 @@
+{
+ "frames_dir": "data/20602718-5870-4419-9fa3-3a067ff0ad00/frames",
+ "num_frames": 100,
+ "fps": 29.97002997002997,
+ "start_frame": 299,
+ "start_sec": 10
+}
\ No newline at end of file
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0000.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0000.jpg
new file mode 100644
index 0000000..03070f0
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0000.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0001.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0001.jpg
new file mode 100644
index 0000000..702b519
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0001.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0002.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0002.jpg
new file mode 100644
index 0000000..f2adf3d
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0002.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0003.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0003.jpg
new file mode 100644
index 0000000..d866005
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0003.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0004.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0004.jpg
new file mode 100644
index 0000000..badfd77
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0004.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0005.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0005.jpg
new file mode 100644
index 0000000..27b6092
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0005.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0006.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0006.jpg
new file mode 100644
index 0000000..9526b7c
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0006.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0007.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0007.jpg
new file mode 100644
index 0000000..cb5c570
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0007.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0008.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0008.jpg
new file mode 100644
index 0000000..5cdea20
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0008.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0009.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0009.jpg
new file mode 100644
index 0000000..2bd3598
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0009.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0010.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0010.jpg
new file mode 100644
index 0000000..d8aa3d6
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0010.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0011.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0011.jpg
new file mode 100644
index 0000000..ec027fb
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0011.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0012.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0012.jpg
new file mode 100644
index 0000000..3bf96c5
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0012.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0013.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0013.jpg
new file mode 100644
index 0000000..1f8e3d7
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0013.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0014.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0014.jpg
new file mode 100644
index 0000000..10fcfd5
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0014.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0015.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0015.jpg
new file mode 100644
index 0000000..0fc64cf
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0015.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0016.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0016.jpg
new file mode 100644
index 0000000..fa6fe8d
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0016.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0017.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0017.jpg
new file mode 100644
index 0000000..81b2bca
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0017.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0018.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0018.jpg
new file mode 100644
index 0000000..57b37a1
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0018.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0019.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0019.jpg
new file mode 100644
index 0000000..fa548b4
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0019.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0020.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0020.jpg
new file mode 100644
index 0000000..16cca9d
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0020.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0021.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0021.jpg
new file mode 100644
index 0000000..16833da
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0021.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0022.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0022.jpg
new file mode 100644
index 0000000..c6a6dc4
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0022.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0023.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0023.jpg
new file mode 100644
index 0000000..8facc8f
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0023.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0024.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0024.jpg
new file mode 100644
index 0000000..6d33903
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0024.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0025.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0025.jpg
new file mode 100644
index 0000000..6454415
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0025.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0026.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0026.jpg
new file mode 100644
index 0000000..46b4697
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0026.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0027.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0027.jpg
new file mode 100644
index 0000000..37cf46b
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0027.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0028.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0028.jpg
new file mode 100644
index 0000000..b90f8f2
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0028.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0029.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0029.jpg
new file mode 100644
index 0000000..a967367
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0029.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0030.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0030.jpg
new file mode 100644
index 0000000..7b142f6
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0030.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0031.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0031.jpg
new file mode 100644
index 0000000..a90cc62
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0031.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0032.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0032.jpg
new file mode 100644
index 0000000..324b72e
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0032.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0033.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0033.jpg
new file mode 100644
index 0000000..0bbc860
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0033.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0034.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0034.jpg
new file mode 100644
index 0000000..ad66725
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0034.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0035.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0035.jpg
new file mode 100644
index 0000000..d8ed392
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0035.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0036.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0036.jpg
new file mode 100644
index 0000000..e1db866
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0036.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0037.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0037.jpg
new file mode 100644
index 0000000..f63b4ca
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0037.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0038.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0038.jpg
new file mode 100644
index 0000000..3ab92e3
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0038.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0039.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0039.jpg
new file mode 100644
index 0000000..958e525
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0039.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0040.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0040.jpg
new file mode 100644
index 0000000..dc88729
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0040.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0041.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0041.jpg
new file mode 100644
index 0000000..9a255b9
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0041.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0042.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0042.jpg
new file mode 100644
index 0000000..0fa4d18
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0042.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0043.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0043.jpg
new file mode 100644
index 0000000..ee37dda
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0043.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0044.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0044.jpg
new file mode 100644
index 0000000..7e0bd98
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0044.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0045.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0045.jpg
new file mode 100644
index 0000000..97148aa
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0045.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0046.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0046.jpg
new file mode 100644
index 0000000..77eb923
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0046.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0047.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0047.jpg
new file mode 100644
index 0000000..e1ff02e
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0047.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0048.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0048.jpg
new file mode 100644
index 0000000..728f451
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0048.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0049.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0049.jpg
new file mode 100644
index 0000000..022a404
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0049.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0050.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0050.jpg
new file mode 100644
index 0000000..08b0519
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0050.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0051.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0051.jpg
new file mode 100644
index 0000000..7a77ab0
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0051.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0052.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0052.jpg
new file mode 100644
index 0000000..e3a7f52
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0052.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0053.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0053.jpg
new file mode 100644
index 0000000..4b30b6f
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0053.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0054.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0054.jpg
new file mode 100644
index 0000000..0d12410
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0054.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0055.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0055.jpg
new file mode 100644
index 0000000..7732560
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0055.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0056.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0056.jpg
new file mode 100644
index 0000000..2d1d396
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0056.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0057.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0057.jpg
new file mode 100644
index 0000000..a897c46
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0057.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0058.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0058.jpg
new file mode 100644
index 0000000..55db62e
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0058.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0059.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0059.jpg
new file mode 100644
index 0000000..557f7a7
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0059.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0060.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0060.jpg
new file mode 100644
index 0000000..c6cbfc6
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0060.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0061.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0061.jpg
new file mode 100644
index 0000000..f43db73
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0061.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0062.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0062.jpg
new file mode 100644
index 0000000..bbc298a
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0062.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0063.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0063.jpg
new file mode 100644
index 0000000..3508df2
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0063.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0064.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0064.jpg
new file mode 100644
index 0000000..7899ea3
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0064.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0065.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0065.jpg
new file mode 100644
index 0000000..0f4d817
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0065.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0066.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0066.jpg
new file mode 100644
index 0000000..ad605ae
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0066.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0067.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0067.jpg
new file mode 100644
index 0000000..5689d9f
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0067.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0068.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0068.jpg
new file mode 100644
index 0000000..73c7811
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0068.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0069.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0069.jpg
new file mode 100644
index 0000000..df5cdc6
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0069.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0070.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0070.jpg
new file mode 100644
index 0000000..f7b4277
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0070.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0071.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0071.jpg
new file mode 100644
index 0000000..4dfa24f
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0071.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0072.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0072.jpg
new file mode 100644
index 0000000..2d791fa
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0072.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0073.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0073.jpg
new file mode 100644
index 0000000..dd7ef7d
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0073.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0074.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0074.jpg
new file mode 100644
index 0000000..396ba5e
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0074.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0075.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0075.jpg
new file mode 100644
index 0000000..63955d6
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0075.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0076.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0076.jpg
new file mode 100644
index 0000000..2a58caf
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0076.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0077.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0077.jpg
new file mode 100644
index 0000000..f5eb212
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0077.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0078.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0078.jpg
new file mode 100644
index 0000000..7aff7ef
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0078.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0079.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0079.jpg
new file mode 100644
index 0000000..dc81d6d
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0079.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0080.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0080.jpg
new file mode 100644
index 0000000..a46bec1
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0080.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0081.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0081.jpg
new file mode 100644
index 0000000..f513814
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0081.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0082.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0082.jpg
new file mode 100644
index 0000000..0fcd75e
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0082.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0083.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0083.jpg
new file mode 100644
index 0000000..549e3de
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0083.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0084.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0084.jpg
new file mode 100644
index 0000000..5c2feae
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0084.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0085.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0085.jpg
new file mode 100644
index 0000000..5fdb045
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0085.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0086.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0086.jpg
new file mode 100644
index 0000000..48eae8f
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0086.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0087.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0087.jpg
new file mode 100644
index 0000000..9a76b86
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0087.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0088.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0088.jpg
new file mode 100644
index 0000000..317bd3c
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0088.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0089.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0089.jpg
new file mode 100644
index 0000000..7a8f392
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0089.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0090.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0090.jpg
new file mode 100644
index 0000000..ff6b777
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0090.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0091.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0091.jpg
new file mode 100644
index 0000000..698531b
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0091.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0092.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0092.jpg
new file mode 100644
index 0000000..6c33a6a
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0092.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0093.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0093.jpg
new file mode 100644
index 0000000..d297f69
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0093.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0094.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0094.jpg
new file mode 100644
index 0000000..adee3b9
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0094.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0095.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0095.jpg
new file mode 100644
index 0000000..e54a452
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0095.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0096.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0096.jpg
new file mode 100644
index 0000000..76ef7b5
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0096.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0097.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0097.jpg
new file mode 100644
index 0000000..63c3512
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0097.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0098.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0098.jpg
new file mode 100644
index 0000000..076531c
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0098.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0099.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0099.jpg
new file mode 100644
index 0000000..e90357e
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/frames/frame_0099.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/net_detection_preview.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/net_detection_preview.jpg
new file mode 100644
index 0000000..c2a0e27
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/net_detection_preview.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0003.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0003.jpg
new file mode 100644
index 0000000..d866005
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0003.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0004.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0004.jpg
new file mode 100644
index 0000000..badfd77
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0004.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0006.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0006.jpg
new file mode 100644
index 0000000..9526b7c
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0006.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0007.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0007.jpg
new file mode 100644
index 0000000..cb5c570
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0007.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0008.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0008.jpg
new file mode 100644
index 0000000..5cdea20
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0008.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0009.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0009.jpg
new file mode 100644
index 0000000..2bd3598
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0009.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0010.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0010.jpg
new file mode 100644
index 0000000..d8aa3d6
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0010.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0011.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0011.jpg
new file mode 100644
index 0000000..ec027fb
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0011.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0014.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0014.jpg
new file mode 100644
index 0000000..10fcfd5
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0014.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0017.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0017.jpg
new file mode 100644
index 0000000..81b2bca
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0017.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0018.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0018.jpg
new file mode 100644
index 0000000..57b37a1
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0018.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0019.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0019.jpg
new file mode 100644
index 0000000..fa548b4
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0019.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0021.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0021.jpg
new file mode 100644
index 0000000..16833da
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0021.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0022.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0022.jpg
new file mode 100644
index 0000000..c6a6dc4
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0022.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0023.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0023.jpg
new file mode 100644
index 0000000..8facc8f
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0023.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0029.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0029.jpg
new file mode 100644
index 0000000..a967367
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0029.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0030.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0030.jpg
new file mode 100644
index 0000000..7b142f6
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0030.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0042.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0042.jpg
new file mode 100644
index 0000000..0fa4d18
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0042.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0062.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0062.jpg
new file mode 100644
index 0000000..bbc298a
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0062.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0065.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0065.jpg
new file mode 100644
index 0000000..0f4d817
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0065.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0082.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0082.jpg
new file mode 100644
index 0000000..0fcd75e
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0082.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0083.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0083.jpg
new file mode 100644
index 0000000..549e3de
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0083.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0084.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0084.jpg
new file mode 100644
index 0000000..5c2feae
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0084.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0085.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0085.jpg
new file mode 100644
index 0000000..5fdb045
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0085.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0087.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0087.jpg
new file mode 100644
index 0000000..9a76b86
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0087.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0088.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0088.jpg
new file mode 100644
index 0000000..317bd3c
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0088.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0092.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0092.jpg
new file mode 100644
index 0000000..6c33a6a
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0092.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0093.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0093.jpg
new file mode 100644
index 0000000..d297f69
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0093.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0096.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0096.jpg
new file mode 100644
index 0000000..76ef7b5
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0096.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0097.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0097.jpg
new file mode 100644
index 0000000..63c3512
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0097.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0098.jpg b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0098.jpg
new file mode 100644
index 0000000..076531c
Binary files /dev/null and b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/frames/frame_0098.jpg differ
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/index.html b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/index.html
new file mode 100644
index 0000000..2b80a5d
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/viewer/index.html
@@ -0,0 +1,831 @@
+
+
+
+
+
+ Ball Tracking 3D Viewer - Run 20602718
+
+
+
+
+
🎾 Pickleball Ball Tracking 3D Viewer
+
+
+
+
📹 Video Frame
+
![Video frame]()
+
+
+
+
🗺️ Interactive 3D Court (drag to rotate, scroll to zoom)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 💡 Controls:
+ ←/→ Navigate frames
+ Space Play/Pause
+ Mouse drag Rotate 3D view
+ Mouse wheel Zoom
+
+
+
+
+
+
+
+
+
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/visualize_ball_3d_trajectory.json b/data/20602718-5870-4419-9fa3-3a067ff0ad00/visualize_ball_3d_trajectory.json
new file mode 100644
index 0000000..65e58d3
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/visualize_ball_3d_trajectory.json
@@ -0,0 +1,6 @@
+{
+ "top_view_path": "data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_trajectory_top_view.jpg",
+ "side_view_path": "data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_trajectory_side_view.jpg",
+ "heatmap_path": "data/20602718-5870-4419-9fa3-3a067ff0ad00/ball_3d_heatmap.jpg",
+ "num_positions": 31
+}
\ No newline at end of file
diff --git a/data/20602718-5870-4419-9fa3-3a067ff0ad00/visualize_ball_on_court.json b/data/20602718-5870-4419-9fa3-3a067ff0ad00/visualize_ball_on_court.json
new file mode 100644
index 0000000..246f055
--- /dev/null
+++ b/data/20602718-5870-4419-9fa3-3a067ff0ad00/visualize_ball_on_court.json
@@ -0,0 +1,3 @@
+{
+ "image_path": "data/20602718-5870-4419-9fa3-3a067ff0ad00/court_polygon.jpg"
+}
\ No newline at end of file
diff --git a/data/466a4386-53ba-4744-9570-bf8b623c2479/detect_net.json b/data/466a4386-53ba-4744-9570-bf8b623c2479/detect_net.json
new file mode 100644
index 0000000..0937350
--- /dev/null
+++ b/data/466a4386-53ba-4744-9570-bf8b623c2479/detect_net.json
@@ -0,0 +1,16 @@
+{
+ "net_corners_pixel": [
+ [
+ 697.8789672851562,
+ 383.0556640625
+ ],
+ [
+ 1397.933349609375,
+ 632.2061309814453
+ ]
+ ],
+ "net_height_m": 0.914,
+ "detection_confidence": 1.0,
+ "frame_width": 1920,
+ "frame_height": 1080
+}
\ No newline at end of file
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/calibrate_camera_3d.json b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/calibrate_camera_3d.json
new file mode 100644
index 0000000..0f21bcb
--- /dev/null
+++ b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/calibrate_camera_3d.json
@@ -0,0 +1,57 @@
+{
+ "camera_matrix": [
+ [
+ 1920.0,
+ 0.0,
+ 960.0
+ ],
+ [
+ 0.0,
+ 1920.0,
+ 540.0
+ ],
+ [
+ 0.0,
+ 0.0,
+ 1.0
+ ]
+ ],
+ "rotation_vector": [
+ -0.7801686952693866,
+ 0.8941439646078098,
+ 0.6741747541455946
+ ],
+ "translation_vector": [
+ -1.1214146556912021,
+ -3.8975183875481156,
+ 24.026959814106053
+ ],
+ "rotation_matrix": [
+ [
+ 0.46447638978533573,
+ -0.7814338577335282,
+ 0.41668070428048015
+ ],
+ [
+ 0.18562725756007864,
+ 0.5459705583002341,
+ 0.8169814384183126
+ ],
+ [
+ -0.8659123538688573,
+ -0.30212129262438064,
+ 0.3986458578240444
+ ]
+ ],
+ "reprojection_error": 124.420654296875,
+ "focal_length": 1920.0,
+ "principal_point": [
+ 960.0,
+ 540.0
+ ],
+ "image_size": [
+ 1920,
+ 1080
+ ],
+ "calibrated": true
+}
\ No newline at end of file
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/compute_ball_3d_coordinates.json b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/compute_ball_3d_coordinates.json
new file mode 100644
index 0000000..092500c
--- /dev/null
+++ b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/compute_ball_3d_coordinates.json
@@ -0,0 +1,802 @@
+[
+ {
+ "frame": 0,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 1,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 2,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 3,
+ "x_m": 10.044878959655762,
+ "y_m": 3.3315815925598145,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.4595355987548828
+ },
+ {
+ "frame": 4,
+ "x_m": 9.925751686096191,
+ "y_m": 3.282414674758911,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7499630451202393
+ },
+ {
+ "frame": 5,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 6,
+ "x_m": 9.522378921508789,
+ "y_m": 3.081491708755493,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.4438209533691406
+ },
+ {
+ "frame": 7,
+ "x_m": 9.406031608581543,
+ "y_m": 3.0407073497772217,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.8662319779396057
+ },
+ {
+ "frame": 8,
+ "x_m": 9.371339797973633,
+ "y_m": 3.0464587211608887,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9164504408836365
+ },
+ {
+ "frame": 9,
+ "x_m": 9.37229061126709,
+ "y_m": 3.072193145751953,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9407913088798523
+ },
+ {
+ "frame": 10,
+ "x_m": 9.378125190734863,
+ "y_m": 3.1054039001464844,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9483180642127991
+ },
+ {
+ "frame": 11,
+ "x_m": 9.478368759155273,
+ "y_m": 3.180798053741455,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9082649350166321
+ },
+ {
+ "frame": 12,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 13,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 14,
+ "x_m": 9.910148620605469,
+ "y_m": 3.5509445667266846,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.667772114276886
+ },
+ {
+ "frame": 15,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 16,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 17,
+ "x_m": 8.93185043334961,
+ "y_m": 2.7611701488494873,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.5858595967292786
+ },
+ {
+ "frame": 18,
+ "x_m": 8.352518081665039,
+ "y_m": 2.2731122970581055,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.8277773857116699
+ },
+ {
+ "frame": 19,
+ "x_m": 7.649472713470459,
+ "y_m": 1.729779601097107,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.6525294780731201
+ },
+ {
+ "frame": 20,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 21,
+ "x_m": 6.449870586395264,
+ "y_m": 0.7403887510299683,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9178251624107361
+ },
+ {
+ "frame": 22,
+ "x_m": 5.954407215118408,
+ "y_m": 0.2702276408672333,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.6851440668106079
+ },
+ {
+ "frame": 23,
+ "x_m": 5.351879596710205,
+ "y_m": -0.2799437344074249,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.8174329400062561
+ },
+ {
+ "frame": 24,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 25,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 26,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 27,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 28,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 29,
+ "x_m": 4.331277370452881,
+ "y_m": -1.5349446535110474,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7200624942779541
+ },
+ {
+ "frame": 30,
+ "x_m": 4.433146953582764,
+ "y_m": -1.5207486152648926,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.4647325277328491
+ },
+ {
+ "frame": 31,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 32,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 33,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 34,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 35,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 36,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 37,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 38,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 39,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 40,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 41,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 42,
+ "x_m": 8.577290534973145,
+ "y_m": 1.0078997611999512,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.4238705635070801
+ },
+ {
+ "frame": 43,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 44,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 45,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 46,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 47,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 48,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 49,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 50,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 51,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 52,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 53,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 54,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 55,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 56,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 57,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 58,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 59,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 60,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 61,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 62,
+ "x_m": -1.4370819330215454,
+ "y_m": -4.089122772216797,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.46716606616973877
+ },
+ {
+ "frame": 63,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 64,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 65,
+ "x_m": -3.1526687145233154,
+ "y_m": -3.9628825187683105,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7788172364234924
+ },
+ {
+ "frame": 66,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 67,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 68,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 69,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 70,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 71,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 72,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 73,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 74,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 75,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 76,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 77,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 78,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 79,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 80,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 81,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 82,
+ "x_m": 5.462012767791748,
+ "y_m": 4.150640964508057,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7371496558189392
+ },
+ {
+ "frame": 83,
+ "x_m": 6.035140037536621,
+ "y_m": 4.453850746154785,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.47047895193099976
+ },
+ {
+ "frame": 84,
+ "x_m": 6.361359596252441,
+ "y_m": 4.682921409606934,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7571220993995667
+ },
+ {
+ "frame": 85,
+ "x_m": 5.944571495056152,
+ "y_m": 4.653173923492432,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.6384866237640381
+ },
+ {
+ "frame": 86,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 87,
+ "x_m": 5.069350242614746,
+ "y_m": 4.607361316680908,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.924823522567749
+ },
+ {
+ "frame": 88,
+ "x_m": 4.626520156860352,
+ "y_m": 4.583075046539307,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.6589019298553467
+ },
+ {
+ "frame": 89,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 90,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 91,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 92,
+ "x_m": 3.593766450881958,
+ "y_m": 4.720729351043701,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.616001307964325
+ },
+ {
+ "frame": 93,
+ "x_m": 3.4283807277679443,
+ "y_m": 4.76817512512207,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7801673412322998
+ },
+ {
+ "frame": 94,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 95,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ },
+ {
+ "frame": 96,
+ "x_m": 3.5402987003326416,
+ "y_m": 5.051088809967041,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9100144505500793
+ },
+ {
+ "frame": 97,
+ "x_m": 3.6705820560455322,
+ "y_m": 5.15645694732666,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.9327566623687744
+ },
+ {
+ "frame": 98,
+ "x_m": 3.850410223007202,
+ "y_m": 5.273887634277344,
+ "z_m": 0.0,
+ "on_ground": true,
+ "confidence": 0.7828439474105835
+ },
+ {
+ "frame": 99,
+ "x_m": null,
+ "y_m": null,
+ "z_m": null,
+ "on_ground": false,
+ "confidence": 0.0
+ }
+]
\ No newline at end of file
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/create_interactive_viewer.json b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/create_interactive_viewer.json
new file mode 100644
index 0000000..fcd9722
--- /dev/null
+++ b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/create_interactive_viewer.json
@@ -0,0 +1,4 @@
+{
+ "viewer_path": "data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/index.html",
+ "num_frames": 31
+}
\ No newline at end of file
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/detect_net.json b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/detect_net.json
new file mode 100644
index 0000000..0937350
--- /dev/null
+++ b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/detect_net.json
@@ -0,0 +1,16 @@
+{
+ "net_corners_pixel": [
+ [
+ 697.8789672851562,
+ 383.0556640625
+ ],
+ [
+ 1397.933349609375,
+ 632.2061309814453
+ ]
+ ],
+ "net_height_m": 0.914,
+ "detection_confidence": 1.0,
+ "frame_width": 1920,
+ "frame_height": 1080
+}
\ No newline at end of file
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0003.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0003.jpg
new file mode 100644
index 0000000..d866005
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0003.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0004.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0004.jpg
new file mode 100644
index 0000000..badfd77
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0004.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0006.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0006.jpg
new file mode 100644
index 0000000..9526b7c
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0006.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0007.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0007.jpg
new file mode 100644
index 0000000..cb5c570
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0007.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0008.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0008.jpg
new file mode 100644
index 0000000..5cdea20
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0008.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0009.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0009.jpg
new file mode 100644
index 0000000..2bd3598
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0009.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0010.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0010.jpg
new file mode 100644
index 0000000..d8aa3d6
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0010.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0011.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0011.jpg
new file mode 100644
index 0000000..ec027fb
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0011.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0014.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0014.jpg
new file mode 100644
index 0000000..10fcfd5
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0014.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0017.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0017.jpg
new file mode 100644
index 0000000..81b2bca
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0017.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0018.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0018.jpg
new file mode 100644
index 0000000..57b37a1
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0018.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0019.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0019.jpg
new file mode 100644
index 0000000..fa548b4
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0019.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0021.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0021.jpg
new file mode 100644
index 0000000..16833da
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0021.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0022.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0022.jpg
new file mode 100644
index 0000000..c6a6dc4
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0022.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0023.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0023.jpg
new file mode 100644
index 0000000..8facc8f
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0023.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0029.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0029.jpg
new file mode 100644
index 0000000..a967367
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0029.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0030.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0030.jpg
new file mode 100644
index 0000000..7b142f6
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0030.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0042.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0042.jpg
new file mode 100644
index 0000000..0fa4d18
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0042.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0062.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0062.jpg
new file mode 100644
index 0000000..bbc298a
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0062.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0065.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0065.jpg
new file mode 100644
index 0000000..0f4d817
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0065.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0082.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0082.jpg
new file mode 100644
index 0000000..0fcd75e
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0082.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0083.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0083.jpg
new file mode 100644
index 0000000..549e3de
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0083.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0084.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0084.jpg
new file mode 100644
index 0000000..5c2feae
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0084.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0085.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0085.jpg
new file mode 100644
index 0000000..5fdb045
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0085.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0087.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0087.jpg
new file mode 100644
index 0000000..9a76b86
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0087.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0088.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0088.jpg
new file mode 100644
index 0000000..317bd3c
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0088.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0092.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0092.jpg
new file mode 100644
index 0000000..6c33a6a
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0092.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0093.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0093.jpg
new file mode 100644
index 0000000..d297f69
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0093.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0096.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0096.jpg
new file mode 100644
index 0000000..76ef7b5
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0096.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0097.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0097.jpg
new file mode 100644
index 0000000..63c3512
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0097.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0098.jpg b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0098.jpg
new file mode 100644
index 0000000..076531c
Binary files /dev/null and b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/frames/frame_0098.jpg differ
diff --git a/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/index.html b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/index.html
new file mode 100644
index 0000000..7bc3d65
--- /dev/null
+++ b/data/6350640e-9cf2-4894-ad25-88cd83ed818e/viewer/index.html
@@ -0,0 +1,831 @@
+
+
+
+
+
+ Ball Tracking 3D Viewer - Run 6350640e
+
+
+
+
+
🎾 Pickleball Ball Tracking 3D Viewer
+
+
+
+
📹 Video Frame
+
![Video frame]()
+
+
+
+
🗺️ Interactive 3D Court (drag to rotate, scroll to zoom)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 💡 Controls:
+ ←/→ Navigate frames
+ Space Play/Pause
+ Mouse drag Rotate 3D view
+ Mouse wheel Zoom
+
+
+
+
+
+
+
+
+
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..696df91
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,34 @@
+version: '3.8'
+
+services:
+ dagster:
+ build: .
+ container_name: pickle-dagster
+ ports:
+ - "3000:3000"
+ volumes:
+ # Mount data directory for pipeline outputs (frames, detections, JSON)
+ - ./data:/app/data
+ # Mount dagster_home for Dagster metadata (history, logs, storage)
+ - ./dagster_home:/app/dagster_home
+ # Mount models directory
+ - ./models:/app/models
+ # Mount video file
+ - ./DJI_0017.MP4:/app/DJI_0017.MP4
+ # Mount source code for hot reload
+ - ./dagster_project:/app/dagster_project
+ - ./src:/app/src
+ environment:
+ - PYTHONUNBUFFERED=1
+ - DAGSTER_HOME=/app/dagster_home
+ - ROBOFLOW_API_KEY=${ROBOFLOW_API_KEY}
+ restart: unless-stopped
+ command: dagster dev -m dagster_project --host 0.0.0.0 --port 3000
+
+ # Optional: Redis for Celery (if you want to add it later)
+ # redis:
+ # image: redis:7-alpine
+ # container_name: pickle-redis
+ # ports:
+ # - "6379:6379"
+ # restart: unless-stopped
diff --git a/jetson/ball_detection_stream.py b/jetson/ball_detection_stream.py
new file mode 100644
index 0000000..8268864
--- /dev/null
+++ b/jetson/ball_detection_stream.py
@@ -0,0 +1,202 @@
+#!/usr/bin/env python3
+"""
+Pickleball detection using YOLOv8 on Jetson.
+Works with video file or camera input.
+Outputs RTSP stream with bounding boxes around detected balls.
+"""
+
+import cv2
+import time
+import argparse
+import gi
+gi.require_version('Gst', '1.0')
+gi.require_version('GstRtspServer', '1.0')
+from gi.repository import Gst, GstRtspServer, GLib
+from ultralytics import YOLO
+import threading
+
+# COCO class 32 = sports ball
+BALL_CLASS_ID = 32
+
+# Stream settings
+STREAM_WIDTH = 1280
+STREAM_HEIGHT = 720
+FPS = 30
+
+
class RTSPServer:
    """Minimal GStreamer-backed RTSP server fed through an appsrc element."""

    def __init__(self, port=8554):
        # GStreamer must be initialised before any element is created.
        Gst.init(None)

        # appsrc accepts raw BGR frames; x264enc encodes for RTP payloading.
        launch = (
            '( appsrc name=source is-live=true block=true format=GST_FORMAT_TIME '
            'caps=video/x-raw,format=BGR,width=1280,height=720,framerate=30/1 ! '
            'videoconvert ! x264enc tune=zerolatency bitrate=2000 speed-preset=ultrafast ! '
            'rtph264pay name=pay0 pt=96 )'
        )

        self.server = GstRtspServer.RTSPServer()
        self.server.set_service(str(port))

        self.factory = GstRtspServer.RTSPMediaFactory()
        self.factory.set_launch(launch)
        self.factory.set_shared(True)

        self.server.get_mount_points().add_factory("/live", self.factory)
        self.server.attach(None)
        print(f"RTSP server started at rtsp://pickle:{port}/live")
+
+
def detect_ball(frame, model):
    """Run YOLO on a frame and return (x1, y1, x2, y2, conf) tuples."""
    results = model(frame, verbose=False, classes=[BALL_CLASS_ID], conf=0.3)
    # Flatten all boxes across results into integer corner coords + confidence.
    return [
        (*map(int, box.xyxy[0]), float(box.conf[0]))
        for result in results
        for box in result.boxes
    ]
+
+
def draw_detections(frame, detections):
    """Draw a labelled green bounding box for each detection; return the frame."""
    green = (0, 255, 0)
    for det in detections:
        x1, y1, x2, y2, conf = det
        cv2.rectangle(frame, (x1, y1), (x2, y2), green, 2)
        cv2.putText(frame, f"Ball {conf:.2f}", (x1, y1 - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, green, 2)
    return frame
+
+
def main():
    """Parse CLI args, open the video source, and run the detection loop.

    Supports a numeric camera index, a video file path (looped forever), or a
    Jetson CSI camera via a GStreamer fallback pipeline. Optionally shows a
    local window (--display) and/or records annotated output (--save).
    """
    parser = argparse.ArgumentParser(description='Pickleball Detection Stream')
    parser.add_argument('--source', type=str, default='0',
                        help='Video source: 0 for camera, or path to video file')
    parser.add_argument('--rtsp-port', type=int, default=8554,
                        help='RTSP server port')
    parser.add_argument('--model', type=str, default='yolov8n.pt',
                        help='YOLO model to use')
    parser.add_argument('--display', action='store_true',
                        help='Show local display window')
    parser.add_argument('--save', type=str, default=None,
                        help='Save output to video file')
    args = parser.parse_args()

    print(f"Loading YOLO model: {args.model}")
    model = YOLO(args.model)

    # Prefer GPU inference; fall back to CPU. Narrowed from a bare `except:`,
    # which would also swallow KeyboardInterrupt/SystemExit.
    try:
        model.to("cuda")
        print("Using CUDA for inference")
    except Exception:
        print("CUDA not available, using CPU")

    # Open video source
    print(f"Opening video source: {args.source}")
    if args.source.isdigit():
        # Camera by index
        cap = cv2.VideoCapture(int(args.source))
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, STREAM_WIDTH)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, STREAM_HEIGHT)
        cap.set(cv2.CAP_PROP_FPS, FPS)
    else:
        # Video file
        cap = cv2.VideoCapture(args.source)

    if not cap.isOpened():
        # Try GStreamer pipeline for a Jetson CSI camera
        print("Trying CSI camera via GStreamer...")
        cap = cv2.VideoCapture(
            "nvarguscamerasrc ! "
            "video/x-raw(memory:NVMM),width=1280,height=720,framerate=30/1 ! "
            "nvvidconv ! video/x-raw,format=BGRx ! "
            "videoconvert ! video/x-raw,format=BGR ! appsink drop=1",
            cv2.CAP_GSTREAMER
        )

    if not cap.isOpened():
        print("ERROR: Cannot open video source!")
        return

    # Get video properties (CAP_PROP_FPS may report 0 for cameras -> default 30)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    print(f"Video: {width}x{height} @ {fps}fps")

    # Setup video writer if saving
    out = None
    if args.save:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(args.save, fourcc, fps, (width, height))
        print(f"Saving output to: {args.save}")

    frame_count = 0
    start_time = time.time()
    total_detections = 0

    print("Starting detection loop... Press Ctrl+C to stop")

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                if not args.source.isdigit():
                    # Video file ended: rewind and loop forever
                    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                    continue
                print("Failed to grab frame")
                break

            # Normalise frame size to the stream resolution
            if frame.shape[1] != STREAM_WIDTH or frame.shape[0] != STREAM_HEIGHT:
                frame = cv2.resize(frame, (STREAM_WIDTH, STREAM_HEIGHT))

            # Run detection and annotate
            detections = detect_ball(frame, model)
            total_detections += len(detections)
            frame = draw_detections(frame, detections)

            # FPS bookkeeping; elapsed guarded against zero on the first frame
            frame_count += 1
            elapsed = max(time.time() - start_time, 1e-6)
            current_fps = frame_count / elapsed
            if frame_count % 30 == 0:
                print(f"FPS: {current_fps:.1f}, Frame: {frame_count}, "
                      f"Detections this frame: {len(detections)}")

            # Overlay the running FPS on the frame
            cv2.putText(frame, f"FPS: {current_fps:.1f}",
                        (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)

            # Save if requested
            if out:
                out.write(frame)

            # Display if requested ('q' quits)
            if args.display:
                cv2.imshow("Pickleball Detection", frame)
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

    except KeyboardInterrupt:
        print("\nStopping...")

    finally:
        # Guard the summary divisions: elapsed/frame_count can be zero if the
        # loop exited before processing a single frame.
        elapsed = time.time() - start_time
        if frame_count and elapsed > 0:
            print(f"\nProcessed {frame_count} frames in {elapsed:.1f}s")
            print(f"Average FPS: {frame_count / elapsed:.1f}")
        print(f"Total ball detections: {total_detections}")

        cap.release()
        if out:
            out.release()
        cv2.destroyAllWindows()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/jetson/rtsp_detection_server.py b/jetson/rtsp_detection_server.py
new file mode 100644
index 0000000..d4db242
--- /dev/null
+++ b/jetson/rtsp_detection_server.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python3
+"""
+RTSP server with YOLOv8 ball detection for Jetson.
+Streams video with detections over RTSP.
+"""
+
+import cv2
+import time
+import argparse
+import threading
+import gi
+gi.require_version('Gst', '1.0')
+gi.require_version('GstRtspServer', '1.0')
+from gi.repository import Gst, GstRtspServer, GLib
+from ultralytics import YOLO
+import numpy as np
+
+# COCO class 32 = sports ball
+BALL_CLASS_ID = 32
+
+
class DetectionRTSPServer:
    """RTSP server that streams video annotated with YOLO ball detections.

    A background capture thread reads frames from the configured source,
    runs detection, and stores the latest annotated frame under a lock;
    the GStreamer appsrc `need-data` callback pushes that frame to clients.
    """

    def __init__(self, source, model_path='yolov8n.pt', port=8554, width=1280, height=720, fps=30):
        self.source = source
        self.width = width
        self.height = height
        self.fps = fps
        self.port = port
        self.running = False
        self.frame = None            # latest annotated frame, shared with the RTSP callback
        self.lock = threading.Lock()  # guards self.frame

        # Load YOLO model; prefer GPU. Narrowed from a bare `except:`, which
        # would also swallow KeyboardInterrupt/SystemExit.
        print(f"Loading YOLO model: {model_path}")
        self.model = YOLO(model_path)
        try:
            self.model.to("cuda")
            print("Using CUDA")
        except Exception:
            print("Using CPU")

        # GStreamer must be initialised before any element is created.
        Gst.init(None)

    def detect_and_draw(self, frame):
        """Run detection on a frame and draw labelled boxes; return the frame."""
        results = self.model(frame, verbose=False, classes=[BALL_CLASS_ID], conf=0.25)

        for result in results:
            for box in result.boxes:
                x1, y1, x2, y2 = map(int, box.xyxy[0])
                conf = float(box.conf[0])
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
                cv2.putText(frame, f"Ball {conf:.2f}", (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)

        return frame

    def capture_loop(self):
        """Capture frames, run detection, and publish the annotated frame."""
        print(f"Opening source: {self.source}")

        if self.source.isdigit():
            cap = cv2.VideoCapture(int(self.source))
        elif self.source == 'csi':
            # CSI camera on Jetson via a GStreamer pipeline
            cap = cv2.VideoCapture(
                f"nvarguscamerasrc ! video/x-raw(memory:NVMM),width={self.width},height={self.height},"
                f"framerate={self.fps}/1 ! nvvidconv ! video/x-raw,format=BGRx ! "
                f"videoconvert ! video/x-raw,format=BGR ! appsink drop=1",
                cv2.CAP_GSTREAMER
            )
        else:
            cap = cv2.VideoCapture(self.source)

        if not cap.isOpened():
            print("ERROR: Cannot open video source!")
            return

        frame_count = 0
        start_time = time.time()

        while self.running:
            ret, frame = cap.read()
            if not ret:
                if not self.source.isdigit() and self.source != 'csi':
                    # Video file ended: rewind and loop forever
                    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                    continue
                break

            # Normalise to the stream resolution, then annotate
            frame = cv2.resize(frame, (self.width, self.height))
            frame = self.detect_and_draw(frame)

            # FPS overlay; elapsed guarded against zero on the first frame
            frame_count += 1
            fps = frame_count / max(time.time() - start_time, 1e-6)
            cv2.putText(frame, f"FPS: {fps:.1f}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)

            with self.lock:
                self.frame = frame.copy()

            if frame_count % 100 == 0:
                print(f"FPS: {fps:.1f}")

        cap.release()

    def start(self):
        """Start the capture thread and the RTSP server; blocks in a GLib loop."""
        self.running = True

        # Start capture thread
        self.capture_thread = threading.Thread(target=self.capture_loop)
        self.capture_thread.start()

        # Wait until at least one frame is available before serving clients
        print("Waiting for first frame...")
        while self.frame is None and self.running:
            time.sleep(0.1)

        self.server = GstRtspServer.RTSPServer.new()
        self.server.set_service(str(self.port))

        # Factory whose pipeline accepts raw BGR frames and encodes H.264
        self.factory = GstRtspServer.RTSPMediaFactory.new()
        launch_str = (
            f'( appsrc name=mysrc is-live=true block=false format=GST_FORMAT_TIME '
            f'caps=video/x-raw,format=BGR,width={self.width},height={self.height},framerate={self.fps}/1 ! '
            f'queue ! videoconvert ! video/x-raw,format=I420 ! '
            f'x264enc tune=zerolatency bitrate=4000 speed-preset=ultrafast ! '
            f'rtph264pay config-interval=1 name=pay0 pt=96 )'
        )

        self.factory.set_launch(launch_str)
        self.factory.set_shared(True)
        self.factory.connect('media-configure', self.on_media_configure)

        mounts = self.server.get_mount_points()
        mounts.add_factory('/live', self.factory)

        self.server.attach(None)
        print(f"\n{'='*50}")
        print(f"RTSP stream ready at: rtsp://pickle:{self.port}/live")
        print(f"{'='*50}\n")

        # Run GLib main loop until interrupted
        self.loop = GLib.MainLoop()
        try:
            self.loop.run()
        except KeyboardInterrupt:
            pass

        self.stop()

    def on_media_configure(self, factory, media):
        """Hook up the appsrc `need-data` callback when a client connects."""
        print("Client connected!")
        appsrc = media.get_element().get_child_by_name('mysrc')
        appsrc.connect('need-data', self.on_need_data)

    def on_need_data(self, src, length):
        """Push the most recent annotated frame to appsrc when requested."""
        # Copy under the lock, but do the (comparatively slow) buffer build
        # and push without holding it, so the capture thread is not stalled.
        with self.lock:
            if self.frame is None:
                return
            frame = self.frame.copy()

        data = frame.tobytes()
        buf = Gst.Buffer.new_allocate(None, len(data), None)
        buf.fill(0, data)

        # Timestamp the buffer so downstream elements can pace playback
        buf.pts = int(time.time() * Gst.SECOND)
        buf.duration = int(Gst.SECOND / self.fps)

        src.emit('push-buffer', buf)

    def stop(self):
        """Stop the capture thread and report shutdown."""
        self.running = False
        if hasattr(self, 'capture_thread'):
            self.capture_thread.join()
        print("Server stopped")
+
+
def main():
    """CLI entry point: build a DetectionRTSPServer from arguments and run it."""
    parser = argparse.ArgumentParser(description='RTSP Detection Server')
    parser.add_argument('--source', type=str, default='csi',
                        help='Video source: csi, 0 (USB cam), or video file path')
    parser.add_argument('--model', type=str, default='yolov8n.pt',
                        help='YOLO model')
    parser.add_argument('--port', type=int, default=8554,
                        help='RTSP port')
    parser.add_argument('--width', type=int, default=1280)
    parser.add_argument('--height', type=int, default=720)
    parser.add_argument('--fps', type=int, default=30)
    args = parser.parse_args()

    # start() blocks in the GLib main loop until interrupted.
    DetectionRTSPServer(
        source=args.source,
        model_path=args.model,
        port=args.port,
        width=args.width,
        height=args.height,
        fps=args.fps,
    ).start()
+
+if __name__ == '__main__':
+ main()
diff --git a/jetson/rtsp_yolo_stream.py b/jetson/rtsp_yolo_stream.py
new file mode 100644
index 0000000..4ea7abd
--- /dev/null
+++ b/jetson/rtsp_yolo_stream.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+"""
+RTSP stream with YOLOv8 ball detection using GStreamer pipeline.
+"""
+
+import cv2
+import time
+import sys
+from ultralytics import YOLO
+
+BALL_CLASS_ID = 32 # sports ball
+WIDTH = 1280
+HEIGHT = 720
+FPS = 25
+PORT = 8554
+
+
def main():
    """Run ball detection on a video source and record an annotated test clip.

    Writes ~10 seconds of annotated video to /tmp/output_detection.mp4,
    preferring a GStreamer-backed writer and falling back to a plain OpenCV
    writer. Returns a process exit code (0 on success, 1 on open failure).
    """
    source = sys.argv[1] if len(sys.argv) > 1 else "/opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4"

    print("Loading YOLOv8n model...")
    model = YOLO("yolov8n.pt")
    # Narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit.
    try:
        model.to("cuda")
        print("Using CUDA")
    except Exception:
        print("Using CPU")

    print(f"Opening: {source}")
    cap = cv2.VideoCapture(source)
    if not cap.isOpened():
        print("ERROR: Cannot open source")
        return 1

    # GStreamer pipeline: H.264-encode and mux straight to an MP4 file.
    # (Unused RTSP/UDP pipeline drafts removed as dead code.)
    gst_file = (
        f"appsrc ! "
        f"video/x-raw,format=BGR,width={WIDTH},height={HEIGHT},framerate={FPS}/1 ! "
        f"videoconvert ! video/x-raw,format=I420 ! "
        f"x264enc tune=zerolatency ! "
        f"mp4mux ! "
        f"filesink location=/tmp/output_detection.mp4"
    )

    print(f"\nStarting detection stream...")
    print(f"Output: /tmp/output_detection.mp4")
    print("Press Ctrl+C to stop\n")

    out = cv2.VideoWriter(gst_file, cv2.CAP_GSTREAMER, 0, FPS, (WIDTH, HEIGHT), True)

    if not out.isOpened():
        print("GStreamer writer failed, using regular file output")
        out = cv2.VideoWriter('/tmp/output_detection.mp4',
                              cv2.VideoWriter_fourcc(*'mp4v'), FPS, (WIDTH, HEIGHT))

    frame_count = 0
    start = time.time()
    total_detections = 0

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # Loop the file until the 10s cutoff below
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                continue

            frame = cv2.resize(frame, (WIDTH, HEIGHT))

            # Detection + annotation
            results = model(frame, verbose=False, classes=[BALL_CLASS_ID], conf=0.25)
            for r in results:
                for box in r.boxes:
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    conf = float(box.conf[0])
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
                    cv2.putText(frame, f"Ball {conf:.2f}", (x1, y1-10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                    total_detections += 1

            # FPS overlay; elapsed guarded against zero on the first frame
            frame_count += 1
            elapsed = time.time() - start
            fps = frame_count / max(elapsed, 1e-6)
            cv2.putText(frame, f"FPS: {fps:.1f}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)

            out.write(frame)

            if frame_count % 50 == 0:
                print(f"Frame {frame_count}, FPS: {fps:.1f}, Detections: {total_detections}")

            # Stop after 10 seconds for test
            if elapsed > 10:
                print("\n10 second test complete")
                break

    except KeyboardInterrupt:
        print("\nStopping...")
    finally:
        # Guard the summary division: elapsed can be ~0 if we stopped at once
        elapsed = time.time() - start
        if frame_count and elapsed > 0:
            print(f"\nProcessed {frame_count} frames in {elapsed:.1f}s")
            print(f"Average FPS: {frame_count/elapsed:.1f}")
        print(f"Total detections: {total_detections}")
        print(f"Output saved to: /tmp/output_detection.mp4")
        cap.release()
        out.release()

    return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/jetson/simple_rtsp_stream.py b/jetson/simple_rtsp_stream.py
new file mode 100644
index 0000000..05017c3
--- /dev/null
+++ b/jetson/simple_rtsp_stream.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python3
+"""
+Simple RTSP stream with YOLOv8 detection using OpenCV + subprocess for RTSP.
+"""
+
+import cv2
+import subprocess
+import time
+from ultralytics import YOLO
+
+BALL_CLASS_ID = 32 # sports ball in COCO
+WIDTH = 1280
+HEIGHT = 720
+FPS = 25
+
+
def main():
    """Annotate a sample video with ball detections and publish RTSP via ffmpeg.

    Raw BGR frames are piped to an ffmpeg subprocess that encodes H.264 and
    pushes to rtsp://localhost:8554/live (an external RTSP server must be
    listening there).
    """
    print("Loading YOLOv8...")
    model = YOLO("yolov8n.pt")
    # Fall back to CPU instead of crashing on machines without CUDA
    # (consistent with the other jetson scripts).
    try:
        model.to("cuda")
        print("Model loaded on CUDA")
    except Exception:
        print("CUDA not available, using CPU")

    # Open video source
    source = "/opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4"
    cap = cv2.VideoCapture(source)

    if not cap.isOpened():
        print("ERROR: Cannot open video")
        return

    # ffmpeg reads raw BGR frames on stdin and publishes RTSP over TCP
    ffmpeg_cmd = [
        'ffmpeg',
        '-y',
        '-f', 'rawvideo',
        '-vcodec', 'rawvideo',
        '-pix_fmt', 'bgr24',
        '-s', f'{WIDTH}x{HEIGHT}',
        '-r', str(FPS),
        '-i', '-',
        '-c:v', 'libx264',
        '-preset', 'ultrafast',
        '-tune', 'zerolatency',
        '-f', 'rtsp',
        '-rtsp_transport', 'tcp',
        'rtsp://localhost:8554/live'
    ]

    print("Starting ffmpeg...")
    proc = subprocess.Popen(ffmpeg_cmd, stdin=subprocess.PIPE)

    print(f"\nRTSP stream: rtsp://pickle:8554/live")
    print("Press Ctrl+C to stop\n")

    frame_count = 0
    start = time.time()

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # Loop the input file forever
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                continue

            frame = cv2.resize(frame, (WIDTH, HEIGHT))

            # Detection + annotation
            results = model(frame, verbose=False, classes=[BALL_CLASS_ID], conf=0.3)
            for r in results:
                for box in r.boxes:
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    conf = float(box.conf[0])
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                    cv2.putText(frame, f"Ball {conf:.2f}", (x1, y1-10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

            # FPS overlay; elapsed guarded against zero on the first frame
            frame_count += 1
            fps = frame_count / max(time.time() - start, 1e-6)
            cv2.putText(frame, f"FPS: {fps:.1f}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)

            # Write raw frame to ffmpeg's stdin
            proc.stdin.write(frame.tobytes())

            if frame_count % 100 == 0:
                print(f"Frame {frame_count}, FPS: {fps:.1f}")

    except KeyboardInterrupt:
        print("\nStopping...")
    except BrokenPipeError:
        print("ffmpeg pipe broken")
    finally:
        cap.release()
        # Closing an already-broken pipe raises; don't mask the real error.
        try:
            proc.stdin.close()
        except (BrokenPipeError, OSError):
            pass
        proc.wait()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/jetson/stream_with_detection.py b/jetson/stream_with_detection.py
new file mode 100644
index 0000000..8b635ef
--- /dev/null
+++ b/jetson/stream_with_detection.py
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+"""
+RTSP stream with YOLOv8 ball detection.
+Pushes to mediamtx RTSP server using ffmpeg.
+
+Usage:
+ 1. Start mediamtx: /tmp/mediamtx &
+ 2. Run this script: python3 stream_with_detection.py [source]
+ 3. View stream: vlc rtsp://pickle:8554/live
+"""
+
+import cv2
+import subprocess
+import time
+import sys
+from ultralytics import YOLO
+
+BALL_CLASS_ID = 32 # sports ball in COCO
+WIDTH = 1280
+HEIGHT = 720
+FPS = 15 # Lower FPS for stable streaming
+RTSP_URL = "rtsp://localhost:8554/live"
+
+
def main():
    """Annotate a video source with ball detections and push RTSP via ffmpeg.

    Expects mediamtx (or another RTSP server) listening on localhost:8554;
    frames are rate-limited to FPS and piped raw to an ffmpeg publisher.
    Returns a process exit code (0 on success, 1 on open failure).
    """
    source = sys.argv[1] if len(sys.argv) > 1 else "/opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4"

    print("Loading YOLOv8n...")
    model = YOLO("yolov8n.pt")
    # Narrowed from a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit.
    try:
        model.to("cuda")
        print("Using CUDA")
    except Exception:
        print("Using CPU")

    print(f"Opening: {source}")
    cap = cv2.VideoCapture(source)
    if not cap.isOpened():
        print("ERROR: Cannot open source")
        return 1

    # ffmpeg reads raw BGR frames on stdin and publishes RTSP over TCP
    ffmpeg_cmd = [
        'ffmpeg',
        '-y',
        '-f', 'rawvideo',
        '-vcodec', 'rawvideo',
        '-pix_fmt', 'bgr24',
        '-s', f'{WIDTH}x{HEIGHT}',
        '-r', str(FPS),
        '-i', '-',
        '-c:v', 'libx264',
        '-preset', 'ultrafast',
        '-tune', 'zerolatency',
        '-g', str(FPS * 2),
        '-f', 'rtsp',
        '-rtsp_transport', 'tcp',
        RTSP_URL
    ]

    print("Starting ffmpeg RTSP publisher...")
    ffmpeg = subprocess.Popen(
        ffmpeg_cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL
    )

    print(f"\n{'='*50}")
    print(f"RTSP STREAM: rtsp://pickle:8554/live")
    print(f"{'='*50}")
    print("Press Ctrl+C to stop\n")

    frame_count = 0
    start = time.time()
    total_detections = 0
    frame_time = 1.0 / FPS  # target seconds per frame for rate limiting

    try:
        while True:
            loop_start = time.time()

            ret, frame = cap.read()
            if not ret:
                # Loop the input file forever
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                continue

            frame = cv2.resize(frame, (WIDTH, HEIGHT))

            # Detection + annotation
            results = model(frame, verbose=False, classes=[BALL_CLASS_ID], conf=0.25)
            det_count = 0
            for r in results:
                for box in r.boxes:
                    x1, y1, x2, y2 = map(int, box.xyxy[0])
                    conf = float(box.conf[0])
                    cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
                    cv2.putText(frame, f"Ball {conf:.2f}", (x1, y1-10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
                    det_count += 1
                    total_detections += 1

            # FPS overlay; elapsed guarded against zero on the first frame
            frame_count += 1
            fps = frame_count / max(time.time() - start, 1e-6)
            cv2.putText(frame, f"FPS: {fps:.1f} | Det: {det_count}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 255), 2)

            # Push to ffmpeg
            try:
                ffmpeg.stdin.write(frame.tobytes())
            except BrokenPipeError:
                print("FFmpeg pipe broken, restarting...")
                break

            if frame_count % 100 == 0:
                print(f"Frame {frame_count}, FPS: {fps:.1f}, Total Det: {total_detections}")

            # Rate limiting: sleep off the remainder of this frame's budget
            proc_time = time.time() - loop_start
            if proc_time < frame_time:
                time.sleep(frame_time - proc_time)

    except KeyboardInterrupt:
        print("\nStopping...")
    finally:
        # Guard the summary division: elapsed can be ~0 if we stopped at once
        elapsed = time.time() - start
        if frame_count and elapsed > 0:
            print(f"\nProcessed {frame_count} frames in {elapsed:.1f}s")
            print(f"Average FPS: {frame_count/elapsed:.1f}")
        print(f"Total detections: {total_detections}")
        cap.release()
        # Closing an already-broken pipe raises; don't mask the real error.
        try:
            ffmpeg.stdin.close()
        except (BrokenPipeError, OSError):
            pass
        ffmpeg.wait()

    return 0
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/orin/Dockerfile.template b/orin/Dockerfile.template
new file mode 100644
index 0000000..ecaecc5
--- /dev/null
+++ b/orin/Dockerfile.template
@@ -0,0 +1,4 @@
+# DeepStream base for JetPack 6 / L4T 36.x
+FROM nvcr.io/nvidia/deepstream-l4t:7.1-samples-multiarch
+
+WORKDIR /app
diff --git a/orin/docker-compose.yml b/orin/docker-compose.yml
new file mode 100644
index 0000000..f5f2585
--- /dev/null
+++ b/orin/docker-compose.yml
@@ -0,0 +1,23 @@
+version: '2.4'
+
+services:
+ cuda:
+ image: nvcr.io/nvidia/l4t-cuda:12.2.12-runtime
+ privileged: true
+ network_mode: host
+ restart: unless-stopped
+ environment:
+ - UDEV=1
+ - NVIDIA_VISIBLE_DEVICES=all
+ - NVIDIA_DRIVER_CAPABILITIES=all
+ devices:
+ - "/dev/video0:/dev/video0"
+ - "/dev/nvhost-ctrl:/dev/nvhost-ctrl"
+ - "/dev/nvhost-ctrl-gpu:/dev/nvhost-ctrl-gpu"
+ - "/dev/nvhost-prof-gpu:/dev/nvhost-prof-gpu"
+ - "/dev/nvhost-gpu:/dev/nvhost-gpu"
+ - "/dev/nvmap:/dev/nvmap"
+ - "/dev/nvhost-ctxsw-gpu:/dev/nvhost-ctxsw-gpu"
+ labels:
+ io.balena.features.nvidia: '1'
+ command: ["bash", "-lc", "echo CUDA ready; nvcc --version || true; sleep infinity"]
diff --git a/orin/scripts/rtsp_server.py b/orin/scripts/rtsp_server.py
new file mode 100644
index 0000000..06ed5b0
--- /dev/null
+++ b/orin/scripts/rtsp_server.py
@@ -0,0 +1,134 @@
+#!/usr/bin/env python3
+"""
+RTSP Server для стриминга с CSI камеры на Jetson Orin.
+Использует GStreamer RTSP Server.
+"""
+
+import os
+import gi
+gi.require_version('Gst', '1.0')
+gi.require_version('GstRtspServer', '1.0')
+from gi.repository import Gst, GstRtspServer, GLib
+
+# Инициализация GStreamer
+Gst.init(None)
+
+# Параметры из environment
+WIDTH = os.environ.get('CAMERA_WIDTH', '1920')
+HEIGHT = os.environ.get('CAMERA_HEIGHT', '1080')
+FPS = os.environ.get('CAMERA_FPS', '30')
+RTSP_PORT = os.environ.get('RTSP_PORT', '8554')
+
+
def create_pipeline():
    """Build the GStreamer pipeline string for the camera.

    Sources are tried in priority order: CSI camera (nvarguscamerasrc),
    USB camera (v4l2src), then a videotestsrc fallback.
    """
    import time  # hoisted out of the try block below

    # Option 1: nvarguscamerasrc (CSI camera on Jetson) with the NVIDIA
    # hardware encoder. nvv4l2h264enc encodes in hardware without EGL.
    csi_pipeline = (
        f'nvarguscamerasrc ! '
        f'video/x-raw(memory:NVMM),width={WIDTH},height={HEIGHT},framerate={FPS}/1,format=NV12 ! '
        f'nvv4l2h264enc bitrate=4000000 preset-level=1 insert-sps-pps=true ! '
        f'h264parse ! '
        f'rtph264pay name=pay0 pt=96'
    )

    # Option 2: v4l2src (USB camera) with the NVIDIA hardware encoder.
    v4l2_pipeline = (
        f'v4l2src device=/dev/video0 ! '
        f'video/x-raw,width={WIDTH},height={HEIGHT},framerate={FPS}/1 ! '
        f'nvvidconv ! video/x-raw(memory:NVMM),format=NV12 ! '
        f'nvv4l2h264enc bitrate=4000000 preset-level=1 insert-sps-pps=true ! '
        f'h264parse ! '
        f'rtph264pay name=pay0 pt=96'
    )

    # Option 3: test source with a software encoder (last-resort fallback).
    test_pipeline = (
        f'videotestsrc is-live=true ! '
        f'video/x-raw,width={WIDTH},height={HEIGHT},framerate={FPS}/1 ! '
        f'videoconvert ! '
        f'x264enc tune=zerolatency bitrate=4000 speed-preset=ultrafast ! '
        f'rtph264pay name=pay0 pt=96'
    )

    # Probe the CSI camera first by running its pipeline into a fakesink.
    try:
        test = Gst.parse_launch(csi_pipeline.replace('rtph264pay name=pay0 pt=96', 'fakesink'))
        test.set_state(Gst.State.PLAYING)
        # Give the camera time to initialise
        time.sleep(2)
        state = test.get_state(Gst.CLOCK_TIME_NONE)
        test.set_state(Gst.State.NULL)
        if state[1] == Gst.State.PLAYING:
            print("Using CSI camera (nvarguscamerasrc)")
            return csi_pipeline
    except Exception as e:
        print(f"CSI camera not available: {e}")

    # Then try a USB camera
    if os.path.exists('/dev/video0'):
        print("Using USB camera (v4l2src)")
        return v4l2_pipeline

    # Fall back to the test source
    print("No camera found, using test source")
    return test_pipeline
+
+
class RTSPServer:
    """Wraps a GstRtspServer serving the camera pipeline at /stream."""

    def __init__(self):
        # The server is constructed and attached immediately; the pipeline
        # itself starts when the first client connects.
        self.server = GstRtspServer.RTSPServer()
        self.server.set_service(RTSP_PORT)

        # Create the media factory for the stream
        factory = GstRtspServer.RTSPMediaFactory()
        pipeline = create_pipeline()
        print(f"Pipeline: {pipeline}")
        factory.set_launch(f'( {pipeline} )')
        factory.set_shared(True)

        # Add the mount point
        mount_points = self.server.get_mount_points()
        mount_points.add_factory('/stream', factory)

        # Start the server
        self.server.attach(None)

        print(f"\n{'='*50}")
        print(f"RTSP Server started!")
        # NOTE(review): this URL prints with an empty host ("rtsp://:<port>")
        # — looks like a host placeholder was lost; confirm intended text.
        print(f"Stream URL: rtsp://:{RTSP_PORT}/stream")
        print(f"{'='*50}\n")
+
+
def main():
    """Start the RTSP server and block on the GLib main loop until signalled."""
    import signal
    import sys

    # Keep a reference so the server is not garbage-collected while running.
    server = RTSPServer()
    loop = GLib.MainLoop()

    def signal_handler(sig, frame):
        # Quit the main loop and let main() return normally. Raising
        # SystemExit (sys.exit) inside a signal handler can abort GLib
        # mid-dispatch and defeats the graceful loop.quit() shutdown.
        print("Shutting down...")
        loop.quit()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    print("RTSP server running. Waiting for connections...")
    sys.stdout.flush()

    try:
        loop.run()
    except Exception as e:
        print(f"Error in main loop: {e}")
        sys.exit(1)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/orin/scripts/start.sh b/orin/scripts/start.sh
new file mode 100644
index 0000000..00a1125
--- /dev/null
+++ b/orin/scripts/start.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -e
+
+# Источник можно переопределить: file:///..., rtsp://..., rtmp://... и т.п.
+URI="${DS_SOURCE:-file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_720p.h264}"
+
+echo "Running deepstream-test1-app with source: ${URI}"
+
+cd /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test1
+exec ./deepstream-test1-app "${URI}"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..ca27913
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,33 @@
+[build-system]
+requires = ["setuptools>=42", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "pickle-ball-tracking"
+version = "0.1.0"
+description = "Pickleball ball tracking system using YOLO and Dagster"
+requires-python = ">=3.11"
+dependencies = [
+ "ultralytics>=8.0.0",
+ "roboflow>=1.1.0",
+ "inference>=0.9.0",
+ "supervision>=0.16.0",
+ "opencv-python>=4.8.0",
+ "numpy>=1.24.0",
+ "dagster>=1.5.0",
+ "dagster-webserver>=1.5.0",
+ "matplotlib>=3.8.0",
+ "fastapi>=0.104.0",
+ "uvicorn>=0.24.0",
+ "python-multipart>=0.0.6",
+ "pydantic>=2.0.0",
+ "python-dotenv>=1.0.0",
+ "tqdm>=4.66.0",
+]
+
+[tool.dagster]
+module_name = "dagster_project"
+
+[tool.setuptools.packages.find]
+where = ["."]
+include = ["dagster_project*", "src*"]
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..16de745
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,31 @@
+# Core ML/CV libraries
+ultralytics>=8.0.0
+roboflow>=1.1.0
+inference>=0.9.0
+supervision>=0.16.0
+opencv-python>=4.8.0
+numpy>=1.24.0
+
+# API framework
+fastapi>=0.104.0
+uvicorn>=0.24.0
+python-multipart>=0.0.6
+pydantic>=2.0.0
+
+# Background tasks (using FastAPI BackgroundTasks instead of Celery for MVP)
+# celery>=5.3.0
+# redis>=5.0.0
+
+# Dagster
+dagster>=1.5.0
+dagster-webserver>=1.5.0
+
+# Roboflow Hosted Inference
+inference-sdk>=0.9.0
+
+# Visualization
+matplotlib>=3.8.0
+
+# Utilities
+python-dotenv>=1.0.0
+tqdm>=4.66.0
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000..eb18949
--- /dev/null
+++ b/src/__init__.py
@@ -0,0 +1,17 @@
+"""
+Pickle - Pickleball Ball Tracking System
+"""
+
+from .ball_detector import BallDetector
+from .court_calibrator import CourtCalibrator, InteractiveCalibrator
+from .ball_tracker import BallTracker, MultiObjectTracker
+from .video_processor import VideoProcessor
+
+__all__ = [
+ 'BallDetector',
+ 'CourtCalibrator',
+ 'InteractiveCalibrator',
+ 'BallTracker',
+ 'MultiObjectTracker',
+ 'VideoProcessor'
+]
diff --git a/src/ball_detector.py b/src/ball_detector.py
new file mode 100644
index 0000000..f17af15
--- /dev/null
+++ b/src/ball_detector.py
@@ -0,0 +1,263 @@
+"""
+Ball detector module using Roboflow Hosted Inference and YOLO v8
+"""
+import os
+import numpy as np
+from typing import List, Tuple, Optional, Dict
+from inference_sdk import InferenceHTTPClient
+import cv2
+
+
class BallDetector:
    """
    Detects pickleball balls in video frames.

    Primary backend is a Roboflow hosted model (requires ROBOFLOW_API_KEY).
    When no key is available, or client construction fails, it falls back to
    a stock YOLOv8 model filtered to the COCO 'sports ball' class. The frame
    can optionally be tiled into overlapping slices (InferenceSlicer
    technique) so small objects occupy a larger fraction of each model
    input; tile results are merged with NMS.
    """

    def __init__(
        self,
        model_id: str = "pickleball-detection-1oqlw/1",
        confidence_threshold: float = 0.4,
        iou_threshold: float = 0.5,
        slice_enabled: bool = True,
        slice_height: int = 320,
        slice_width: int = 320,
        overlap_ratio: float = 0.2
    ):
        """
        Initialize the ball detector.

        Args:
            model_id: Roboflow model ID (format: workspace/project/version)
            confidence_threshold: Minimum confidence for detections
            iou_threshold: IoU threshold for NMS
            slice_enabled: Enable frame slicing for better small object detection
            slice_height: Height of each slice in pixels
            slice_width: Width of each slice in pixels
            overlap_ratio: Fractional overlap between adjacent slices (0..1)
        """
        self.model_id = model_id
        self.confidence_threshold = confidence_threshold
        self.iou_threshold = iou_threshold
        self.slice_enabled = slice_enabled
        self.slice_height = slice_height
        self.slice_width = slice_width
        self.overlap_ratio = overlap_ratio

        # Prefer the hosted API; fall back to a local YOLOv8 model when the
        # API key is missing or the client cannot be constructed.
        api_key = os.getenv("ROBOFLOW_API_KEY")

        if not api_key:
            print("✗ ROBOFLOW_API_KEY not set, using YOLO v8 fallback")
            from ultralytics import YOLO
            self.model = YOLO('yolov8n.pt')
            self.use_fallback = True
        else:
            try:
                self.client = InferenceHTTPClient(
                    api_url="https://serverless.roboflow.com",
                    api_key=api_key
                )
                print(f"✓ Initialized Roboflow Hosted Inference: {model_id}")
                self.use_fallback = False
            except Exception as e:
                print(f"✗ Failed to initialize Roboflow client: {e}")
                print("Falling back to YOLO v8 base model for sports ball detection")
                from ultralytics import YOLO
                self.model = YOLO('yolov8n.pt')
                self.use_fallback = True

    def detect(self, frame: np.ndarray) -> List[Dict]:
        """
        Detect balls in a single frame.

        Args:
            frame: Input frame (numpy array in BGR format)

        Returns:
            List of detections, each a dict:
            {'bbox': [x1, y1, x2, y2], 'confidence': float, 'center': [cx, cy]}
        """
        if self.use_fallback:
            return self._detect_with_yolo(frame)
        if self.slice_enabled:
            return self._detect_with_slicing(frame)
        return self._detect_single(frame)

    def _infer_image(self, image: np.ndarray) -> List[Dict]:
        """
        Run hosted inference on one image and return parsed detections.

        The hosted API expects a file path (or base64), so the image is
        written to a temporary JPEG which is always removed afterwards.
        """
        import tempfile
        fd, tmp_path = tempfile.mkstemp(suffix='.jpg')
        os.close(fd)  # cv2.imwrite reopens the file by path
        try:
            cv2.imwrite(tmp_path, image)
            results = self.client.infer(tmp_path, model_id=self.model_id)
            return self._parse_results(results)
        finally:
            os.unlink(tmp_path)

    def _detect_single(self, frame: np.ndarray) -> List[Dict]:
        """Detect on the full frame without slicing."""
        try:
            return self._infer_image(frame)
        except Exception as e:
            print(f"Detection error: {e}")
            return []

    def _detect_with_slicing(self, frame: np.ndarray) -> List[Dict]:
        """
        Detect using the InferenceSlicer technique.

        The frame is divided into overlapping tiles, each tile is run through
        the model, tile-local detections are shifted back to full-frame
        coordinates, and duplicates from overlapping tiles are removed by NMS.
        """
        height, width = frame.shape[:2]
        detections = []

        # Stride leaves `overlap_ratio` of each tile overlapping its neighbor.
        stride_h = int(self.slice_height * (1 - self.overlap_ratio))
        stride_w = int(self.slice_width * (1 - self.overlap_ratio))

        for y in range(0, height, stride_h):
            for x in range(0, width, stride_w):
                # Extract slice (edge tiles may be smaller than the nominal size)
                y_end = min(y + self.slice_height, height)
                x_end = min(x + self.slice_width, width)
                slice_img = frame[y:y_end, x:x_end]

                try:
                    slice_detections = self._infer_image(slice_img)
                except Exception:
                    # A failed tile must not abort the whole frame.
                    continue

                for det in slice_detections:
                    # Shift tile-local coordinates to full-frame coordinates.
                    det['bbox'][0] += x  # x1
                    det['bbox'][1] += y  # y1
                    det['bbox'][2] += x  # x2
                    det['bbox'][3] += y  # y2
                    det['center'][0] += x  # cx
                    det['center'][1] += y  # cy
                    detections.append(det)

        # Apply NMS to remove duplicate detections from overlapping slices
        return self._apply_nms(detections)

    def _detect_with_yolo(self, frame: np.ndarray) -> List[Dict]:
        """Fallback detection using an ultralytics YOLO model, keeping only
        the COCO 'sports ball' class (class id 32)."""
        results = self.model(frame, conf=self.confidence_threshold, verbose=False)

        detections = []
        for result in results:
            for box in result.boxes:
                if int(box.cls[0]) != 32:  # keep only 'sports ball'
                    continue
                x1, y1, x2, y2 = box.xyxy[0].cpu().numpy()
                conf = float(box.conf[0])
                detections.append({
                    'bbox': [float(x1), float(y1), float(x2), float(y2)],
                    'confidence': conf,
                    'center': [float((x1 + x2) / 2), float((y1 + y2) / 2)]
                })

        return detections

    def _parse_results(self, results) -> List[Dict]:
        """
        Parse Roboflow Hosted API results into the common detection format.

        The hosted API returns a dict with a 'predictions' list whose entries
        carry center-based boxes ('x', 'y', 'width', 'height') plus 'class'
        and 'confidence'. Entries whose class does not contain 'ball'
        (case-insensitive) are dropped.
        """
        detections = []

        if not results:
            return detections

        predictions = results.get('predictions', []) if isinstance(results, dict) else []

        for pred in predictions:
            if not isinstance(pred, dict):
                continue

            # Require the full center-based box
            if not all(k in pred for k in ('x', 'y', 'width', 'height')):
                continue

            # Filter for ball class
            class_name = pred.get('class', '')
            if 'ball' not in class_name.lower():
                continue

            # Convert center+size to corner coordinates
            cx = pred['x']
            cy = pred['y']
            w = pred['width']
            h = pred['height']

            detections.append({
                'bbox': [cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2],
                'confidence': pred.get('confidence', 0.0),
                'center': [cx, cy]
            })

        return detections

    def _apply_nms(self, detections: List[Dict]) -> List[Dict]:
        """Apply Non-Maximum Suppression to remove duplicate detections."""
        if not detections:
            return []

        # BUGFIX: cv2.dnn.NMSBoxes expects boxes as [x, y, width, height],
        # not [x1, y1, x2, y2]; convert so IoU is computed correctly.
        boxes = [
            [det['bbox'][0], det['bbox'][1],
             det['bbox'][2] - det['bbox'][0], det['bbox'][3] - det['bbox'][1]]
            for det in detections
        ]
        scores = [det['confidence'] for det in detections]

        indices = cv2.dnn.NMSBoxes(
            boxes,
            scores,
            self.confidence_threshold,
            self.iou_threshold
        )

        if len(indices) == 0:
            return []

        # NMSBoxes may return shape (N,) or (N, 1) depending on version
        return [detections[i] for i in np.array(indices).flatten()]
diff --git a/src/ball_tracker.py b/src/ball_tracker.py
new file mode 100644
index 0000000..7d0eae0
--- /dev/null
+++ b/src/ball_tracker.py
@@ -0,0 +1,419 @@
+"""
+Ball tracking module with buffer-based filtering and trajectory smoothing
+"""
+import numpy as np
+from typing import List, Dict, Optional, Tuple
+from collections import deque
+import cv2
+
+
class BallTracker:
    """
    Tracks the ball across frames using buffer-based filtering.

    Keeps a short history of recently observed positions to pick the most
    plausible detection each frame, bridges short occlusions with
    constant-velocity extrapolation, and records the full trajectory
    (observed and interpolated points) for later analysis.
    """

    def __init__(
        self,
        buffer_size: int = 10,
        max_distance_threshold: int = 100,
        min_confidence: float = 0.3
    ):
        """
        Initialize ball tracker.

        Args:
            buffer_size: Number of recent positions to store for filtering
            max_distance_threshold: Maximum pixel distance between frames to consider same ball
            min_confidence: Minimum confidence threshold for detections
        """
        self.buffer_size = buffer_size
        self.max_distance_threshold = max_distance_threshold
        self.min_confidence = min_confidence

        # Recent OBSERVED positions only (interpolated points are never
        # appended, so predictions stay anchored to real observations).
        self.position_buffer = deque(maxlen=buffer_size)

        # Current tracking state
        self.current_position = None
        self.lost_frames = 0
        self.max_lost_frames = 10  # Maximum frames to extrapolate when ball is lost

        # Full trajectory history
        self.trajectory = []

    def update(self, detections: List[Dict], frame_number: int) -> Optional[Dict]:
        """
        Update tracker with new detections from the current frame.

        Args:
            detections: List of ball detections from detector
            frame_number: Current frame number

        Returns:
            Best ball detection (filtered), an interpolated estimate while
            the ball is briefly lost, or None if no valid detection.
        """
        # Filter low confidence detections
        valid_detections = [
            det for det in detections
            if det['confidence'] >= self.min_confidence
        ]

        if len(valid_detections) == 0:
            # No detections - try to interpolate if recently had detection
            return self._handle_missing_detection(frame_number)

        # Select best detection
        best_detection = self._select_best_detection(valid_detections)

        if best_detection is None:
            return self._handle_missing_detection(frame_number)

        # Update buffer and state
        self.position_buffer.append(best_detection['center'])
        self.current_position = best_detection['center']
        self.lost_frames = 0

        # Add to trajectory
        self.trajectory.append({
            'frame': frame_number,
            'position': best_detection['center'],
            'pixel_coords': best_detection['center'],
            'real_coords': None,  # Filled by the video processor after calibration
            'confidence': best_detection['confidence']
        })

        return best_detection

    def _select_best_detection(self, detections: List[Dict]) -> Optional[Dict]:
        """
        Select the most likely ball detection from multiple candidates.

        Uses position history when available; otherwise the highest
        confidence candidate wins.
        """
        if len(detections) == 0:
            return None

        if len(detections) == 1:
            return detections[0]

        if len(self.position_buffer) > 0:
            return self._select_by_proximity(detections)

        # No history - return highest confidence
        return max(detections, key=lambda d: d['confidence'])

    def _select_by_proximity(self, detections: List[Dict]) -> Optional[Dict]:
        """
        Select the detection closest to the buffer's average position.

        Falls back to the highest-confidence detection when no candidate is
        within max_distance_threshold of the average.
        """
        # Average of recent observed positions acts as the predicted location
        avg_position = np.mean(self.position_buffer, axis=0)

        min_distance = float('inf')
        best_detection = None

        for det in detections:
            distance = np.linalg.norm(
                np.array(det['center']) - avg_position
            )

            if distance < self.max_distance_threshold and distance < min_distance:
                min_distance = distance
                best_detection = det

        if best_detection is None:
            best_detection = max(detections, key=lambda d: d['confidence'])

        return best_detection

    def _handle_missing_detection(self, frame_number: int) -> Optional[Dict]:
        """
        Handle a frame with no valid detection.

        Extrapolates the position for up to max_lost_frames frames, then
        gives up (returns None) so the track can be re-acquired cleanly.
        """
        self.lost_frames += 1

        # If lost for too long, stop interpolating
        if self.lost_frames > self.max_lost_frames:
            self.current_position = None
            return None

        # Need at least two observed positions to estimate a velocity
        if len(self.position_buffer) >= 2:
            interpolated = self._interpolate_position()

            # Record the estimate, flagged so downstream consumers can tell
            # it apart from a real observation (confidence 0.0).
            self.trajectory.append({
                'frame': frame_number,
                'position': interpolated,
                'pixel_coords': interpolated,
                'real_coords': None,
                'confidence': 0.0,
                'interpolated': True
            })

            return {
                'center': interpolated,
                'bbox': self._estimate_bbox(interpolated),
                'confidence': 0.0,
                'interpolated': True
            }

        return None

    def _interpolate_position(self) -> Tuple[float, float]:
        """
        Predict the current ball position from recent observed positions.

        Uses constant-velocity extrapolation from the last two buffered
        observations. BUGFIX: because the buffer does not change while the
        ball is lost, the step is scaled by lost_frames so the prediction
        keeps moving along the trajectory instead of freezing at the first
        extrapolated point.
        """
        if len(self.position_buffer) < 2:
            return self.current_position

        positions = np.array(self.position_buffer)

        # Per-frame velocity from the last two observations
        velocity = positions[-1] - positions[-2]

        # Advance by the number of frames the ball has been lost
        predicted = positions[-1] + velocity * self.lost_frames

        return tuple(predicted)

    def _estimate_bbox(self, center: Tuple[float, float], size: int = 20) -> List[float]:
        """
        Estimate a bounding box around an interpolated position.

        Args:
            center: Center position
            size: Estimated ball size in pixels

        Returns:
            [x1, y1, x2, y2] bbox
        """
        cx, cy = center
        half_size = size / 2
        return [cx - half_size, cy - half_size, cx + half_size, cy + half_size]

    def get_trajectory(self) -> List[Dict]:
        """Return the full trajectory history (observed + interpolated)."""
        return self.trajectory

    def get_smoothed_trajectory(self, window_size: int = 5) -> List[Dict]:
        """
        Return the trajectory with a 'smoothed_position' added per point,
        computed as a moving average over observed (non-interpolated) points.

        Args:
            window_size: Size of the smoothing window

        Returns:
            Smoothed trajectory
        """
        if len(self.trajectory) < window_size:
            return self.trajectory

        smoothed = []

        for i, point in enumerate(self.trajectory):
            # Centered window around point i
            start = max(0, i - window_size // 2)
            end = min(len(self.trajectory), i + window_size // 2 + 1)
            window = self.trajectory[start:end]

            # Average only real observations; interpolated points would bias
            # the smoothing toward the extrapolation.
            valid_positions = [
                p['position'] for p in window
                if not p.get('interpolated', False)
            ]

            if len(valid_positions) > 0:
                avg_position = np.mean(valid_positions, axis=0)
                smoothed_point = point.copy()
                smoothed_point['smoothed_position'] = tuple(avg_position)
                smoothed.append(smoothed_point)
            else:
                smoothed.append(point)

        return smoothed

    def reset(self):
        """Reset all tracker state (buffer, position, trajectory)."""
        self.position_buffer.clear()
        self.current_position = None
        self.lost_frames = 0
        self.trajectory = []

    def draw_trajectory(self, frame: np.ndarray, max_points: int = 30) -> np.ndarray:
        """
        Draw the recent ball trajectory on a frame.

        Args:
            frame: Input frame
            max_points: Maximum number of trajectory points to draw

        Returns:
            Frame copy with trajectory overlay (input frame is not modified)
        """
        if len(self.trajectory) == 0:
            return frame

        overlay = frame.copy()

        # Get recent trajectory points
        recent_trajectory = self.trajectory[-max_points:]

        # Draw trajectory line
        points = [point['position'] for point in recent_trajectory]
        if len(points) > 1:
            pts = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
            cv2.polylines(overlay, [pts], False, (0, 255, 255), 2)

        # Draw current position
        if self.current_position is not None:
            cv2.circle(
                overlay,
                (int(self.current_position[0]), int(self.current_position[1])),
                8,
                (0, 0, 255),
                -1
            )

        return overlay
+
+
class MultiObjectTracker:
    """
    Tracks several objects at once (ball, players, paddles) using a simple
    centroid-association scheme: each new detection is matched greedily to
    the nearest previously known centroid, subject to a distance cutoff.
    """

    def __init__(self, max_disappeared: int = 10, max_distance: int = 100):
        """
        Initialize multi-object tracker.

        Args:
            max_disappeared: Frames an object may go undetected before removal
            max_distance: Largest centroid distance still treated as a match
        """
        self.next_object_id = 0
        self.objects = {}      # object_id -> last known centroid
        self.disappeared = {}  # object_id -> consecutive missed frames
        self.max_disappeared = max_disappeared
        self.max_distance = max_distance

    def register(self, centroid: Tuple[float, float]) -> int:
        """Begin tracking a new object and return its assigned id."""
        new_id = self.next_object_id
        self.next_object_id += 1
        self.objects[new_id] = centroid
        self.disappeared[new_id] = 0
        return new_id

    def deregister(self, object_id: int):
        """Stop tracking the given object."""
        del self.objects[object_id]
        del self.disappeared[object_id]

    def update(self, detections: List[Dict]) -> Dict[int, Dict]:
        """
        Associate this frame's detections with tracked objects.

        Args:
            detections: List of detections, each carrying a 'center' key

        Returns:
            Mapping of object_id -> matched detection for this frame
        """
        if not detections:
            # Nothing seen this frame: age every track, dropping stale ones.
            for tracked_id in list(self.disappeared):
                self.disappeared[tracked_id] += 1
                if self.disappeared[tracked_id] > self.max_disappeared:
                    self.deregister(tracked_id)
            return {}

        centroids = np.array([det['center'] for det in detections])

        if not self.objects:
            # First frame with detections: every detection is a new object.
            return {
                self.register(centroids[idx]): det
                for idx, det in enumerate(detections)
            }

        tracked_ids = list(self.objects.keys())
        tracked_centroids = np.array([self.objects[tid] for tid in tracked_ids])

        # Pairwise distances: rows = existing objects, cols = detections.
        dist = np.linalg.norm(
            tracked_centroids[:, np.newaxis] - centroids,
            axis=2
        )

        # Greedy matching, nearest candidate pairs first.
        row_order = dist.min(axis=1).argsort()
        col_for_row = dist.argmin(axis=1)[row_order]

        matched_rows = set()
        matched_cols = set()
        assignments = {}

        for r, c in zip(row_order, col_for_row):
            if r in matched_rows or c in matched_cols:
                continue
            if dist[r, c] > self.max_distance:
                continue

            tid = tracked_ids[r]
            self.objects[tid] = centroids[c]
            self.disappeared[tid] = 0
            assignments[tid] = detections[c]

            matched_rows.add(r)
            matched_cols.add(c)

        # Age objects that found no detection this frame.
        for r in set(range(dist.shape[0])) - matched_rows:
            tid = tracked_ids[r]
            self.disappeared[tid] += 1
            if self.disappeared[tid] > self.max_disappeared:
                self.deregister(tid)

        # Unmatched detections become brand-new objects.
        for c in set(range(dist.shape[1])) - matched_cols:
            assignments[self.register(centroids[c])] = detections[c]

        return assignments
diff --git a/src/court_calibrator.py b/src/court_calibrator.py
new file mode 100644
index 0000000..f18b51c
--- /dev/null
+++ b/src/court_calibrator.py
@@ -0,0 +1,284 @@
+"""
+Court calibration module for mapping pixel coordinates to real-world coordinates
+Uses homography transformation based on court keypoints
+"""
+import numpy as np
+import cv2
+from typing import List, Tuple, Optional, Dict
+import json
+
+
class CourtCalibrator:
    """
    Calibrates camera perspective to map pixel coordinates to real-world court coordinates
    Uses homography transformation between 4 court-corner correspondences.
    """

    def __init__(self, court_width_m: float = 6.1, court_length_m: float = 13.4):
        """
        Initialize court calibrator

        Args:
            court_width_m: Width of pickleball court in meters (default: 6.1m)
            court_length_m: Length of pickleball court in meters (default: 13.4m)
        """
        self.court_width = court_width_m
        self.court_length = court_length_m
        # 3x3 homography (pixel -> real-world meters); None until calibrated
        self.homography_matrix = None
        # The 4 corner points in pixel space, set by calibrate_manual()
        self.court_corners_pixel = None
        # Real-world corner coordinates in meters. The x axis runs along the
        # court LENGTH and y along the WIDTH, matching the corner order below.
        self.court_corners_real = np.array([
            [0, 0],  # Top-left
            [court_length_m, 0],  # Top-right
            [court_length_m, court_width_m],  # Bottom-right
            [0, court_width_m]  # Bottom-left
        ], dtype=np.float32)

    def calibrate_manual(self, corner_points: List[Tuple[float, float]]) -> bool:
        """
        Manually calibrate using 4 corner points of the court

        Args:
            corner_points: List of 4 (x, y) tuples representing court corners in pixels
                Order: [top-left, top-right, bottom-right, bottom-left]

        Returns:
            True if calibration successful, False otherwise
        """
        if len(corner_points) != 4:
            print("Error: Need exactly 4 corner points")
            return False

        self.court_corners_pixel = np.array(corner_points, dtype=np.float32)

        # Calculate homography matrix
        # NOTE(review): with exactly 4 correspondences RANSAC has no outliers
        # to reject and behaves like a direct fit — confirm method choice.
        self.homography_matrix, status = cv2.findHomography(
            self.court_corners_pixel,
            self.court_corners_real,
            method=cv2.RANSAC
        )

        if self.homography_matrix is None:
            print("Error: Failed to calculate homography matrix")
            return False

        print("✓ Court calibration successful")
        return True

    def calibrate_auto(self, frame: np.ndarray) -> bool:
        """
        Automatically detect court corners using computer vision
        This is a simplified version - can be enhanced with YOLO keypoint detection

        Currently a placeholder: it runs edge/line detection but always
        returns False and directs the caller to calibrate_manual().

        Args:
            frame: Video frame to detect court corners from

        Returns:
            True if calibration successful, False otherwise
        """
        # Convert to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Apply edge detection
        edges = cv2.Canny(gray, 50, 150, apertureSize=3)

        # Detect lines using Hough Transform
        lines = cv2.HoughLinesP(edges, 1, np.pi/180, threshold=100,
                                minLineLength=100, maxLineGap=10)

        if lines is None or len(lines) < 4:
            print("Error: Could not detect enough lines for court corners")
            return False

        # This is a placeholder - in production, you'd use:
        # 1. YOLO keypoint detection model trained on court corners
        # 2. More sophisticated line intersection detection
        # 3. Court line template matching

        print("Warning: Auto-calibration not fully implemented")
        print("Please use calibrate_manual() with corner points")
        return False

    def pixel_to_real(self, pixel_coords: Tuple[float, float]) -> Optional[Tuple[float, float]]:
        """
        Transform pixel coordinates to real-world court coordinates

        Args:
            pixel_coords: (x, y) tuple in pixel space

        Returns:
            (x, y) tuple in real-world meters, or None if not calibrated
        """
        if self.homography_matrix is None:
            print("Error: Court not calibrated. Call calibrate_manual() first")
            return None

        # perspectiveTransform requires float32 points in (N, 1, 2) layout
        pixel_point = np.array([[pixel_coords[0], pixel_coords[1]]], dtype=np.float32)
        pixel_point = pixel_point.reshape(-1, 1, 2)

        # Apply homography transformation
        real_point = cv2.perspectiveTransform(pixel_point, self.homography_matrix)

        x, y = real_point[0][0]
        return (float(x), float(y))

    def real_to_pixel(self, real_coords: Tuple[float, float]) -> Optional[Tuple[float, float]]:
        """
        Transform real-world court coordinates to pixel coordinates

        Args:
            real_coords: (x, y) tuple in real-world meters

        Returns:
            (x, y) tuple in pixel space, or None if not calibrated
        """
        if self.homography_matrix is None:
            print("Error: Court not calibrated")
            return None

        # Use inverse homography (recomputed on every call; cheap for 3x3)
        inv_homography = np.linalg.inv(self.homography_matrix)

        real_point = np.array([[real_coords[0], real_coords[1]]], dtype=np.float32)
        real_point = real_point.reshape(-1, 1, 2)

        pixel_point = cv2.perspectiveTransform(real_point, inv_homography)

        x, y = pixel_point[0][0]
        return (float(x), float(y))

    def is_calibrated(self) -> bool:
        """Check if court is calibrated"""
        return self.homography_matrix is not None

    def draw_court_overlay(self, frame: np.ndarray) -> np.ndarray:
        """
        Draw court boundaries on frame for visualization

        Args:
            frame: Input frame

        Returns:
            Frame with court overlay (a copy; the input is not modified)
        """
        if not self.is_calibrated():
            return frame

        overlay = frame.copy()

        # Draw court corners
        if self.court_corners_pixel is not None:
            for point in self.court_corners_pixel:
                cv2.circle(overlay, (int(point[0]), int(point[1])), 10, (0, 255, 0), -1)

            # Draw court boundary lines (closed polygon through the 4 corners)
            pts = self.court_corners_pixel.astype(np.int32).reshape((-1, 1, 2))
            cv2.polylines(overlay, [pts], True, (0, 255, 0), 2)

        return overlay

    def save_calibration(self, filepath: str):
        """Save calibration data (dimensions, homography, pixel corners) to a JSON file"""
        if not self.is_calibrated():
            print("Error: No calibration to save")
            return

        data = {
            'court_width_m': self.court_width,
            'court_length_m': self.court_length,
            'homography_matrix': self.homography_matrix.tolist(),
            'court_corners_pixel': self.court_corners_pixel.tolist()
        }

        with open(filepath, 'w') as f:
            json.dump(data, f, indent=2)

        print(f"✓ Calibration saved to {filepath}")

    def load_calibration(self, filepath: str) -> bool:
        """Load calibration data from a JSON file written by save_calibration()"""
        try:
            with open(filepath, 'r') as f:
                data = json.load(f)

            self.court_width = data['court_width_m']
            self.court_length = data['court_length_m']
            # NOTE(review): arrays are restored with default dtype (float64),
            # not the float32 used at calibration time — confirm cv2 accepts
            # both here (perspectiveTransform takes a float64 matrix).
            self.homography_matrix = np.array(data['homography_matrix'])
            self.court_corners_pixel = np.array(data['court_corners_pixel'])

            print(f"✓ Calibration loaded from {filepath}")
            return True

        except Exception as e:
            print(f"Error loading calibration: {e}")
            return False
+
+
class InteractiveCalibrator:
    """
    Interactive tool for manual court calibration
    Click on 4 corners of the court in order: top-left, top-right, bottom-right, bottom-left
    """

    def __init__(self):
        # Clicked corner points in click order (at most 4)
        self.points = []
        self.window_name = "Court Calibration - Click 4 corners (TL, TR, BR, BL)"

    def _mouse_callback(self, event, x, y, flags, param):
        """Handle mouse clicks: record left-clicks until 4 corners are captured"""
        if event == cv2.EVENT_LBUTTONDOWN and len(self.points) < 4:
            self.points.append((x, y))
            print(f"Point {len(self.points)}: ({x}, {y})")

    def calibrate_interactive(self, frame: np.ndarray) -> Optional[List[Tuple[float, float]]]:
        """
        Interactive calibration - user clicks 4 corners

        Blocks in a cv2 GUI loop until 4 points are clicked ('r' resets the
        selection, 'q' cancels).

        Args:
            frame: First frame of video to calibrate on

        Returns:
            List of 4 corner points, or None if cancelled
        """
        display = frame.copy()
        cv2.namedWindow(self.window_name)
        cv2.setMouseCallback(self.window_name, self._mouse_callback)

        print("\nClick on 4 court corners in this order:")
        print("1. Top-left")
        print("2. Top-right")
        print("3. Bottom-right")
        print("4. Bottom-left")
        print("Press 'q' to cancel, 'r' to reset")

        while True:
            # Redraw the overlay every tick so new clicks appear immediately
            temp = display.copy()
            for i, point in enumerate(self.points):
                cv2.circle(temp, point, 5, (0, 255, 0), -1)
                cv2.putText(temp, str(i+1), (point[0]+10, point[1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Draw lines between points
            if len(self.points) > 1:
                for i in range(len(self.points) - 1):
                    cv2.line(temp, self.points[i], self.points[i+1], (0, 255, 0), 2)

            cv2.imshow(self.window_name, temp)
            key = cv2.waitKey(1) & 0xFF

            if key == ord('q'):
                print("Calibration cancelled")
                cv2.destroyAllWindows()
                return None

            if key == ord('r'):
                print("Reset points")
                self.points = []

            # Finish as soon as the 4th corner has been clicked
            if len(self.points) == 4:
                print("\n✓ All 4 points selected")
                cv2.destroyAllWindows()
                return self.points

        # Unreachable: the while-True loop only exits via the returns above
        return None
diff --git a/src/video_processor.py b/src/video_processor.py
new file mode 100644
index 0000000..26b5796
--- /dev/null
+++ b/src/video_processor.py
@@ -0,0 +1,378 @@
+"""
+Main video processing pipeline
+Combines ball detection, court calibration, and tracking
+"""
+import cv2
+import numpy as np
+import json
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple
+from tqdm import tqdm
+import time
+
+from .ball_detector import BallDetector
+from .court_calibrator import CourtCalibrator, InteractiveCalibrator
+from .ball_tracker import BallTracker
+
+
+class VideoProcessor:
+ """
+ Main pipeline for processing pickleball videos
+ """
+
+ def __init__(
+ self,
+ model_id: str = "pickleball-detection-1oqlw/1",
+ config_path: Optional[str] = None
+ ):
+ """
+ Initialize video processor
+
+ Args:
+ model_id: Roboflow model ID
+ config_path: Path to configuration JSON file
+ """
+ # Load config
+ self.config = self._load_config(config_path)
+
+ # Initialize components
+ self.detector = BallDetector(
+ model_id=model_id,
+ confidence_threshold=self.config['detection']['confidence_threshold'],
+ slice_enabled=self.config['detection']['slice_enabled'],
+ slice_height=self.config['detection']['slice_height'],
+ slice_width=self.config['detection']['slice_width']
+ )
+
+ self.calibrator = CourtCalibrator(
+ court_width_m=self.config['court']['width_m'],
+ court_length_m=self.config['court']['length_m']
+ )
+
+ self.tracker = BallTracker(
+ buffer_size=self.config['tracking']['buffer_size'],
+ max_distance_threshold=self.config['tracking']['max_distance_threshold']
+ )
+
+ self.video_path = None
+ self.cap = None
+ self.fps = None
+ self.total_frames = None
+
+ def _load_config(self, config_path: Optional[str]) -> Dict:
+ """Load configuration from JSON file"""
+ default_config = {
+ 'court': {
+ 'width_m': 6.1,
+ 'length_m': 13.4
+ },
+ 'detection': {
+ 'confidence_threshold': 0.4,
+ 'iou_threshold': 0.5,
+ 'slice_enabled': True,
+ 'slice_height': 320,
+ 'slice_width': 320
+ },
+ 'tracking': {
+ 'buffer_size': 10,
+ 'max_distance_threshold': 100
+ }
+ }
+
+ if config_path and Path(config_path).exists():
+ with open(config_path, 'r') as f:
+ config = json.load(f)
+ # Merge with defaults
+ for key in default_config:
+ if key not in config:
+ config[key] = default_config[key]
+ return config
+
+ return default_config
+
+ def load_video(self, video_path: str) -> bool:
+ """
+ Load video file
+
+ Args:
+ video_path: Path to video file
+
+ Returns:
+ True if successful, False otherwise
+ """
+ self.video_path = video_path
+ self.cap = cv2.VideoCapture(video_path)
+
+ if not self.cap.isOpened():
+ print(f"Error: Could not open video {video_path}")
+ return False
+
+ self.fps = self.cap.get(cv2.CAP_PROP_FPS)
+ self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+ print(f"✓ Loaded video: {video_path}")
+ print(f" FPS: {self.fps}")
+ print(f" Total frames: {self.total_frames}")
+ print(f" Duration: {self.total_frames / self.fps:.2f} seconds")
+
+ return True
+
+ def calibrate_court(
+ self,
+ corner_points: Optional[List[Tuple[float, float]]] = None,
+ interactive: bool = False
+ ) -> bool:
+ """
+ Calibrate court for coordinate transformation
+
+ Args:
+ corner_points: Manual corner points [TL, TR, BR, BL], or None for interactive
+ interactive: If True, use interactive calibration tool
+
+ Returns:
+ True if calibration successful
+ """
+ if corner_points is not None:
+ return self.calibrator.calibrate_manual(corner_points)
+
+ if interactive:
+ # Get first frame
+ self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
+ ret, frame = self.cap.read()
+ if not ret:
+ print("Error: Could not read first frame")
+ return False
+
+ # Interactive calibration
+ calibrator = InteractiveCalibrator()
+ points = calibrator.calibrate_interactive(frame)
+
+ if points is None:
+ return False
+
+ return self.calibrator.calibrate_manual(points)
+
+ print("Warning: No calibration method specified")
+ print("Processing will continue without coordinate transformation")
+ return False
+
    def process_video(
        self,
        output_path: Optional[str] = None,
        save_visualization: bool = False,
        visualization_path: Optional[str] = None,
        start_frame: int = 0,
        end_frame: Optional[int] = None
    ) -> Dict:
        """
        Process video and extract ball trajectory.

        Runs detection and tracking over [start_frame, end_frame) and
        collects one entry per frame into a JSON-serializable dict.

        Args:
            output_path: Path to save JSON results (None = don't save)
            save_visualization: If True, save video with annotations
            visualization_path: Path to save visualization video; defaults
                to "<video stem>_tracked.mp4" in the current directory
            start_frame: Frame to start processing from
            end_frame: Frame to end processing at (None = end of video)

        Returns:
            Dictionary with processing results

        Raises:
            ValueError: If no video was loaded via load_video() first
        """
        if self.cap is None:
            raise ValueError("No video loaded. Call load_video() first")

        # Drop any tracking state left over from a previous run
        self.tracker.reset()

        # Set frame range
        if end_frame is None:
            end_frame = self.total_frames

        # Setup visualization writer if needed
        video_writer = None
        if save_visualization:
            if visualization_path is None:
                # NOTE: stem only — the output lands in the current working
                # directory, not next to the source video
                visualization_path = str(Path(self.video_path).stem) + "_tracked.mp4"

            frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            video_writer = cv2.VideoWriter(
                visualization_path,
                fourcc,
                self.fps,
                (frame_width, frame_height)
            )

        # Result skeleton; per-frame entries are appended below.
        # NOTE(review): assumes self.fps > 0 — a container reporting 0 FPS
        # would raise ZeroDivisionError here; confirm load_video guarantees it.
        results = {
            'video_path': self.video_path,
            'fps': self.fps,
            'total_frames': end_frame - start_frame,
            'duration_sec': (end_frame - start_frame) / self.fps,
            'court': {
                'width_m': self.config['court']['width_m'],
                'length_m': self.config['court']['length_m'],
                'calibrated': self.calibrator.is_calibrated()
            },
            'frames': []
        }

        # Seek once, then read sequentially
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

        print(f"\nProcessing frames {start_frame} to {end_frame}...")
        start_time = time.time()

        for frame_num in tqdm(range(start_frame, end_frame)):
            ret, frame = self.cap.read()
            if not ret:
                # Video ended early; 'total_frames' in results keeps the
                # requested count, but 'frames' will be shorter
                break

            # Detect ball
            detections = self.detector.detect(frame)

            # Track ball (may return an interpolated position on missed frames)
            ball = self.tracker.update(detections, frame_num)

            # Per-frame record; 'ball' stays None when nothing was tracked
            frame_data = {
                'frame_number': frame_num,
                'timestamp': frame_num / self.fps,
                'ball': None
            }

            if ball is not None:
                pixel_coords = ball['center']
                real_coords = None

                # Transform to real-world coordinates only if calibrated
                if self.calibrator.is_calibrated():
                    # presumably a (x_m, y_m) tuple — TODO confirm against
                    # CourtCalibrator.pixel_to_real
                    real_coords = self.calibrator.pixel_to_real(pixel_coords)

                frame_data['ball'] = {
                    'detected': True,
                    'pixel_coords': {
                        'x': float(pixel_coords[0]),
                        'y': float(pixel_coords[1])
                    },
                    'real_coords_m': {
                        'x': float(real_coords[0]) if real_coords else None,
                        'y': float(real_coords[1]) if real_coords else None
                    } if real_coords else None,
                    'confidence': float(ball['confidence']),
                    'interpolated': ball.get('interpolated', False)
                }

            results['frames'].append(frame_data)

            # Draw visualization if needed
            if save_visualization:
                vis_frame = self._draw_visualization(frame, ball, frame_num)
                video_writer.write(vis_frame)

        # Finalize the writer so the output container is valid
        if video_writer is not None:
            video_writer.release()

        processing_time = time.time() - start_time
        results['processing_time_sec'] = processing_time
        results['fps_processing'] = (end_frame - start_frame) / processing_time

        print(f"\n✓ Processing complete!")
        print(f"  Time: {processing_time:.2f} seconds")
        print(f"  Speed: {results['fps_processing']:.2f} FPS")

        # Save results
        if output_path:
            self.save_results(results, output_path)

        return results
+
+ def _draw_visualization(
+ self,
+ frame: np.ndarray,
+ ball: Optional[Dict],
+ frame_num: int
+ ) -> np.ndarray:
+ """
+ Draw visualization on frame
+
+ Args:
+ frame: Input frame
+ ball: Ball detection
+ frame_num: Current frame number
+
+ Returns:
+ Frame with visualization
+ """
+ vis = frame.copy()
+
+ # Draw court overlay if calibrated
+ if self.calibrator.is_calibrated():
+ vis = self.calibrator.draw_court_overlay(vis)
+
+ # Draw ball trajectory
+ vis = self.tracker.draw_trajectory(vis)
+
+ # Draw current ball
+ if ball is not None:
+ center = ball['center']
+ color = (0, 255, 0) if not ball.get('interpolated', False) else (255, 0, 0)
+
+ # Draw bounding box
+ bbox = ball.get('bbox', self.tracker._estimate_bbox(center))
+ cv2.rectangle(
+ vis,
+ (int(bbox[0]), int(bbox[1])),
+ (int(bbox[2]), int(bbox[3])),
+ color,
+ 2
+ )
+
+ # Draw center point
+ cv2.circle(vis, (int(center[0]), int(center[1])), 5, color, -1)
+
+ # Draw confidence
+ if ball['confidence'] > 0:
+ cv2.putText(
+ vis,
+ f"{ball['confidence']:.2f}",
+ (int(center[0]) + 10, int(center[1]) - 10),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 0.5,
+ color,
+ 2
+ )
+
+ # Draw frame number
+ cv2.putText(
+ vis,
+ f"Frame: {frame_num}",
+ (10, 30),
+ cv2.FONT_HERSHEY_SIMPLEX,
+ 1,
+ (255, 255, 255),
+ 2
+ )
+
+ return vis
+
+ def save_results(self, results: Dict, output_path: str):
+ """
+ Save processing results to JSON file
+
+ Args:
+ results: Results dictionary
+ output_path: Path to save JSON
+ """
+ output_path = Path(output_path)
+ output_path.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_path, 'w') as f:
+ json.dump(results, f, indent=2)
+
+ print(f"✓ Results saved to {output_path}")
+
+ def close(self):
+ """Release video resources"""
+ if self.cap is not None:
+ self.cap.release()