πŸ§ͺ AutoSolve Research Dataset (Beta)

Community-driven telemetry for 3D Camera Tracking

This dataset collects anonymized tracking sessions from the AutoSolve Blender Addon. The data is used to train an adaptive learning system that predicts optimal tracking settings (Search Size, Pattern Size, Motion Model) based on footage characteristics.


🀝 How to Contribute

Your data makes AutoSolve smarter for everyone.

Step 1: Export from Blender

  1. Open Blender and go to the Movie Clip Editor.
  2. In the AutoSolve panel, find the Research Beta sub-panel.
  3. Click Export (exports as autosolve_telemetry_YYYYMMDD_HHMMSS.zip).

Step 2: Upload Here

  1. Click the "Files and versions" tab at the top of this page.
  2. Click "Add file" β†’ "Upload file". (You need to be logged in to Hugging Face to upload.)
  3. Drag and drop your .zip file.
  4. (Optional) Add a brief description, e.g. "10 drone shots, 4K 30fps, outdoor".
  5. Click "Commit changes" (creates a Pull Request).

Note: Contributions are reviewed before merging to ensure data quality and privacy compliance.
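
If you prefer to upload from a script instead of the web UI, a minimal sketch using the huggingface_hub library follows (assuming huggingface_hub is installed and you are logged in with a write-enabled token; the ZIP filename below is a placeholder):

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or the HF_TOKEN environment variable
api.upload_file(
    path_or_fileobj="autosolve_telemetry_20251212_103045.zip",  # placeholder: your exported ZIP
    path_in_repo="autosolve_telemetry_20251212_103045.zip",
    repo_id="UsamaSQ/autosolve-telemetry",
    repo_type="dataset",
    create_pr=True,  # opens a Pull Request so the contribution can be reviewed, as noted above
    commit_message="10 drone shots, 4K 30fps, outdoor",  # optional short description
)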

Step 3: Join the Community

Have questions or want to discuss your contributions?

Discord: Join our community
Documentation: Full contribution guide


πŸ“Š Dataset Structure

Each ZIP file contains anonymized numerical telemetry:

1. Session Records (/sessions/*.json)

Individual tracking attempts with complete metrics.

What's Included:

  • Footage Metadata: Resolution, FPS, Frame Count
  • Settings Used: Pattern Size, Search Size, Correlation, Motion Model
  • Results: Solve Error, Bundle Count, Success/Failure
  • Camera Intrinsics: Focal Length, Sensor Size, Distortion Coefficients (K1, K2, K3)
  • Motion Analysis: Motion Class (LOW/MEDIUM/HIGH), Parallax Score, Velocity Statistics
  • Feature Density: Count of trackable features per 9-grid region (from Blender's detect_features)
  • Time Series: Per-frame active tracks, dropout rates, velocity profiles
  • Track Lifecycle: Per-marker survival, jitter, reprojection error
  • Track Healing: Anchor tracks, healing attempts, gap interpolation results
  • Track Averaging: Merged segment counts

Example Session:

{
  "schema_version": 1,
  "timestamp": "2025-12-12T10:30:00",
  "resolution": [1920, 1080],
  "fps": 30,
  "frame_count": 240,
  "settings": {
    "pattern_size": 17,
    "search_size": 91,
    "correlation": 0.68,
    "motion_model": "LocRot"
  },
  "success": true,
  "solve_error": 0.42,
  "bundle_count": 45,
  "motion_class": "MEDIUM",
  "visual_features": {
    "feature_density": {
      "center": 12,
      "top-left": 8,
      "top-right": 6
    },
    "motion_magnitude": 0.015,
    "edge_density": {
      "center": 0.85,
      "top-left": 0.42
    }
  },
  "healing_stats": {
    "candidates_found": 5,
    "heals_attempted": 3,
    "heals_successful": 2,
    "avg_gap_frames": 15.0
  }
}

2. Behavior Records (/behavior/*.json)

THE KEY LEARNING DATA - How experts improve tracking.

What's Captured:

  • Track Additions: πŸ”‘ Which markers users manually add (region, position, quality)
  • Track Deletions: Which markers users remove (region, lifespan, error, reason)
  • Settings Adjustments: Which parameters users changed (before/after values)
  • Re-solve Results: Whether user changes improved solve error
  • Marker Refinements: Manual position adjustments
  • Net Track Change: How many tracks were added vs removed
  • Region Reinforcement: Which regions pros manually populated

Purpose: Teaches the AI how experts improve tracking, not just how they clean up.

Example Behavior:

{
  "schema_version": 1,
  "clip_fingerprint": "a7f3c89b2e71d6f0",
  "contributor_id": "x7f2k9a1",
  "iteration": 3,
  "track_additions": [
    {
      "track_name": "Track.042",
      "region": "center",
      "initial_frame": 45,
      "position": [0.52, 0.48],
      "lifespan_achieved": 145,
      "had_bundle": true,
      "reprojection_error": 0.32
    }
  ],
  "track_deletions": [
    {
      "track_name": "Track.003",
      "region": "top-right",
      "lifespan": 12,
      "had_bundle": false,
      "reprojection_error": 2.8,
      "inferred_reason": "high_error"
    }
  ],
  "net_track_change": 3,
  "region_additions": { "center": 2, "bottom-center": 1 },
  "re_solve": {
    "attempted": true,
    "error_before": 0.87,
    "error_after": 0.42,
    "improvement": 0.45,
    "improved": true
  }
}
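
As a quick illustration of how behavior records can be aggregated, the sketch below tallies region_additions across all behavior files in one export (field names follow the example above; the ZIP path is a placeholder):

import json
import zipfile
from collections import Counter

region_counts = Counter()
with zipfile.ZipFile('autosolve_telemetry_20251212_103045.zip') as zf:
    for name in zf.namelist():
        if name.startswith('behavior/') and name.endswith('.json'):
            behavior = json.loads(zf.read(name))
            # region_additions maps region name -> number of manually added tracks
            region_counts.update(behavior.get('region_additions', {}))

print("Regions experts reinforce most often:")
for region, count in region_counts.most_common():
    print(f"  {region}: {count}")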

3. Model State (model.json)

The user's local statistical model state showing learned patterns.
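
A minimal sketch for inspecting it, assuming model.json sits in the archive root next to manifest.json:

import json
import zipfile

with zipfile.ZipFile('autosolve_telemetry_20251212_103045.zip') as zf:
    model = json.loads(zf.read('model.json'))
    # Top-level keys summarize what the local model has learned so far
    print(sorted(model.keys()))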


πŸ“‹ What Gets Collected

Each contribution includes:

βœ… Numerical Metrics

  • Tracking settings that worked (or failed)
  • Motion analysis (velocity, direction, parallax)
  • Per-track survival and quality metrics
  • Feature density counts per region

βœ… Camera Characteristics

  • Focal length and sensor size
  • Lens distortion coefficients
  • Principal point coordinates

βœ… Time Series Data

  • Per-frame active track counts
  • Track dropout rates
  • Velocity profiles over time

πŸ”’ Data Privacy & Ethics

We take privacy seriously. This dataset contains numerical telemetry only.

❌ NOT Collected:

  • Images, video frames, or pixel data
  • File paths or project names
  • User identifiers (IPs, usernames, emails)
  • System information

βœ… Only Collected:

  • Resolution, FPS, frame count
  • Mathematical motion vectors
  • Tracking settings and success metrics
  • Feature density counts (not actual features)

For complete schema documentation, see TRAINING_DATA.md


πŸ›  Usage for Researchers

This data is ideal for training models related to:

Hyperparameter Optimization

Predicting optimal tracking settings (Search Size, Pattern Size, Correlation, Motion Model) based on footage characteristics and motion analysis.
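
For instance, a minimal sketch (assuming scikit-learn is installed and sessions has been loaded as in the Python example further below) that relates settings and footage characteristics to solve success:

from sklearn.ensemble import RandomForestClassifier

# Build a simple feature matrix from fields shown in the example session
X, y = [], []
for s in sessions:
    settings = s.get('settings', {})
    X.append([
        s['resolution'][0],          # frame width
        s['fps'],
        settings.get('pattern_size', 0),
        settings.get('search_size', 0),
        settings.get('correlation', 0.0),
    ])
    y.append(int(s['success']))

clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
print("Feature importances (width, fps, pattern, search, correlation):")
print(clf.feature_importances_)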

Outlier Detection

Identifying "bad" 2D tracks before camera solve using lifecycle and jitter patterns.

Motion Classification

Classifying camera motion types (Drone, Handheld, Tripod) from sparse optical flow and feature density.

Temporal Modeling

Predicting track dropout using RNN/LSTM trained on per-frame time series data.


πŸ’» Loading the Dataset

Python Example

import json
import zipfile
from pathlib import Path
from collections import defaultdict

# Load a contributed ZIP
zip_path = Path('autosolve_telemetry_20251212_103045.zip')

with zipfile.ZipFile(zip_path, 'r') as zf:
    # Read manifest
    manifest = json.loads(zf.read('manifest.json'))
    print(f"Export Version: {manifest['export_version']}")
    print(f"Sessions: {manifest['session_count']}")
    print(f"Behaviors: {manifest['behavior_count']}")

    # Load all sessions
    sessions = []
    for filename in zf.namelist():
        if filename.startswith('sessions/') and filename.endswith('.json'):
            session_data = json.loads(zf.read(filename))
            sessions.append(session_data)

    # Analyze by footage class
    by_class = defaultdict(list)
    for s in sessions:
        width = s['resolution'][0]
        fps = s['fps']
        motion = s.get('motion_class', 'MEDIUM')
        cls = f"{'4K' if width >= 3840 else 'HD' if width >= 1920 else 'SD'}_{int(fps)}fps_{motion}"
        by_class[cls].append(s['success'])

    # Success rates per class
    print("\nSuccess Rates by Footage Class:")
    for cls, results in sorted(by_class.items()):
        rate = sum(results) / len(results)
        print(f"  {cls}: {rate:.1%} ({len(results)} sessions)")

Feature Extraction Example

# Extract feature density patterns
feature_densities = []
for session in sessions:
    vf = session.get('visual_features', {})
    density = vf.get('feature_density', {})
    if density:
        feature_densities.append({
            'motion_class': session.get('motion_class'),
            'center': density.get('center', 0),
            'edges': sum([
                density.get('top-left', 0),
                density.get('top-right', 0),
                density.get('bottom-left', 0),
                density.get('bottom-right', 0)
            ]) / 4,
            'success': session['success']
        })

# Analyze: Do edge-heavy clips succeed more?
import pandas as pd
df = pd.DataFrame(feature_densities)
print(df.groupby('success')['edges'].mean())
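
Downloading All Contributions

To fetch every contributed archive locally, a sketch using huggingface_hub (assuming the library is installed and that contributed ZIPs sit at the repository root):

from pathlib import Path
from huggingface_hub import snapshot_download

# Download all files from this dataset repository into a local folder
local_dir = snapshot_download(
    repo_id="UsamaSQ/autosolve-telemetry",
    repo_type="dataset",
)

zip_paths = sorted(Path(local_dir).glob("*.zip"))
print(f"Found {len(zip_paths)} contributed archives")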

πŸ“ˆ Dataset Statistics

Current Status: Beta Collection Phase

Target:

  • 100+ unique footage types
  • 500+ successful tracking sessions
  • Diverse motion classes and resolutions

Contribute to help us reach a production-ready dataset size! πŸš€


πŸ“– Citation

If you use this dataset in your research, please cite:

@misc{autosolve-telemetry-2025,
  title={AutoSolve Telemetry: Community-Driven Camera Tracking Dataset},
  author={Bin Shahid, Usama},
  year={2025},
  publisher={HuggingFace},
  url={https://huggingface.co/datasets/UsamaSQ/autosolve-telemetry}
}

🀝 Community & Support

Repository: GitHub.com/UsamaSQ/AutoSolve
Discord: Join our community
Maintainer: Usama Bin Shahid

Your contributions make AutoSolve better for everyone! πŸ™
