Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 36 additions & 0 deletions scripts/detect-cuts.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
#!/bin/bash
# Detect scene changes / cut points in a video.
#
# Usage: ./detect-cuts.sh <video_file> [threshold]
#
#   threshold: 0.0-1.0, lower = more sensitive (default: 0.3)
#
# Output: HH:MM:SS.ss timestamps where scene changes occur.

set -euo pipefail

VIDEO="${1:?Usage: $0 <video_file> [threshold]}"
THRESHOLD="${2:-0.3}"

if [ ! -f "$VIDEO" ]; then
  echo "File not found: $VIDEO" >&2
  exit 1
fi

echo "Analyzing: $VIDEO"
echo "Threshold: $THRESHOLD (lower = more sensitive)"
echo ""
echo "Detecting scene changes..."
echo ""

# FFmpeg's select filter keeps only frames whose scene-change score exceeds
# the threshold; showinfo then logs one line (containing pts_time) per kept
# frame.  A single awk pass converts seconds to HH:MM:SS.ss — much faster
# than spawning three bc processes per timestamp, and it doesn't require bc
# to be installed.
ffmpeg -i "$VIDEO" -vf "select='gt(scene,$THRESHOLD)',showinfo" -vsync vfr -f null - 2>&1 | \
  sed -n 's/.*showinfo.*pts_time:\([0-9.]*\).*/\1/p' | \
  awk '{ printf "%02d:%02d:%05.2f\n", int($1/3600), int(($1%3600)/60), $1%60 }'

echo ""
echo "Done!"
67 changes: 67 additions & 0 deletions scripts/detect-slide-changes.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
#!/bin/bash
# Detect major slide changes in the processed video's slide overlay area.
# The slides appear as a 320px-height overlay in the bottom-right corner
# with a 40px margin (matching the layout produced by process-videos.py).
#
# Usage: ./detect-slide-changes.sh <video_file> [threshold]
#
#   threshold: 0.0-1.0, higher = only major changes (default: 0.3)

set -o pipefail

VIDEO="${1:?Usage: $0 <video_file> [threshold]}"
THRESHOLD="${2:-0.3}"

if [ ! -f "$VIDEO" ]; then
  echo "File not found: $VIDEO" >&2
  exit 1
fi

# Get video dimensions and validate them before doing arithmetic with them.
dimensions=$(ffprobe -v error -select_streams v:0 \
  -show_entries stream=width,height -of csv=p=0 "$VIDEO" 2>/dev/null)
width=$(echo "$dimensions" | cut -d',' -f1)
height=$(echo "$dimensions" | cut -d',' -f2)
if ! [[ "$width" =~ ^[0-9]+$ && "$height" =~ ^[0-9]+$ ]]; then
  echo "Could not read video dimensions from: $VIDEO" >&2
  exit 1
fi

# The slide overlay is 320px tall, aspect ratio ~16:9, so roughly 569x320,
# positioned at bottom-right with a 40px margin.  crop syntax: w:h:x:y.
crop_w=569
crop_h=320
crop_x=$((width - crop_w - 40))
crop_y=$((height - crop_h - 40))

# Guard against videos too small to contain the overlay region; a negative
# crop offset would make FFmpeg fail with a confusing error.
if (( crop_x < 0 || crop_y < 0 )); then
  echo "Video ${width}x${height} is too small for a ${crop_w}x${crop_h} overlay crop" >&2
  exit 1
fi

echo "========================================"
echo "Analyzing: $(basename "$VIDEO")"
echo "Video size: ${width}x${height}"
echo "Monitoring: Slide overlay area (${crop_w}x${crop_h} at ${crop_x},${crop_y})"
echo "Threshold: $THRESHOLD"
echo "========================================"
echo ""

# Print the duration only if ffprobe gave us a usable number.
duration=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$VIDEO" 2>/dev/null)
dur_s=${duration%.*}
if [[ "$dur_s" =~ ^[0-9]+$ ]]; then
  printf 'Video duration: %02d:%02d:%02d\n' $((dur_s/3600)) $((dur_s%3600/60)) $((dur_s%60))
fi
echo ""
echo "Detecting slide changes (this may take a while)..."
echo ""

# Unpredictable temp file with cleanup, instead of a fixed /tmp path that
# can collide between users/runs.
changes=$(mktemp /tmp/slide_changes_XXXXXX.txt)
trap 'rm -f "$changes"' EXIT

# Crop to the slide overlay area, then detect scene changes.  One awk pass
# formats the timestamps (no per-line bc processes).
ffmpeg -i "$VIDEO" \
  -vf "crop=${crop_w}:${crop_h}:${crop_x}:${crop_y},select='gt(scene,$THRESHOLD)',showinfo" \
  -vsync vfr -f null - 2>&1 | \
  sed -n 's/.*showinfo.*pts_time:\([0-9.]*\).*/\1/p' | \
  awk '{ printf "%02d:%02d:%05.2f\n", int($1/3600), int(($1%3600)/60), $1%60 }' | \
  tee "$changes"

count=$(wc -l < "$changes")
echo ""
echo "========================================"
echo "Found $count potential talk boundaries"
echo "========================================"

if [ "$count" -gt 0 ]; then
  echo ""
  echo "To preview each cut point:"
  echo " ffmpeg -ss TIMESTAMP -i \"$VIDEO\" -vframes 1 -q:v 2 preview.jpg && img2sixel preview.jpg"
fi
44 changes: 44 additions & 0 deletions scripts/download-webdav.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
#!/bin/bash
# Download all files from a WebDAV server using rclone.
#
# Usage: ./download-webdav.sh <webdav_url> <username> <password> [output_dir]
#
# Requires: rclone
# Install with: curl https://rclone.org/install.sh | sudo bash
#
# NOTE(security): passing the password as a command-line argument exposes it
# to other local users via `ps` and shell history; for anything sensitive,
# configure a proper rclone remote instead.

set -euo pipefail

WEBDAV_URL="${1:?Usage: $0 <webdav_url> <username> <password> [output_dir]}"
USERNAME="${2:?Username required}"
PASSWORD="${3:?Password required}"
OUTPUT_DIR="${4:-.}"

# Check the dependency first, before touching the filesystem.
if ! command -v rclone >/dev/null 2>&1; then
  echo "rclone not found. Install with: curl https://rclone.org/install.sh | sudo bash" >&2
  exit 1
fi

# Remove trailing slash from URL.
WEBDAV_URL="${WEBDAV_URL%/}"

echo "Downloading from: $WEBDAV_URL"
echo "Output directory: $OUTPUT_DIR"

mkdir -p "$OUTPUT_DIR"

# rclone wants the password in its obscured (reversibly encoded) form.
OBSCURED_PASS=$(rclone obscure "$PASSWORD")

# Inline WebDAV config — no pre-configured remote needed:
#   --webdav-url:  WebDAV server URL
#   --webdav-user: username
#   --webdav-pass: password (obscured)
rclone copy \
  --webdav-url="$WEBDAV_URL" \
  --webdav-user="$USERNAME" \
  --webdav-pass="$OBSCURED_PASS" \
  --progress \
  --transfers=4 \
  ":webdav:/" \
  "$OUTPUT_DIR"

echo "Download complete!"
60 changes: 60 additions & 0 deletions scripts/find-talks.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
#!/bin/bash
# Find talk boundaries in a conference video by locating silence gaps
# (major scene changes can be cross-checked with detect-cuts.sh).
#
# Usage: ./find-talks.sh <video_file> [min_gap_seconds] [noise_threshold]
#
#   min_gap_seconds: minimum silence duration to consider a boundary (default: 2)
#   noise_threshold: silencedetect noise floor (default: -50dB)

set -o pipefail

VIDEO="${1:?Usage: $0 <video_file> [min_gap_seconds] [noise_threshold]}"
MIN_GAP="${2:-2}"
NOISE="${3:--50dB}"

if [ ! -f "$VIDEO" ]; then
  echo "File not found: $VIDEO" >&2
  exit 1
fi

echo "========================================"
echo "Analyzing: $(basename "$VIDEO")"
echo "Min silence gap: ${MIN_GAP}s"
echo "========================================"
echo ""

# Print the duration only if ffprobe gave us a usable number.
duration=$(ffprobe -v error -show_entries format=duration -of csv=p=0 "$VIDEO" 2>/dev/null)
dur_s=${duration%.*}
if [[ "$dur_s" =~ ^[0-9]+$ ]]; then
  printf 'Video duration: %02d:%02d:%02d\n' $((dur_s/3600)) $((dur_s%3600/60)) $((dur_s%60))
fi
echo ""

# Detect silence periods (potential talk boundaries).
echo "Detecting silence gaps (this may take a while)..."
echo ""

tmpfile=$(mktemp /tmp/silence_XXXXXX.txt)
trap 'rm -f "$tmpfile"' EXIT

# silencedetect reports periods quieter than $NOISE lasting at least
# $MIN_GAP seconds.
ffmpeg -i "$VIDEO" -af "silencedetect=noise=$NOISE:d=$MIN_GAP" -f null - 2>&1 | \
  grep -E "silence_(start|end)" > "$tmpfile" || true

echo "Potential talk boundaries (silence gaps):"
echo "----------------------------------------"

# Format each silence_start as HH:MM:SS.ss in one awk pass (no bc needed).
awk '/silence_start/ {
  t = $0
  sub(/.*silence_start: /, "", t)
  printf "  %02d:%02d:%05.2f\n", int(t/3600), int((t%3600)/60), t%60
}' "$tmpfile"

echo ""
echo "========================================"
echo ""
echo "To preview a cut point, run:"
echo " ./preview.sh \"$VIDEO\" <timestamp>"
echo ""
echo "To extract a segment:"
echo " ffmpeg -ss START -to END -i \"$VIDEO\" -c copy output.mp4"
17 changes: 17 additions & 0 deletions scripts/preview.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
#!/bin/bash
# Show a full-size preview of a single video frame using img2sixel.
#
# Usage: ./preview.sh <video_file> [timestamp]
#
#   timestamp: position to grab the frame from, in seconds or HH:MM:SS
#              (default: 0 = first frame)
#
# The optional timestamp matches how find-talks.sh advertises this script
# ("./preview.sh <video> <timestamp>"); previously $2 was silently ignored
# and frame 0 was always shown.

VIDEO="${1:?Usage: $0 <video_file> [timestamp]}"
TS="${2:-0}"

if [ ! -f "$VIDEO" ]; then
  echo "File not found: $VIDEO" >&2
  exit 1
fi

tmpfile=$(mktemp /tmp/preview_XXXXXX.jpg)
trap 'rm -f "$tmpfile"' EXIT

# -ss before -i seeks on the input (fast); grab exactly one high-quality frame.
if ! ffmpeg -y -ss "$TS" -i "$VIDEO" -vframes 1 -q:v 2 "$tmpfile" 2>/dev/null; then
  echo "Failed to extract a frame at $TS from $VIDEO" >&2
  exit 1
fi

img2sixel "$tmpfile"
163 changes: 163 additions & 0 deletions scripts/process-videos.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
#!/usr/bin/env python3
"""
Process all tagged videos using FFmpeg with background compositing.

Usage: ./process-videos.py [background_image] [output_dir]

Reads: ~/videos/quadrant-tags.json
Requires: ffmpeg
"""

import json
import os
import subprocess
import sys
from pathlib import Path


def get_crop(quadrant: str) -> str:
    """Return the FFmpeg crop spec (w:h:x:y) for one quadrant of the grid.

    Each quadrant is cropped to 1912x1072 with a 4px inset; the offsets
    imply a ~3840x2160 quad-split source — TODO confirm against the
    recordings.

    Raises:
        ValueError: if ``quadrant`` is not one of the four known names.
    """
    try:
        return {
            "top-left": "1912:1072:4:4",
            "top-right": "1912:1072:1924:4",
            "bottom-left": "1912:1072:4:1084",
            "bottom-right": "1912:1072:1924:1084",
        }[quadrant]
    except KeyError:
        raise ValueError(f"Invalid quadrant: {quadrant}") from None


def build_filter(presenter_crop: str, slides_crop: str) -> str:
    """Build the FFmpeg filter_complex for the composite layout.

    Input 0 is the quad-split recording, input 1 the background image.
    The slides quadrant is scaled to 1920x1080 and centered over the
    2560x1440 background; the presenter quadrant is scaled to 320px tall
    and overlaid bottom-right with a 40px margin.

    Args:
        presenter_crop: crop spec (w:h:x:y) for the presenter quadrant.
        slides_crop: crop spec (w:h:x:y) for the slides quadrant.

    Returns:
        Filter graph string whose final output pad is [outv].
    """
    return (
        # Background canvas.
        f"[1:v]scale=2560:1440[bg]; "
        # Slides: crop the tagged quadrant and scale ONCE to 1920x1080.
        # (The previous graph scaled the slides to 1920x1080 twice in a
        # row; the redundant second pass is dropped.)
        f"[0:v]crop={slides_crop},scale=1920:1080[slides]; "
        # Presenter thumbnail: 320px tall, width keeps aspect (-1).
        f"[0:v]crop={presenter_crop},scale=-1:320[presenter]; "
        # Slides centered on the background...
        f"[bg][slides]overlay=(W-w)/2:(H-h)/2[base]; "
        # ...presenter pinned bottom-right with a 40px margin.
        f"[base][presenter]overlay=x=W-w-40:y=H-h-40[outv]"
    )


def process_video(input_path: str, output_path: str, bg_image: str,
                  presenter: str, slides: str) -> bool:
    """Composite one quad-split recording over the background image.

    Args:
        input_path: source video file.
        output_path: destination file to write.
        bg_image: background image composited behind the slides.
        presenter: quadrant name holding the presenter camera.
        slides: quadrant name holding the slides.

    Returns:
        True on success; False on a bad quadrant name or any FFmpeg failure.
    """
    try:
        graph = build_filter(get_crop(presenter), get_crop(slides))
    except ValueError as err:
        print(f" Error: {err}")
        return False

    command = [
        "ffmpeg", "-y",
        "-i", input_path,
        "-i", bg_image,
        "-filter_complex", graph,
        "-map", "[outv]",        # composited video stream
        "-map", "0:a?",          # source audio, if any ('?' = optional)
        "-c:v", "libx264",
        "-crf", "18",            # high-quality encode
        "-preset", "veryfast",
        "-threads", "0",         # let x264 choose the thread count
        "-c:a", "copy",          # pass audio through untouched
        output_path,
    ]

    try:
        proc = subprocess.run(command, capture_output=True, text=True)
    except Exception as err:
        print(f" Error running FFmpeg: {err}")
        return False

    if proc.returncode != 0:
        # FFmpeg logs are long; show only the tail of stderr.
        print(f" FFmpeg error: {proc.stderr[-500:]}")
        return False
    return True


def main():
    """Process every tagged video listed in ~/videos/quadrant-tags.json.

    Usage: ./process-videos.py [background_image] [output_dir]
    """
    # Parse arguments (both optional, with home-relative defaults).
    bg_image = sys.argv[1] if len(sys.argv) > 1 else os.path.expanduser("~/gpc-bg.png")
    output_dir = sys.argv[2] if len(sys.argv) > 2 else os.path.expanduser("~/videos/processed")
    tags_file = os.path.expanduser("~/videos/quadrant-tags.json")

    # Check dependencies before doing any work.
    if not Path(bg_image).exists():
        print(f"Background image not found: {bg_image}")
        sys.exit(1)

    if not Path(tags_file).exists():
        print(f"Tags file not found: {tags_file}")
        print("Run tag-videos.sh first to create it")
        sys.exit(1)

    # Create output directory.
    Path(output_dir).mkdir(parents=True, exist_ok=True)

    # Load tags: {filename: {"presenter": quadrant, "slides": quadrant, "path": str}}
    with open(tags_file) as f:
        tags = json.load(f)

    total = len(tags)
    print("=" * 40)
    print(f"Processing {total} video(s)")
    print(f"Background: {bg_image}")
    print(f"Output dir: {output_dir}")
    print("=" * 40)
    print()

    # Process each video.
    for i, (filename, data) in enumerate(tags.items(), 1):
        presenter = data["presenter"]
        slides = data["slides"]
        input_path = data["path"]

        # Output filename (change extension to .mp4).
        output_name = Path(filename).stem + ".mp4"
        output_path = os.path.join(output_dir, output_name)

        # Skip if already processed.  BUG FIX: these progress messages
        # previously printed the literal text "(unknown)" instead of the
        # video's filename.
        if Path(output_path).exists():
            print(f"[{i}/{total}] Skipping {filename} (already exists)")
            continue

        print(f"[{i}/{total}] Processing: {filename}")
        print(f" Input: {input_path}")
        print(f" Presenter: {presenter}")
        print(f" Slides: {slides}")
        print(f" Output: {output_path}")
        print(" Running FFmpeg...")

        if process_video(input_path, output_path, bg_image, presenter, slides):
            print(" Done!")
        else:
            print(" FAILED!")
        print()

    print("=" * 40)
    print("Processing complete!")
    print(f"Output directory: {output_dir}")
    print("=" * 40)

    # Show summary of everything now in the output directory.
    print()
    print("Processed files:")
    for f in Path(output_dir).glob("*.mp4"):
        size_mb = f.stat().st_size / (1024 * 1024)
        print(f" {f.name}: {size_mb:.1f} MB")


if __name__ == "__main__":
main()
Loading