Compare commits

...

6 Commits
main ... main

Author SHA1 Message Date
L3D fbe67bb400
Adding render chunks 2025-03-01 19:21:47 +01:00
L3D 85babf1f4b
Adjust timings 2025-02-26 00:20:11 +01:00
L3D 0496d64321
Generate a README 2025-02-25 23:29:54 +01:00
L3D 6c6222730f
Adding into outro generator using images 2025-02-25 23:22:06 +01:00
L3D ac0f1b5a4e
Improve Filepath 2025-02-25 21:17:58 +01:00
L3D 4b77738ba8
Prepare Winterkongress Rendering 2025-02-25 21:13:51 +01:00
8 changed files with 169 additions and 136 deletions

View File

@ -1,6 +1,7 @@
MIT License
Copyright (c) 2021 Chaos-West TV
Copyright (c) 2025 L3D <l3d@winkaktze.tv>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

54
README.md Normal file
View File

@ -0,0 +1,54 @@
Video Release Tooling
=======================
## Ein Tool zur Verarbeitung des Streamdumps zum media.ccc.de-Release
### 📝 Beschreibung
Dieses Skript hilft dabei, automatisch das richtige Intro- und Outro-File sowie die passenden Teile einer chunked Aufnahme auszuwählen (z. B. OBS mit automatischem Split alle 5 Minuten).
**Features:**
- Automatische Auswahl der relevanten Videodateien
- Vorschau der ersten und letzten Datei in `mpv`
- Manuelle Eingabe der Start- und Endpunkte zur Feinanpassung
- Automatische Zusammenfügung mit Intro und Outro
- Audio-Normalisierung für das finale Video
Es wird angenommen, dass Intros und Outros als Bilder oder Video verfügbar sind.
Außerdem wird angenommen, dass der Stream in Snippets aufgenommen wurde.
### 🛠️ Voraussetzungen
Das Skript setzt die folgenden Abhängigkeiten voraus:
- `ffmpeg`
- `fzf`
- `mpv` (optional, für die Vorschau)
### ⚙️ Installation
Stelle sicher, dass `ffmpeg` und `fzf` installiert sind:
```bash
sudo apt install ffmpeg fzf # Debian/Ubuntu
brew install ffmpeg fzf # macOS (Homebrew)
yay -S ffmpeg fzf # Arch Linux (AUR)
```
### 🛠️ Nutzung
1. Bereite die Ordner mit Intros, Outros und Snippets vor
2. Führe das Skript aus:
```bash
./render-chunks.sh
```
3. Wähle Intro, Outro und die relevanten Videodateien aus.
4. Nutze `mpv`, um die Start- und Endpunkte zu bestimmen.
5. Gib diese Werte im Skript an.
6. Warte, bis das finale Video gerendert wurde.
### 👨👩👦 Autoren
- thunfisch
- iiidefix
- L3D
### ⚖️ Lizenz
Dieses Projekt steht unter der **[MIT-Lizenz](LICENSE)**.

View File

@ -1,23 +0,0 @@
#!/bin/bash
# Single-stream SRT monitor: pulls one stream from a c3voc ingest, builds a
# stacked preview (scaled video with stream-name overlay, VU meter, EBU R128
# loudness meter) and republishes it to a local SRT endpoint as
# "<stream>_monitor".
#
# Usage: script [stream-name] [ingest-suffix]
#   $1 - stream id to play (default: cwtv)
#   $2 - optional suffix appended to the ingest hostname (empty by default)
set -euxo pipefail
# Font used for all drawtext overlays; assumed to exist on the host.
FONT='/opt/comic.ttf'
STREAM="${1:-cwtv}"
STREAM_OUT_SUFFIX="monitor"
SRT_SRC_HOST="srt://ingest${2:-}.c3voc.de:1337"
SRT_DST_HOST="srt://127.0.0.1:1337"
# Filter graph (no comments may be interleaved in the continued command):
#   [0:v] -> scale to 640x360 -> stream-name caption   -> [preview_overlay]
#   [0:a] -> ebur128 loudness video ("native" caption) -> [native_overlay]
#            (the ebur128 audio pass-through is discarded via anullsink)
#   [0:a] -> showvolume VU meter                       -> [native_vu]
#   vstack all three, reduce to 10 fps, encode ultrafast x264 to MPEG-TS.
# -progress writes machine-readable stats; assumes /tmp/ffmpeg/ exists --
# TODO confirm.
ffmpeg -y -i "${SRT_SRC_HOST}?streamid=play/$STREAM" \
-nostats -progress "/tmp/ffmpeg/${STREAM}_${STREAM_OUT_SUFFIX}" -loglevel repeat+level+info \
-filter_complex \
"[0:v:0]scale=640x360[orig_scaled];\
[orig_scaled]drawtext=fontfile=$FONT:text=$STREAM:fontcolor=white:fontsize=100:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[preview_overlay];\
[0:a:0]ebur128=video=1:size=640x480:meter=9:target=-16:gauge=shortterm[native][native_a]; [native_a]anullsink; \
[native]drawtext=fontfile=$FONT:text=native:fontcolor=white:fontsize=60:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[native_overlay];\
[0:a:0]showvolume=r=50:w=640:h=60:b=0:ds=log:dm=1.0[native_vu]; \
[preview_overlay][native_vu][native_overlay]vstack=inputs=3[full_mix]; \
[full_mix]framerate=fps=10[out]" \
-map "[out]" \
-g 30 \
-c:v libx264 -preset ultrafast -f mpegts "${SRT_DST_HOST}?streamid=publish/${STREAM}_${STREAM_OUT_SUFFIX}"

View File

@ -1,31 +0,0 @@
#!/bin/bash
# Multi-audio SRT monitor: like the single-stream variant, but visualises
# three audio tracks (native + two translation channels "lingo1"/"lingo2")
# in a 2x2 mosaic and republishes the result as "<stream>_monitor".
#
# Usage: script [stream-name] [ingest-suffix]
#   $1 - stream id to play (default: cwtv)
#   $2 - optional suffix appended to the ingest hostname (empty by default)
set -euxo pipefail
# Font used for all drawtext overlays; assumed to exist on the host.
FONT='/opt/comic.ttf'
STREAM="${1:-cwtv}"
STREAM_OUT_SUFFIX="monitor"
SRT_SRC_HOST="srt://ingest${2:-}.c3voc.de:1337"
SRT_DST_HOST="srt://127.0.0.1:1337"
# Filter graph (no comments may be interleaved in the continued command):
#   each of the three audio tracks gets an ebur128 loudness video (captioned)
#   and a showvolume VU bar; the ebur128 audio outputs go to anullsink.
#   Left column : scaled video + 3 VU bars + stream caption + native meter.
#   Right column: lingo1 meter stacked on lingo2 meter.
#   hstack both columns, reduce to 10 fps, encode ultrafast x264 to MPEG-TS.
ffmpeg -y -i "${SRT_SRC_HOST}?streamid=play/$STREAM" \
-nostats -progress "/tmp/ffmpeg/${STREAM}_${STREAM_OUT_SUFFIX}" -loglevel repeat+level+info \
-filter_complex \
"[0:v:0]scale=640x360[orig_scaled]; \
[0:a:0]ebur128=video=1:size=640x480:meter=9:target=-16:gauge=shortterm[native][native_a]; [native_a]anullsink; \
[0:a:1]ebur128=video=1:size=640x480:meter=9:target=-16:gauge=shortterm[lingo1][lingo1_a]; [lingo1_a]anullsink; \
[0:a:2]ebur128=video=1:size=640x480:meter=9:target=-16:gauge=shortterm[lingo2][lingo2_a]; [lingo2_a]anullsink; \
[native]drawtext=fontfile=$FONT:text=native:fontcolor=white:fontsize=60:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[native_overlay];\
[lingo1]drawtext=fontfile=$FONT:text=lingo1:fontcolor=white:fontsize=60:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[lingo1_overlay];\
[lingo2]drawtext=fontfile=$FONT:text=lingo2:fontcolor=white:fontsize=60:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[lingo2_overlay];\
[0:a:0]showvolume=r=50:w=640:h=20:b=0:ds=log:dm=1.0[native_vu]; \
[0:a:1]showvolume=r=50:w=640:h=20:b=0:ds=log:dm=1.0[lingo1_vu]; \
[0:a:2]showvolume=r=50:w=640:h=20:b=0:ds=log:dm=1.0[lingo2_vu]; \
[orig_scaled][native_vu][lingo1_vu][lingo2_vu]vstack=inputs=4[left_top]; \
[left_top]drawtext=fontfile=$FONT:text=$STREAM:fontcolor=white:fontsize=100:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[left_top_overlay];\
[left_top_overlay][native_overlay]vstack=inputs=2[left]; \
[lingo1_overlay][lingo2_overlay]vstack=inputs=2[right]; \
[left][right]hstack=inputs=2[full_mix];[full_mix]framerate=fps=10[out]" \
-map "[out]" \
-g 30 \
-c:v libx264 -preset ultrafast -f mpegts "${SRT_DST_HOST}?streamid=publish/${STREAM}_${STREAM_OUT_SUFFIX}"

View File

@ -1,28 +0,0 @@
#!/usr/bin/env bash
# Transcode an SRT program feed into a multi-variant HLS ladder
# (1080p/720p/480p video renditions plus three audio-only renditions:
# native + two translations) and a continuously updated poster frame.
#
# Usage: script [source-stream] [hls-name]
#   $1 - SRT stream id to play (default: pgm)
#   $2 - output directory name under /opt/hls/data (default: cwtv)
#
# NOTE(review): shebang changed from /bin/sh to bash -- 'set -o pipefail'
# is a bash feature and aborts many POSIX /bin/sh implementations (dash,
# ash) right at startup.
set -euxo pipefail
SOURCE="${1:-pgm}"
STREAM="${2:-cwtv}"
KEY_PLAY=$(cat /opt/srt_play)
# Read for parity with the sibling scripts; not referenced below --
# TODO confirm it can be dropped.
KEY_PUBLISH=$(cat /opt/srt_publish)
mkdir -p "/opt/hls/data/$STREAM"
# The SRT host appears redacted ("*****") in this copy -- TODO restore.
# Fixed: a space was missing before the line continuation after
# '-loglevel repeat+level', which fused the level with the next argument.
# Mapping: v+a three times for the video ladder, then the three audio
# tracks once more for the audio-only renditions (6 streams total).
ffmpeg -y -i "srt://*****?streamid=play/${SOURCE}/$KEY_PLAY" \
-nostats -progress "/tmp/ffmpeg/${STREAM}_hls" -loglevel repeat+level \
-map 0:v:0 -map 0:a:0 \
-map 0:v:0 -map 0:a:0 \
-map 0:v:0 -map 0:a:0 \
-map 0:a:0 -map 0:a:1 -map 0:a:2 \
-g 75 \
-c:v libx264 -crf 21 -c:a aac -ar 48000 -preset fast \
-maxrate:v:0 6000k -b:a:0 192k \
-filter:v:1 scale=w=1280:h=-2 -maxrate:v:1 2500k -b:a:1 192k \
-filter:v:2 scale=w=848:h=-2 -maxrate:v:2 1000k -b:a:2 192k \
-b:a:3 192k -b:a:4 192k -b:a:5 192k \
-var_stream_map "v:0,a:0,name:1080p,agroup:main v:1,a:1,name:720p,agroup:main v:2,a:2,name:480p,agroup:main a:3,agroup:main,name:native,default:yes,language:Native a:4,agroup:main,name:lingo1,language:Translation_1 a:5,agroup:main,name:lingo2,language:Translation_2" \
-f hls -hls_time 6 -hls_list_size 100 -master_pl_publish_rate 5 -hls_flags +delete_segments+append_list+omit_endlist+independent_segments+temp_file -hls_allow_cache 0 \
-hls_start_number_source epoch -strftime 1 -hls_segment_filename "/opt/hls/data/$STREAM/%v-%Y%m%d-%s.ts" \
-master_pl_name "main.m3u8" "/opt/hls/data/$STREAM/%v.m3u8" \
-vf fps=1 -update 1 "/opt/hls/data/$STREAM/poster.png"

View File

@ -1,30 +0,0 @@
#!/usr/bin/env bash
# Program-feed SRT monitor for a keyed SRT relay: pulls the program stream,
# renders loudness meters and VU bars for three audio tracks (native + two
# translations) stacked under the captioned program video, and publishes the
# mosaic back to the same relay as "<stream>_monitor".
#
# Usage: script [stream-name] [output-suffix]
#   $1 - stream id to play (default: pgm)
#   $2 - suffix for the published monitor stream (default: monitor)
#
# NOTE(review): shebang changed from /bin/sh to bash -- 'set -o pipefail'
# is a bash feature and aborts many POSIX /bin/sh implementations (dash,
# ash) right at startup.
set -euxo pipefail
# Font used for all drawtext overlays; assumed to exist on the host.
FONT='/opt/comic.ttf'
STREAM="${1:-pgm}"
STREAM_OUT_SUFFIX="${2:-monitor}"
# Play/publish keys are read from files so they never appear on the
# command line of this script itself (they do appear in ffmpeg's argv).
KEY_PLAY=$(cat /opt/srt_play)
KEY_PUBLISH=$(cat /opt/srt_publish)
SRT_HOST="srt://srt.mon2.de:20000"
# Filter graph (no comments may be interleaved in the continued command):
#   per audio track: ebur128 loudness video (captioned) + showvolume VU bar,
#   ebur128 audio pass-throughs discarded via anullsink.
#   Layout: captioned program video / row of 3 VU bars / row of 3 meters,
#   reduced to 25 fps and encoded ultrafast x264 to MPEG-TS.
ffmpeg -y -i "${SRT_HOST}?streamid=play/$STREAM/${KEY_PLAY}" \
-nostats -progress "/tmp/ffmpeg/${STREAM}_${STREAM_OUT_SUFFIX}" -loglevel repeat+level+info \
-filter_complex \
"[0:a:0]ebur128=video=1:meter=18:target=-14[native][native_a]; [native_a]anullsink; \
[native]drawtext=fontfile=$FONT:text=native:fontcolor=white:fontsize=60:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[native_overlay];\
[0:a:0]showvolume=r=50:w=640:h=20:b=0:ds=log:dm=1.0[native_vu]; \
[0:a:1]ebur128=video=1:meter=18:target=-14[lingo1][lingo1_a]; [lingo1_a]anullsink; \
[lingo1]drawtext=fontfile=$FONT:text=lingo1:fontcolor=white:fontsize=60:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[lingo1_overlay];\
[0:a:1]showvolume=r=50:w=640:h=20:b=0:ds=log:dm=1.0[lingo1_vu]; \
[0:a:2]ebur128=video=1:meter=18:target=-14[lingo2][lingo2_a]; [lingo2_a]anullsink; \
[lingo2]drawtext=fontfile=$FONT:text=lingo2:fontcolor=white:fontsize=60:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[lingo2_overlay];\
[0:a:2]showvolume=r=50:w=640:h=20:b=0:ds=log:dm=1.0[lingo2_vu]; \
[native_overlay][lingo1_overlay][lingo2_overlay]hstack=inputs=3[ebu_mix]; \
[0:v:0]drawtext=fontfile=$FONT:text=$STREAM:fontcolor=white:fontsize=100:box=1:boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/1.5[preview_overlay];\
[native_vu][lingo1_vu][lingo2_vu]hstack=inputs=3[vu_mix]; \
[preview_overlay][vu_mix][ebu_mix]vstack=inputs=3[full_mix];[full_mix]framerate=fps=25[out]" \
-map "[out]" \
-g 75 \
-c:v libx264 -preset ultrafast -f mpegts "${SRT_HOST}?streamid=publish/${STREAM}_${STREAM_OUT_SUFFIX}/${KEY_PUBLISH}"

View File

@ -16,16 +16,24 @@ set -euo pipefail
# After that the script will render the introfile with the first chunk and the
# last chunk with the outrofile, and then in the last step assemble everything
# into the final recording with audio normalization.
# It also assumes that the intros and outros are pictures...
# This script requires you to have ffmpeg and fzf installed.
which ffmpeg >/dev/null || (echo "Please install ffmpeg" ; exit 1)
which fzf >/dev/null || (echo "Please install fzf" ; exit 1)
which mpv >/dev/null || (echo "Please install mpv" ; exit 1)
INTROS_PATH="/Users/j/dhcp24_voc_files/intros"
OUTROS_PATH="/Users/j/dhcp24_voc_files/outros"
CHUNKS_PATH="/Users/j/voc_obs/obs_record"
OUTPUT_PATH="/Users/j/voc_obs/obs_record/rendered"
INTROS_PATH="$HOME/Dokumente/syncthing/intros"
OUTROS_PATH="$HOME/Dokumente/syncthing/outros"
CHUNKS_PATH="$HOME/Dokumente/syncthing/"
OUTPUT_PATH="$HOME/Videos/WK25/rendered"
# temp dir
WORKDIR=$(mktemp -d)
function finish {
rm -r "$WORKDIR"
}
trap finish EXIT
# Select the appropriate files
SELECTED_INTRO="$(find "$INTROS_PATH" -type f | sort --reverse --human-numeric-sort | fzf --delimiter / --with-nth -1 --prompt "Intro File:")"
@ -37,6 +45,43 @@ FOOBARWTF="$(basename "$SELECTED_INTRO")"
DEFAULT_OUTPUT_NAME="${FOOBARWTF%.*}"
read -p "Please enter a name for the outputfile (path and extension will be added automatically): " -i "$DEFAULT_OUTPUT_NAME" -e OUTPUT_NAME
# Function to convert image to video with audio
convert_image_to_video() {
  # Render a still image into a video clip with silent stereo audio.
  #
  # Arguments:
  #   $1 - path to the source image
  #   $2 - path of the video file to create
  #   $3 - clip duration in seconds
  #   $4 - fade-in duration; "0" selects fade-OUT mode instead
  #   $5 - fade-out duration (only used when $4 is "0")
  #
  # Output: 50 fps yuv420p H.264 + silent AAC, audio track tagged "native".
  local image_file="$1"
  local output_video="$2"
  local duration="$3"
  local fade_in="$4"
  local fade_out="$5"
  local fade_filter
  if [[ $fade_in == 0 ]]; then
    # Fade out at the end of the clip. Use bc (already required by this
    # script) instead of $(( )) so fractional durations also work.
    local fade_start
    fade_start=$(echo "$duration - $fade_out" | bc)
    fade_filter="fade=t=out:st=${fade_start}:d=${fade_out}"
  else
    fade_filter="fade=t=in:st=0:d=${fade_in}"
  fi
  # The two former copy-pasted ffmpeg branches differed only in the fade
  # expression; collapsed into a single invocation.
  ffmpeg -loop 1 \
    -framerate 50 \
    -t "$duration" \
    -i "$image_file" -f lavfi \
    -i anullsrc=r=48000:cl=stereo \
    -vf "${fade_filter},format=pix_fmts=yuv420p,fps=50" \
    -c:a aac -b:a 192k \
    -c:v libx264 -threads 0 -pix_fmt yuv420p -crf 18 \
    -profile:v high -level 4.1 -disposition default -color_range tv \
    -metadata:s:a:0 language=native \
    -t "$duration" "$output_video"
}
# find the start-offset for the first chunk
read -p "Do you want to play the first chunk ${CHUNKS_ARRAY[0]} to find the start-offset? (y/n) [n]: " PLAY_FIRST_CHUNK
PLAY_FIRST_CHUNK="${PLAY_FIRST_CHUNK:-n}"
@ -56,6 +101,22 @@ PLAY_LAST_CHUNK="${PLAY_LAST_CHUNK:-n}"
read -p "Enter the end-offset in seconds for the last chunk ${CHUNKS_ARRAY[0]} [1]: " END_OFFSET
END_OFFSET="${END_OFFSET:-1}"
# Check if intro is an image and convert if necessary
EXT_INTRO="${SELECTED_INTRO##*.}"
if [[ "$EXT_INTRO" == "png" || "$EXT_INTRO" == "jpg" || "$EXT_INTRO" == "jpeg" ]]; then
INTRO_VIDEO="$WORKDIR/intro_converted.mkv"
convert_image_to_video "$SELECTED_INTRO" "$INTRO_VIDEO" 4 0.5 0
SELECTED_INTRO="$INTRO_VIDEO"
fi
# Check if outro is an image and convert if necessary
EXT_OUTRO="${SELECTED_OUTRO##*.}"
if [[ "$EXT_OUTRO" == "png" || "$EXT_OUTRO" == "jpg" || "$EXT_OUTRO" == "jpeg" ]]; then
OUTRO_VIDEO="$WORKDIR/outro_converted.mkv"
convert_image_to_video "$SELECTED_OUTRO" "$OUTRO_VIDEO" 6 0 1
SELECTED_OUTRO="$OUTRO_VIDEO"
fi
cat <<EOT
@ -66,7 +127,7 @@ I will be rendering with the following configuration:
+ Video Chunks:
+ First chunk: ${CHUNKS_ARRAY[0]}
Starting at second ${START_OFFSET}
+ Last chunk: ${CHUNKS_ARRAY[-1]}
+ Last chunk: ${CHUNKS_ARRAY[$((${#CHUNKS_ARRAY[@]} - 1))]}
Ending at second ${END_OFFSET}
+ All chunks:
EOT
@ -74,6 +135,7 @@ for index in "${!CHUNKS_ARRAY[@]}"
do
echo " + $index: ${CHUNKS_ARRAY[index]}"
done
echo "Export: ${OUTPUT_PATH}/${OUTPUT_NAME}.mkv"
echo ; echo ; echo
read -p "Do you want to proceed with this configuration? (y/n) [y]" PROCEED
PROCEED="${PROCEED:-y}"
@ -81,6 +143,7 @@ PROCEED="${PROCEED:-y}"
[[ "$PROCEED" == "y" ]] || (echo "aborting"; exit 1)
echo "doing ffmpeg things here"
# combine the videos...
ARRAY_LENGTH="${#CHUNKS_ARRAY[@]}"
if [[ ${ARRAY_LENGTH} -lt 2 ]]
then
@ -97,39 +160,64 @@ function finish {
trap finish EXIT
# STEP 2
# introfile with first chunk and crossfade encode
# Dauer des Intros ermitteln
echo "==== STEP 2 ===="
DURATION_INTRO=$(ffprobe -i "$SELECTED_INTRO" -show_entries format=duration -v quiet -of csv="p=0")
# Sicherstellen, dass die Dauer gültig ist
if [[ -z "$DURATION_INTRO" || "$DURATION_INTRO" == "N/A" ]]; then
echo "Fehler: Die Dauer des Intros konnte nicht ermittelt werden."
exit 1
fi
# Offset berechnen und negative Werte verhindern
OFFSET=$(echo "scale=2; $DURATION_INTRO - 0.5" | bc)
if (( $(echo "$OFFSET < 0" | bc -l) )); then OFFSET=0; fi
# Prüfen, ob CHUNKS_ARRAY existiert und nicht leer ist
if [[ -z "${CHUNKS_ARRAY[0]}" ]]; then
echo "Fehler: CHUNKS_ARRAY ist leer oder nicht definiert."
exit 1
fi
CROSSFADE_DURATION="0.5"
# Intro mit Crossfade zum ersten Chunk
ffmpeg -i "$SELECTED_INTRO" -ss "$START_OFFSET" -i "${CHUNKS_ARRAY[0]}" \
-filter_complex \
"[1:v:0]fade=t=in:st=0:d=0.2[x];
[1:a:0]afade=t=in:st=0:d=0.2[a];
[0:v:0][0:a:0]
[x][a]
concat=n=2:v=1:a=1
[v0][a0]" \
-map '[v0]' -map '[a0]' \
"[0:v:0]format=pix_fmts=yuv420p,fps=50[va]; \
[1:v:0]format=pix_fmts=yuv420p,fps=50[vb]; \
[va][vb]xfade=transition=fade:duration=${CROSSFADE_DURATION}:offset=${OFFSET}[v]; \
[1:a:0]afade=t=in:st=0:d=${CROSSFADE_DURATION}[a]; \
[0:a:0][a]concat=n=2:v=0:a=1[a0]" \
-map '[v]' -map '[a0]' \
-c:a aac -b:a 192k \
-c:v libx264 -threads 0 -pix_fmt yuv420p -crf 18 -profile:v high -level 4.1 -disposition default \
-c:v libx264 -threads 4 -pix_fmt yuv420p -crf 18 -profile:v high -level 4.1 -disposition default \
-movflags +faststart \
-metadata:s:a:0 language=native \
"${WORKDIR}/introcombined.mkv"
# STEP 3
# outrofile with last chunk and crossfade encode
ffmpeg -i "$SELECTED_OUTRO" -t "$END_OFFSET" -i "${CHUNKS_ARRAY[-1]}" \
# Outro mit Crossfade vom letzten Chunk
echo "==== STEP 3 ===="
FOO=$(echo "${END_OFFSET} - ${CROSSFADE_DURATION}" | bc)
ffmpeg -i "$SELECTED_OUTRO" -t "$END_OFFSET" -i "${CHUNKS_ARRAY[$((${#CHUNKS_ARRAY[@]} - 1))]}" \
-filter_complex \
"[1:v:0]fade=t=out:st=$(($END_OFFSET - 1)):d=1.0[x];
[1:a:0]afade=t=out:st=$(($END_OFFSET - 1)):d=1.0[a];
[x][a]
[0:v:0][0:a:0]
concat=n=2:v=1:a=1
[v0][a0]" \
-map '[v0]' -map '[a0]' \
"[1:v:0]format=pix_fmts=yuv420p,fps=50[v1]; \
[0:v:0]format=pix_fmts=yuv420p,fps=50[v0]; \
[v1][v0]xfade=transition=fade:duration=${CROSSFADE_DURATION}:offset=${FOO}[v]; \
[1:a:0]afade=t=out:st=${FOO}:d=${CROSSFADE_DURATION}[a1]; \
[0:a:0]afade=t=in:st=0:d=${CROSSFADE_DURATION}[a0]; \
[a1][a0]acrossfade=d=1[a]" \
-map "[v]" -map "[a]" \
-c:a aac -b:a 192k \
-c:v libx264 -threads 0 -pix_fmt yuv420p -crf 18 -profile:v high -level 4.1 -disposition default \
-metadata:s:a:0 language=native \
"${WORKDIR}/outrocombined.mkv"
# STEP 4
# encoded intro+outro and all chunks in between with c:v copy and audio dynnorm + encode
# Encoded intro+outro und alle Chunks in between mit c:v copy und audio dynnorm + encode
echo "==== STEP 4 ===="
CHUNKLIST="${WORKDIR}/chunklist.txt"
@ -154,3 +242,5 @@ ffmpeg \
-c:a aac -b:a 192k \
-metadata:s:a:0 language=native \
"${OUTPUT_PATH}/${OUTPUT_NAME}.mkv"
echo "Video Exported to ${OUTPUT_PATH}/${OUTPUT_NAME}.mkv"