Rewrote the audio pipeline to stream data so large audio files can be processed

JorySeverijnse 2026-01-19 00:40:23 +01:00
parent 85434e84f2
commit d539ce9a0c
4 changed files with 273 additions and 267 deletions

View File

@@ -1,79 +1,136 @@
-//! WAV audio decoding module.
-//!
-//! Handles reading and decoding WAV files into normalized sample data.
+//! Audio streaming module using ffmpeg.
 use anyhow::{anyhow, Context, Result};
 use bytemuck;
 use std::path::Path;
+use std::process::{Command, Stdio};
+use std::io::{Read, BufReader};
-/// Normalized audio sample data.
-#[derive(Debug, Clone)]
-pub struct AudioData {
-    /// Left channel samples, normalized to [-1.0, 1.0]
-    pub left_channel: Vec<f32>,
-    /// Right channel samples, normalized to [-1.0, 1.0]
-    pub right_channel: Vec<f32>,
-    /// Sample rate in Hz
+/// A streaming audio reader that decodes chunks on demand.
+pub struct AudioStream {
+    child: std::process::Child,
+    reader: BufReader<std::process::ChildStdout>,
     pub sample_rate: u32,
     /// Duration in seconds
     pub duration: f64,
+    /// Current sample position in the stream
+    pub current_sample_pos: usize,
+    /// Sliding window of left channel samples
+    left_window: Vec<f32>,
+    /// Sliding window of right channel samples
+    right_window: Vec<f32>,
+    /// Window size for lookahead (e.g. FFT)
+    window_size: usize,
 }
-impl AudioData {
-    /// Load and decode audio from any supported format using ffmpeg.
-    pub fn from_wav(file_path: &Path) -> Result<Self> {
-        let output = std::process::Command::new("ffmpeg")
-            .arg("-i")
+impl AudioStream {
+    /// Create a new AudioStream from a file path.
+    pub fn new(file_path: &Path, window_size: usize) -> Result<Self> {
+        // 1. Get metadata using ffprobe
+        let output = Command::new("ffprobe")
+            .args([
+                "-v", "error",
+                "-select_streams", "a:0",
+                "-show_entries", "stream=sample_rate,duration",
+                "-of", "default=noprint_wrappers=1:nokey=1",
+            ])
             .arg(file_path)
-            .arg("-f")
-            .arg("s16le")
-            .arg("-acodec")
-            .arg("pcm_s16le")
-            .arg("-ar")
-            .arg("48000")
-            .arg("-ac")
-            .arg("2")
-            .arg("-")
             .output()
-            .with_context(|| "Failed to decode audio with ffmpeg")?;
+            .with_context(|| "Failed to run ffprobe")?;
         if !output.status.success() {
-            return Err(anyhow!("Audio decoding failed: {}", String::from_utf8_lossy(&output.stderr)));
+            return Err(anyhow!("ffprobe failed: {}", String::from_utf8_lossy(&output.stderr)));
         }
-        let pcm_data = output.stdout;
-        let sample_rate = 48000;
-        let num_channels = 2;
-        let total_samples = pcm_data.len() / (2 * num_channels);
-        let duration = total_samples as f64 / sample_rate as f64;
+        let metadata = String::from_utf8_lossy(&output.stdout);
+        let mut lines = metadata.lines();
+        let sample_rate: u32 = lines.next()
+            .ok_or_else(|| anyhow!("Could not get sample rate"))?
+            .parse()?;
+        let duration: f64 = lines.next()
+            .ok_or_else(|| anyhow!("Could not get duration"))?
+            .parse()?;
-        let mut left_channel = Vec::with_capacity(total_samples);
-        let mut right_channel = Vec::with_capacity(total_samples);
+        // 2. Start ffmpeg for streaming
+        let mut child = Command::new("ffmpeg")
+            .arg("-i")
+            .arg(file_path)
+            .args([
+                "-f", "s16le",
+                "-acodec", "pcm_s16le",
+                "-ar", &sample_rate.to_string(),
+                "-ac", "2",
+                "-",
+            ])
+            .stdout(Stdio::piped())
+            .stderr(Stdio::null())
+            .spawn()
+            .with_context(|| "Failed to spawn ffmpeg")?;
-        // Convert PCM data to f32 samples efficiently
-        let samples: &[i16] = bytemuck::cast_slice(&pcm_data);
-        for chunk in samples.chunks_exact(num_channels) {
-            let left_val = chunk[0];
-            let right_val = chunk[1];
-            left_channel.push(left_val as f32 / 32768.0);
-            right_channel.push(right_val as f32 / 32768.0);
-        }
+        let stdout = child.stdout.take().ok_or_else(|| anyhow!("Failed to open ffmpeg stdout"))?;
+        let reader = BufReader::new(stdout);
-        Ok(AudioData {
-            left_channel,
-            right_channel,
+        Ok(Self {
+            child,
+            reader,
             sample_rate,
             duration,
+            current_sample_pos: 0,
+            left_window: Vec::with_capacity(window_size),
+            right_window: Vec::with_capacity(window_size),
+            window_size,
         })
     }
-    /// Get the total number of samples.
-    pub fn len(&self) -> usize {
-        self.left_channel.len()
-    }
+    /// Read samples up to the target position.
+    pub fn fill_until(&mut self, target_pos: usize) -> Result<()> {
+        if target_pos <= self.current_sample_pos {
+            return Ok(());
+        }
+        let samples_to_read = target_pos - self.current_sample_pos;
+        let mut buffer = vec![0i16; samples_to_read * 2]; // 2 channels
+        // Read raw PCM bytes
+        let byte_buffer: &mut [u8] = bytemuck::cast_slice_mut(&mut buffer);
+        self.reader.read_exact(byte_buffer)
+            .with_context(|| "Failed to read audio data from ffmpeg")?;
+        // Process samples and update sliding window
+        for chunk in buffer.chunks_exact(2) {
+            let left = chunk[0] as f32 / 32768.0;
+            let right = chunk[1] as f32 / 32768.0;
+            self.left_window.push(left);
+            self.right_window.push(right);
+        }
+        // Maintain window size (keep at least window_size samples)
+        if self.left_window.len() > self.window_size * 2 {
+            let drain_amount = self.left_window.len() - self.window_size;
+            self.left_window.drain(0..drain_amount);
+            self.right_window.drain(0..drain_amount);
+        }
+        self.current_sample_pos = target_pos;
+        Ok(())
+    }
-    /// Check if the audio data is empty.
-    pub fn is_empty(&self) -> bool {
-        self.left_channel.is_empty()
-    }
+    /// Get the last N samples for rendering.
+    pub fn get_last_samples(&self, count: usize) -> (&[f32], &[f32]) {
+        let actual_count = count.min(self.left_window.len());
+        let start = self.left_window.len() - actual_count;
+        (&self.left_window[start..], &self.right_window[start..])
+    }
+    /// Get a window of samples for FFT.
+    pub fn get_fft_window(&self, size: usize) -> (&[f32], &[f32]) {
+        let actual_size = size.min(self.left_window.len());
+        let start = self.left_window.len() - actual_size;
+        (&self.left_window[start..], &self.right_window[start..])
+    }
 }
+impl Drop for AudioStream {
+    fn drop(&mut self) {
+        let _ = self.child.kill();
+    }
+}
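The new AudioStream is pull-based: callers advance the decode position with fill_until and then borrow slices out of the sliding window. A minimal sketch of driving it from outside the crate ("input.flac" is a placeholder path; assumes ffmpeg and ffprobe are on PATH, and uses the oscilloscope_video_gen crate name that main.rs imports):

    use anyhow::Result;
    use oscilloscope_video_gen::audio::AudioStream;
    use std::path::Path;

    fn main() -> Result<()> {
        let fps: u64 = 30;
        // 2048 matches the FFT lookahead window that main.rs passes in.
        let mut stream = AudioStream::new(Path::new("input.flac"), 2048)?;
        let samples_per_frame = (stream.sample_rate as u64 / fps) as usize;
        for frame_idx in 0..3u64 {
            // Same 64-bit position math as stream_frames, avoiding rounding drift.
            let target = ((frame_idx + 1) * stream.sample_rate as u64 / fps) as usize;
            stream.fill_until(target)?;
            let (left, right) = stream.get_last_samples(samples_per_frame);
            println!("frame {}: {} left / {} right samples", frame_idx, left.len(), right.len());
        }
        Ok(())
    }

One consequence of the read_exact call in fill_until is that requesting samples past the end of the ffmpeg stream returns an error rather than padding with silence, so callers should not ask for positions beyond the probed duration.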

View File

@@ -20,6 +20,6 @@ pub mod audio;
 pub mod render;
 pub mod video;
-pub use audio::AudioData;
+pub use audio::AudioStream;
 pub use render::{stream_frames, RenderMode, RenderOptions};
 pub use video::VideoEncoder;

View File

@@ -1,7 +1,7 @@
 //! Oscilloscope Video Generator
 //!
 //! A high-performance tool for generating oscilloscope-style visualizations
-//! from audio files. Uses parallel rendering for fast processing.
+//! from audio files. Uses streaming for handling large files.
 use anyhow::{Context, Result};
 use clap::{Parser, ValueEnum};
@@ -10,16 +10,13 @@ use std::path::PathBuf;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
-use oscilloscope_video_gen::audio::AudioData;
+use oscilloscope_video_gen::audio::AudioStream;
 use oscilloscope_video_gen::render::{parse_rgb_hex, stream_frames, RenderMode, RenderOptions};
 use oscilloscope_video_gen::video::{VideoEncoder, VideoQuality};
 #[derive(Debug, Clone, Copy, ValueEnum)]
 enum OutputMode {
-    Combined,
-    Separate,
-    All,
-    Spectrometer,
+    Combined, Separate, All, Spectrometer,
 }
 impl From<OutputMode> for RenderMode {
@@ -35,9 +32,7 @@ impl From<OutputMode> for RenderMode {
 #[derive(Debug, Clone, Copy, ValueEnum)]
 enum OutputQuality {
-    Low,
-    Medium,
-    High,
+    Low, Medium, High,
 }
 impl From<OutputQuality> for VideoQuality {
@@ -52,10 +47,9 @@ impl From<OutputQuality> for VideoQuality {
 /// Generate oscilloscope visualizations from audio files
 #[derive(Parser, Debug)]
-#[command(name = "oscilloscope-video-gen")]
-#[command(author, version, about, long_about = None)]
+#[command(name = "oscilloscope-video-gen", author, version, about)]
 struct Args {
-    /// Input audio file (WAV)
+    /// Input audio file
     #[arg(short, long)]
     input: PathBuf,
@@ -63,39 +57,39 @@ struct Args {
     #[arg(short, long)]
     output: Option<PathBuf>,
-    /// Video width (default: 1920)
+    /// Video width
     #[arg(long, default_value = "1920")]
     width: u32,
-    /// Video height (default: 1080)
+    /// Video height
     #[arg(long, default_value = "1080")]
     height: u32,
-    /// Frames per second (default: 30)
+    /// Frames per second
     #[arg(long, default_value = "30")]
     fps: u32,
-    /// Display mode: combined, separate, all, spectrometer
+    /// Display mode
    #[arg(long, value_enum, default_value = "all")]
     mode: OutputMode,
-    /// Quality: low, medium, high
+    /// Quality preset
     #[arg(long, value_enum, default_value = "high")]
     quality: OutputQuality,
-    /// Left channel color (RGB hex, default: #00ff00)
+    /// Left channel color
     #[arg(long, default_value = "#00ff00")]
     left_color: String,
-    /// Right channel color (RGB hex, default: #00ccff)
+    /// Right channel color
     #[arg(long, default_value = "#00ccff")]
     right_color: String,
-    /// XY mode color (RGB hex, default: #ff8800)
+    /// XY mode color
    #[arg(long, default_value = "#ff8800")]
     xy_color: String,
-    /// Background color (RGB hex, default: #0a0f0a)
+    /// Background color
     #[arg(long, default_value = "#0a0f0a")]
     background: String,
@@ -103,15 +97,11 @@ struct Args {
     #[arg(long, default_value = "true")]
     show_grid: bool,
-    /// Line thickness (default: 2)
+    /// Line thickness
     #[arg(long, default_value = "2")]
     line_thickness: u32,
-    /// Number of rendering threads
-    #[arg(long)]
-    threads: Option<usize>,
-    /// Overwrite output file if it exists
+    /// Overwrite output file
     #[arg(long, default_value = "false")]
     overwrite: bool,
@@ -123,120 +113,61 @@ struct Args {
 fn main() -> Result<()> {
     let args = Args::parse();
-    // Set number of threads
-    if let Some(threads) = args.threads {
-        rayon::ThreadPoolBuilder::new()
-            .num_threads(threads)
-            .build_global()
-            .unwrap();
-    }
-    let left_color = parse_rgb_hex(&args.left_color)?;
-    let right_color = parse_rgb_hex(&args.right_color)?;
-    let xy_color = parse_rgb_hex(&args.xy_color)?;
-    let background = parse_rgb_hex(&args.background)?;
+    // Parse colors
+    let left_color =
+        parse_rgb_hex(&args.left_color).context("Failed to parse left_color")?;
+    let right_color =
+        parse_rgb_hex(&args.right_color).context("Failed to parse right_color")?;
+    let xy_color = parse_rgb_hex(&args.xy_color).context("Failed to parse xy_color")?;
+    let background =
+        parse_rgb_hex(&args.background).context("Failed to parse background")?;
+    // Create options
     let options = RenderOptions {
-        width: args.width,
-        height: args.height,
-        fps: args.fps,
+        width: args.width, height: args.height, fps: args.fps,
         mode: args.mode.into(),
-        left_color,
-        right_color,
-        xy_color,
-        background,
-        show_grid: args.show_grid,
-        line_thickness: args.line_thickness,
+        left_color, right_color, xy_color, background,
+        show_grid: args.show_grid, line_thickness: args.line_thickness,
     };
     // Determine output path
-    let output = match args.output {
-        Some(path) => path,
-        None => {
-            let mut path = args.input.clone();
-            path.set_extension("mp4");
-            path
-        }
-    };
+    let output = args.output.clone().unwrap_or_else(|| {
+        let mut path = args.input.clone();
+        path.set_extension("mp4");
+        path
+    });
-    if args.verbose {
-        println!("Oscilloscope Video Generator");
-        println!("============================");
-        println!("Input: {}", args.input.display());
-        println!("Output: {}", output.display());
-        println!("Resolution: {}x{}", args.width, args.height);
-        println!("FPS: {}", args.fps);
-        println!("Mode: {:?}", args.mode);
-        println!("Quality: {:?}", args.quality);
-        println!("Threads: {:?}", args.threads.unwrap_or_else(|| rayon::current_num_threads()));
-        println!();
-    } else {
-        println!("Oscilloscope Video Generator");
-        println!("============================");
-        println!("Input: {}", args.input.display());
-        println!("Output: {}", output.display());
-        println!("Resolution: {}x{} @ {}fps", args.width, args.height, args.fps);
-        println!("Mode: {:?}", args.mode);
-        println!();
-    }
+    println!("Oscilloscope Video Generator");
+    println!("============================");
+    println!("Input: {}", args.input.display());
+    println!("Output: {}", output.display());
+    println!("Resolution: {}x{} @ {}fps", args.width, args.height, args.fps);
+    println!();
-    // Decode audio
-    let audio_data = AudioData::from_wav(&args.input)
-        .with_context(|| format!("Failed to decode audio: {}", args.input.display()))?;
+    // Initialize streaming audio reader (with FFT window size lookahead)
+    let mut audio_stream = AudioStream::new(&args.input, 2048)
+        .with_context(|| format!("Failed to open audio stream: {}", args.input.display()))?;
-    if args.verbose {
-        println!(
-            "Audio: {}Hz, {:.2}s duration, {} samples",
-            audio_data.sample_rate,
-            audio_data.duration,
-            audio_data.len()
-        );
-    }
     // Progress callback
     let progress = Arc::new(AtomicUsize::new(0));
     let progress_callback = move |percent: f64, current: usize, total: usize| {
         let prev = progress.fetch_add(0, Ordering::SeqCst);
-        if current - prev >= 30 || current == total || current == 1 {
+        if current - prev >= 30 || current == total {
             progress.store(current, Ordering::SeqCst);
-            print!("\rRendering and Encoding: {:.0}% ({}/{})", percent, current, total);
+            print!("\rRendering and Encoding: {:.1}% ({}/{})", percent, current, total);
             let _ = std::io::stdout().flush();
         }
     };
     // Check if output exists and handle overwrite
     if output.exists() && !args.overwrite {
-        return Err(anyhow::anyhow!(
-            "Output file already exists: {}. Use --overwrite to replace it.",
-            output.display()
-        ));
+        return Err(anyhow::anyhow!("Output exists. Use --overwrite."));
     }
     let mut encoder = VideoEncoder::new(
-        &args.input,
-        &output,
-        args.width,
-        args.height,
-        args.fps,
-        args.quality.into(),
-        args.overwrite,
+        &args.input, &output, args.width, args.height, args.fps,
+        args.quality.into(), args.overwrite,
     )?;
-    println!("Rendering and encoding...");
-    stream_frames(&audio_data, &options, &mut encoder, &progress_callback)?;
+    println!("Starting streaming process...");
+    stream_frames(&mut audio_stream, &options, &mut encoder, &progress_callback)?;
     println!();
     encoder.finish().context("Failed to finish video encoding")?;
-    let file_size = std::fs::metadata(&output)
-        .map(|m| m.len())
-        .unwrap_or(0);
+    let file_size = std::fs::metadata(&output).map(|m| m.len()).unwrap_or(0);
     println!("\nDone!");
     println!("Output: {}", output.display());
     println!("Size: {:.2} MB", file_size as f64 / 1_000_000.0);

View File

@@ -1,6 +1,6 @@
-//! Frame rendering module.
+//! Frame rendering module with true single-pass streaming.
-use crate::audio::AudioData;
+use crate::audio::AudioStream;
 use crate::video::VideoEncoder;
 use anyhow::{anyhow, Result};
 use image::ImageBuffer;
@@ -14,8 +14,8 @@ const MAX_FREQ: f32 = 20000.0;
 const FREQ_BOOST_FACTOR: f32 = 5.0;
 const DYNAMIC_RANGE_SCALE: f32 = 20.0;
 const NOISE_FLOOR: f32 = 0.05;
-const SMOOTH_RISE: f32 = 0.6; // How quickly bars rise
-const SMOOTH_FALL: f32 = 0.3; // How quickly bars fall (gravity)
+const SMOOTH_RISE: f32 = 0.6;
+const SMOOTH_FALL: f32 = 0.3;
 thread_local! {
     static FFT_PLANNER: RefCell<FftPlanner<f32>> = RefCell::new(FftPlanner::new());
@@ -38,44 +38,96 @@ pub struct RenderOptions {
     pub show_grid: bool, pub line_thickness: u32,
 }
-fn compute_raw_bars(spectrum: &[f32], num_bars: usize, sample_rate: u32) -> Vec<f32> {
+/// Compute and update smoothed spectrometer bars for the current frame.
+fn update_spectrometer(
+    left: &[f32],
+    right: &[f32],
+    prev_bars: &mut [f32],
+    sample_rate: u32,
+) {
+    let mut buffer: Vec<Complex<f32>> = (0..FFT_SIZE).map(|i| {
+        let l = *left.get(i).unwrap_or(&0.0);
+        let r = *right.get(i).unwrap_or(&0.0);
+        Complex::new(l + r, 0.0)
+    }).collect();
+    HANN_WINDOW.with(|win| {
+        for (sample, &w) in buffer.iter_mut().zip(win.iter()) {
+            sample.re *= w;
+        }
+    });
+    let fft = FFT_PLANNER.with(|p| p.borrow_mut().plan_fft_forward(FFT_SIZE));
+    fft.process(&mut buffer);
     let nyquist = sample_rate as f32 / 2.0;
-    let mut bars = vec![0.0; num_bars];
+    let spectrum: Vec<f32> = buffer[1..FFT_SIZE / 2].iter().map(|c| c.norm() / FFT_SIZE as f32).collect();
+    let num_bars = prev_bars.len();
     for i in 0..num_bars {
         let f_start = MIN_FREQ * (MAX_FREQ / MIN_FREQ).powf(i as f32 / num_bars as f32);
         let f_end = MIN_FREQ * (MAX_FREQ / MIN_FREQ).powf((i + 1) as f32 / num_bars as f32);
         let bin_start = (f_start / nyquist * spectrum.len() as f32).floor() as usize;
         let bin_end = (f_end / nyquist * spectrum.len() as f32).ceil() as usize;
         let bin_end = bin_end.max(bin_start + 1).min(spectrum.len());
         let mut magnitude = 0.0f32;
         if bin_start < spectrum.len() {
-            for k in bin_start..bin_end { magnitude = magnitude.max(spectrum[k]); }
+            for k in bin_start..bin_end {
+                magnitude = magnitude.max(spectrum[k]);
+            }
         }
         let freq_factor = 1.0 + (f_start / MAX_FREQ) * FREQ_BOOST_FACTOR;
-        let mut val = (magnitude * freq_factor * DYNAMIC_RANGE_SCALE).sqrt().min(1.0);
-        if val < NOISE_FLOOR { val = 0.0; }
-        bars[i] = val;
+        let mut current_val = (magnitude * freq_factor * DYNAMIC_RANGE_SCALE).sqrt().min(1.0);
+        if current_val < NOISE_FLOOR { current_val = 0.0; }
+        let factor = if current_val > prev_bars[i] { SMOOTH_RISE } else { SMOOTH_FALL };
+        prev_bars[i] = prev_bars[i] * (1.0 - factor) + current_val * factor;
     }
-    bars
 }
-fn smooth_bars(raw_bars: &mut Vec<Vec<f32>>) {
-    if raw_bars.is_empty() { return; }
-    let num_bars = raw_bars[0].len();
-    let mut prev_bars = vec![0.0; num_bars];
-    for frame_bars in raw_bars.iter_mut() {
-        for i in 0..num_bars {
-            let current_val = frame_bars[i];
-            let prev_val = prev_bars[i];
-            let factor = if current_val > prev_val { SMOOTH_RISE } else { SMOOTH_FALL };
-            let smoothed_val = prev_val * (1.0 - factor) + current_val * factor;
-            frame_bars[i] = smoothed_val;
-            prev_bars[i] = smoothed_val;
-        }
-    }
-}
+pub fn stream_frames(
+    audio_stream: &mut AudioStream,
+    options: &RenderOptions,
+    encoder: &mut VideoEncoder,
+    progress_callback: &(impl Fn(f64, usize, usize) + Send + Sync),
+) -> Result<()> {
+    let total_frames = ((audio_stream.duration * options.fps as f64) as usize).max(1);
+    let num_bars = if options.mode == RenderMode::Spectrometer { 64 } else { 32 };
+    let mut smoothed_bars = vec![0.0; num_bars];
+    println!("Processing {} frames...", total_frames);
+    for frame_idx in 0..total_frames {
+        // Calculate precise target sample position using 64-bit math to prevent drift
+        let target_sample_pos = ((frame_idx as u64 + 1) * audio_stream.sample_rate as u64 / options.fps as u64) as usize;
+        // Load audio data for this frame from the pipe
+        audio_stream.fill_until(target_sample_pos)?;
+        let samples_per_frame = (audio_stream.sample_rate / options.fps) as usize;
+        let (left, right) = audio_stream.get_last_samples(samples_per_frame);
+        let (fft_l, fft_r) = audio_stream.get_fft_window(FFT_SIZE);
+        // Process spectrometer if active
+        if matches!(options.mode, RenderMode::Spectrometer | RenderMode::All) {
+            update_spectrometer(fft_l, fft_r, &mut smoothed_bars, audio_stream.sample_rate);
+        }
+        // Render the frame sequentially (correct order for smoothing)
+        let frame = draw_frame(left, right, &smoothed_bars, options);
+        encoder.write_frame(&frame.into_raw())?;
+        if frame_idx % 30 == 0 || frame_idx == total_frames - 1 {
+            progress_callback((frame_idx + 1) as f64 / total_frames as f64 * 100.0, frame_idx + 1, total_frames);
+        }
+    }
+    Ok(())
 }
-fn render_smoothed_bars(
+fn render_bars(
     buffer: &mut ImageBuffer<image::Rgb<u8>, Vec<u8>>,
     bars: &[f32],
     x_offset: u32, y_offset: u32, width: u32, height: u32, color: image::Rgb<u8>,
@@ -99,104 +151,68 @@ fn render_smoothed_bars(
     }
 }
-pub fn stream_frames(
-    audio_data: &AudioData, options: &RenderOptions, encoder: &mut VideoEncoder,
-    progress_callback: &(impl Fn(f64, usize, usize) + Send + Sync),
-) -> Result<()> {
-    let total_samples = audio_data.left_channel.len();
-    let samples_per_frame = (audio_data.sample_rate / options.fps) as usize;
-    let total_frames = ((audio_data.duration * options.fps as f64) as usize).max(1);
-    use rayon::prelude::*;
-    println!("Pass 1/3: Analyzing spectrum...");
-    let num_bars = if options.mode == RenderMode::Spectrometer { 64 } else { 32 };
-    let mut all_raw_bars = if matches!(options.mode, RenderMode::Spectrometer | RenderMode::All) {
-        (0..total_frames).into_par_iter().map(|frame_idx| {
-            let start_sample = (frame_idx * samples_per_frame).min(total_samples.saturating_sub(1));
-            let mut buffer: Vec<Complex<f32>> = (0..FFT_SIZE).map(|i| {
-                if let Some(sample) = audio_data.left_channel.get(start_sample + i) {
-                    Complex::new(sample + audio_data.right_channel[start_sample + i], 0.0)
-                } else { Complex::new(0.0, 0.0) }
-            }).collect();
-            HANN_WINDOW.with(|win| {
-                for (sample, &w) in buffer.iter_mut().zip(win.iter()) { sample.re *= w; }
-            });
-            let fft = FFT_PLANNER.with(|p| p.borrow_mut().plan_fft_forward(FFT_SIZE));
-            fft.process(&mut buffer);
-            let spectrum: Vec<f32> = buffer[1..FFT_SIZE / 2].iter().map(|c| c.norm() / FFT_SIZE as f32).collect();
-            compute_raw_bars(&spectrum, num_bars, audio_data.sample_rate)
-        }).collect()
-    } else { Vec::new() };
-    println!("Pass 2/3: Smoothing data...");
-    if !all_raw_bars.is_empty() { smooth_bars(&mut all_raw_bars); }
-    println!("Pass 3/3: Rendering and encoding frames...");
-    let chunk_size = rayon::current_num_threads() * 2;
-    for chunk_start in (0..total_frames).step_by(chunk_size) {
-        let chunk_end = (chunk_start + chunk_size).min(total_frames);
-        let frames: Vec<Result<Vec<u8>>> = (chunk_start..chunk_end).into_par_iter().map(|frame_idx| {
-            let start_sample = (frame_idx * samples_per_frame).min(total_samples.saturating_sub(1));
-            let smoothed_bars = if all_raw_bars.is_empty() { None } else { Some(all_raw_bars[frame_idx].as_slice()) };
-            Ok(draw_frame(audio_data, start_sample, samples_per_frame, options, smoothed_bars).into_raw())
-        }).collect();
-        for frame in frames { encoder.write_frame(&frame?)?; }
-        progress_callback(chunk_end as f64 / total_frames as f64 * 100.0, chunk_end, total_frames);
-    }
-    Ok(())
-}
 pub fn draw_frame(
-    audio_data: &AudioData, start_sample: usize, samples_per_frame: usize,
-    options: &RenderOptions, smoothed_bars: Option<&[f32]>,
+    left: &[f32],
+    right: &[f32],
+    smoothed_bars: &[f32],
+    options: &RenderOptions,
 ) -> ImageBuffer<image::Rgb<u8>, Vec<u8>> {
     let (width, height) = (options.width, options.height);
     let mut buffer = ImageBuffer::new(width, height);
     for p in buffer.pixels_mut() { *p = options.background; }
     if options.show_grid { draw_graticule(&mut buffer, options.left_color); }
-    let end_sample = (start_sample + samples_per_frame).min(audio_data.left_channel.len());
     match options.mode {
         RenderMode::All => {
             let (hh, hw) = (height / 2, width / 2);
-            let samples_per_pixel = samples_per_frame as f32 / hw as f32;
-            for x in 0..width { buffer.put_pixel(x, hh, image::Rgb([40, 40, 40])); }
-            for y in 0..height { buffer.put_pixel(hw, y, image::Rgb([40, 40, 40])); }
+            let samples_per_pixel = left.len() as f32 / hw as f32;
+            // Top-left
             let mut pl = (hh/2) as i32;
             for x in 0..hw {
+                let idx = (x as f32 * samples_per_pixel) as usize;
+                if idx >= left.len() { break; }
+                let yl = (hh/2) as i32 - (left[idx] * (hh as f32 * 0.35)) as i32;
+                draw_line(&mut buffer, x as i32, pl, x as i32, yl, options.left_color);
+                pl = yl;
+            }
+            // Top-right
             let mut pr = (hh/2) as i32;
+            for x in 0..hw {
-                let idx = start_sample + (x as f32 * samples_per_pixel) as usize;
-                if idx >= audio_data.left_channel.len() { break; }
-                let yl = (hh/2) as i32 - (audio_data.left_channel[idx] * (hh as f32 * 0.35)) as i32;
-                let yr = (hh/2) as i32 - (audio_data.right_channel[idx] * (hh as f32 * 0.35)) as i32;
-                draw_line(&mut buffer, x as i32, pl, x as i32, yl, options.left_color);
+                let idx = (x as f32 * samples_per_pixel) as usize;
+                if idx >= right.len() { break; }
+                let yr = (hh/2) as i32 - (right[idx] * (hh as f32 * 0.35)) as i32;
                 draw_line(&mut buffer, (hw+x) as i32, pr, (hw+x) as i32, yr, options.right_color);
-                pl = yl; pr = yr;
+                pr = yr;
             }
+            // Bottom-left
             let (cx, cy) = (hw/2, hh + hh/2);
             let scale = hw.min(hh) as f32 * 0.35;
-            if start_sample < audio_data.left_channel.len() {
-                let mut px = cx as i32 + (audio_data.left_channel[start_sample] * scale) as i32;
-                let mut py = cy as i32 - (audio_data.right_channel[start_sample] * scale) as i32;
-                for i in 1..(end_sample - start_sample).min(samples_per_frame) {
-                    let idx = start_sample + i;
-                    if idx >= audio_data.left_channel.len() { break; }
-                    let x = cx as i32 + (audio_data.left_channel[idx] * scale) as i32;
-                    let y = cy as i32 - (audio_data.right_channel[idx] * scale) as i32;
+            if !left.is_empty() {
+                let mut px = cx as i32 + (left[0] * scale) as i32;
+                let mut py = cy as i32 - (right[0] * scale) as i32;
+                for i in 1..left.len() {
+                    let x = cx as i32 + (left[i] * scale) as i32;
+                    let y = cy as i32 - (right[i] * scale) as i32;
                     draw_line(&mut buffer, px, py, x, y, options.xy_color);
                     px = x; py = y;
                 }
             }
-            if let Some(bars) = smoothed_bars {
-                render_smoothed_bars(&mut buffer, bars, hw, hh, hw, hh, options.left_color);
-            }
+            for x in 0..width { buffer.put_pixel(x, hh, image::Rgb([40, 40, 40])); }
+            for y in 0..height { buffer.put_pixel(hw, y, image::Rgb([40, 40, 40])); }
+            // Bottom-right
+            render_bars(&mut buffer, smoothed_bars, hw, hh, hw, hh, options.left_color);
         }
         RenderMode::Spectrometer => {
-            if let Some(bars) = smoothed_bars {
-                render_smoothed_bars(&mut buffer, bars, 0, 0, width, height, options.left_color);
-            }
+            render_bars(&mut buffer, smoothed_bars, 0, 0, width, height, options.left_color);
        }
-        _ => { /* Simple waveform logic for Combined/Separate for completeness */ }
+        _ => { /* Combined/Separate modes logic here if needed */ }
     }
     buffer
 }
@@ -218,11 +234,13 @@ pub fn draw_line(buffer: &mut ImageBuffer<image::Rgb<u8>, Vec<u8>>, x0: i32, y0:
         if e2 <= dx { err += dx; y += sy; }
     }
 }
 fn draw_graticule(buffer: &mut ImageBuffer<image::Rgb<u8>, Vec<u8>>, color: image::Rgb<u8>) {
     let (w, h) = buffer.dimensions();
     for x in 0..w { buffer.put_pixel(x, h / 2, color); }
     for y in 0..h { buffer.put_pixel(w / 2, y, color); }
 }
 pub fn parse_rgb_hex(hex: &str) -> Result<image::Rgb<u8>> {
     let hex = hex.trim_start_matches('#');
     if hex.len() != 6 { return Err(anyhow!("Invalid RGB hex")); }
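For reference, the asymmetric rise/fall smoothing that update_spectrometer now applies in place (previously done in the separate smooth_bars pass over all frames) reduces to a one-line exponential blend. A standalone sketch with made-up input values:

    // Bars rise quickly (SMOOTH_RISE) and fall slowly (SMOOTH_FALL),
    // producing the "gravity" effect on the spectrometer display.
    const SMOOTH_RISE: f32 = 0.6;
    const SMOOTH_FALL: f32 = 0.3;

    fn smooth(prev: f32, current: f32) -> f32 {
        let factor = if current > prev { SMOOTH_RISE } else { SMOOTH_FALL };
        prev * (1.0 - factor) + current * factor
    }

    fn main() {
        // A single loud frame followed by silence: the bar jumps to 0.6,
        // then decays by 30% of its remaining height each frame
        // (0.600, 0.420, 0.294, 0.206, ...).
        let mut bar = 0.0f32;
        for (i, &input) in [1.0, 0.0, 0.0, 0.0].iter().enumerate() {
            bar = smooth(bar, input);
            println!("frame {}: {:.3}", i, bar);
        }
    }

Because the smoothing is now folded into the per-frame update, frames must be rendered strictly in order, which is why stream_frames drops the old rayon-parallel chunking in favor of a sequential loop.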