personal_website/src/hooks/useOfflineVideoExport.ts

import { useState, useCallback, useRef } from 'react';
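
/**
 * Offline video export: renders an audio visualization frame-by-frame to an
 * off-screen canvas, records the canvas stream together with the audio via
 * MediaRecorder, and returns the result as a downloadable Blob.
 */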
export const useOfflineVideoExport = () => {
  const [state, setState] = useState({
    isExporting: false,
    progress: 0,
    error: null as string | null,
    stage: 'idle' as 'idle' | 'preparing' | 'rendering' | 'encoding' | 'complete',
    fps: 0,
  });
  const cancelledRef = useRef(false);
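  // Trigger a browser "save file" for a finished blob via a temporary <a> element.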
  const downloadBlob = useCallback((blob: Blob, filename: string) => {
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = filename;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);
  }, []);
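  // Cooperative cancellation: flip the ref; the render loop checks it once per frame.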
  const cancelExport = useCallback(() => {
    console.log('Cancel export requested');
    cancelledRef.current = true;
    setState(prev => ({ ...prev, error: 'Cancelling...' }));
  }, []);
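  /**
   * Render and record the full visualization.
   * @param audioFile  Uploaded audio; falls back to a mock sine wave if it cannot be decoded.
   * @param drawFrame  Caller-supplied renderer invoked once per frame with 0-255 waveform data.
   * @param options    Output dimensions, frame rate, container, and quality preset. Note that
   *                   the container actually produced follows the first supported codec,
   *                   which may differ from `format`.
   * @returns The encoded video Blob, or null on cancellation/failure.
   */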
  const generateVideoWithAudio = useCallback(async (
    audioFile: File,
    drawFrame: (ctx: CanvasRenderingContext2D, width: number, height: number, leftData: Uint8Array, rightData: Uint8Array) => void,
    options: { fps: number; format: 'webm' | 'mp4'; width: number; height: number; quality?: 'low' | 'medium' | 'high'; }
  ): Promise<Blob | null> => {
    console.log('🚀 Starting video export with options:', options);
    cancelledRef.current = false;
    setState({ isExporting: true, progress: 0, error: null, stage: 'preparing', fps: 0 });
    try {
      const { fps, width, height, quality = 'medium' } = options;
      // Quality settings
      const qualitySettings = {
        low: { bitrateMultiplier: 0.5, samplesPerFrame: 1024 },
        medium: { bitrateMultiplier: 1.0, samplesPerFrame: 2048 },
        high: { bitrateMultiplier: 1.5, samplesPerFrame: 4096 },
      };
      const qualityConfig = qualitySettings[quality];
      // Create canvas for rendering
      const canvas = document.createElement('canvas');
      canvas.width = width;
      canvas.height = height;
      const ctx = canvas.getContext('2d');
      if (!ctx) {
        throw new Error('Canvas not supported');
      }
      setState(prev => ({ ...prev, stage: 'rendering', progress: 10 }));
      // Get supported codecs
      const codecs = [
        'video/webm;codecs=vp9',
        'video/webm;codecs=vp8',
        'video/mp4;codecs=h264',
        'video/mp4',
        'video/webm',
      ];
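      // Preference order above: VP9, then VP8, then MP4/H.264, with bare containers
      // as fallbacks; the first type MediaRecorder reports as supported wins.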
      let selectedCodec: string | null = null;
      let videoBitsPerSecond = 2000000; // Default 2 Mbps
      for (const codec of codecs) {
        if (MediaRecorder.isTypeSupported(codec)) {
          selectedCodec = codec;
          console.log(`✅ Using codec: ${codec}`);
          // Adjust bitrate based on codec and quality setting
          if (codec.includes('vp9')) {
            videoBitsPerSecond = Math.floor(3000000 * qualityConfig.bitrateMultiplier);
          } else if (codec.includes('h264')) {
            videoBitsPerSecond = Math.floor(4000000 * qualityConfig.bitrateMultiplier);
          } else if (codec.includes('vp8')) {
            videoBitsPerSecond = Math.floor(2000000 * qualityConfig.bitrateMultiplier);
          }
          break;
        }
      }
      if (!selectedCodec) {
        throw new Error('No video codec supported');
      }
      // Use real audio data if available, otherwise generate mock data
      let audioBuffer: AudioBuffer;
      let sampleRate: number;
      let totalSamples: number;
      let duration: number;
      try {
        // Try to decode the actual uploaded audio file
        const arrayBuffer = await audioFile.arrayBuffer();
        const audioContext = new AudioContext();
        audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
        await audioContext.close(); // the decoded AudioBuffer outlives its context
        sampleRate = audioBuffer.sampleRate;
        totalSamples = audioBuffer.length;
        duration = totalSamples / sampleRate;
        console.log(`✅ Using real audio: ${duration.toFixed(1)}s, ${totalSamples} samples`);
      } catch (audioError) {
        console.warn('⚠️ Could not decode audio file, using mock data:', audioError);
        // Generate mock audio data
        duration = 5.0; // 5 seconds
        sampleRate = 44100;
        totalSamples = Math.floor(duration * sampleRate);
        // Create a proper AudioBuffer for mock data
        const mockAudioContext = new AudioContext();
        audioBuffer = mockAudioContext.createBuffer(2, totalSamples, sampleRate);
        // Fill with a sine wave
        const leftChannel = audioBuffer.getChannelData(0);
        const rightChannel = audioBuffer.getChannelData(1);
        for (let i = 0; i < totalSamples; i++) {
          const time = i / sampleRate;
          const frequency = 440; // A4 note
          const value = Math.sin(2 * Math.PI * frequency * time) * 0.5;
          leftChannel[i] = value;
          rightChannel[i] = value;
        }
        await mockAudioContext.close();
        console.log(`📊 Using mock audio: ${duration.toFixed(1)}s, ${totalSamples} samples`);
      }
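      // Recording pipeline: canvas.captureStream() supplies the video track and a
      // MediaStreamAudioDestinationNode supplies the audio track; MediaRecorder
      // encodes both from a single combined MediaStream.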
      // Create audio context for recording
      const recordingAudioContext = new AudioContext();
      // Resume audio context if suspended
      if (recordingAudioContext.state === 'suspended') {
        await recordingAudioContext.resume();
      }
      // Create audio source and destination
      const recordingAudioSource = recordingAudioContext.createBufferSource();
      recordingAudioSource.buffer = audioBuffer;
      recordingAudioSource.loop = false;
      const audioDestination = recordingAudioContext.createMediaStreamDestination();
      recordingAudioSource.connect(audioDestination);
      recordingAudioSource.connect(recordingAudioContext.destination);
      // Combine video and audio streams
      const combinedStream = new MediaStream();
      canvas.captureStream(fps).getVideoTracks().forEach(track => combinedStream.addTrack(track));
      audioDestination.stream.getAudioTracks().forEach(track => combinedStream.addTrack(track));
      console.log(`✅ Combined stream: ${combinedStream.getVideoTracks().length} video, ${combinedStream.getAudioTracks().length} audio tracks`);
      // Chunks array to collect recorded data
      const chunks: Blob[] = [];
      const recorder = new MediaRecorder(combinedStream, {
        mimeType: selectedCodec,
        videoBitsPerSecond,
      });
      recorder.ondataavailable = (e) => {
        if (e.data.size > 0) {
          chunks.push(e.data);
        }
      };
      console.log('✅ MediaRecorder created with audio and video');
      recorder.start(1000); // Request data in 1-second chunks
      // Start audio playback synchronized with recording
      recordingAudioSource.start(0);
      console.log('🔊 Audio playback started for recording');
      // Generate animation frames for full audio duration
      const totalFrames = Math.ceil(duration * fps);
      const samplesPerFrame = Math.min(qualityConfig.samplesPerFrame, Math.floor(totalSamples / totalFrames));
      console.log(`🎬 Quality: ${quality}, Frames: ${totalFrames}, Samples/frame: ${samplesPerFrame}, Duration: ${duration.toFixed(1)}s`);
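      // Note: frames must be produced in (roughly) real time, because MediaRecorder
      // captures the live streams; rendering faster than the audio plays would
      // desynchronize video and sound.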
      for (let frameIndex = 0; frameIndex < totalFrames; frameIndex++) {
        if (cancelledRef.current) {
          try {
            recordingAudioSource.stop();
            recordingAudioContext.close();
          } catch (e) {
            // Best-effort teardown; the source may already be stopped
          }
          recorder.stop();
          setState({ isExporting: false, progress: 0, error: 'Cancelled', stage: 'idle', fps: 0 });
          return null;
        }
        // Calculate current audio position for this frame
        const currentSample = Math.min(frameIndex * samplesPerFrame, totalSamples - samplesPerFrame);
        // Get waveform data from actual audio buffer
        const leftChannel = audioBuffer.getChannelData(0);
        const rightChannel = audioBuffer.numberOfChannels > 1 ? audioBuffer.getChannelData(1) : leftChannel;
        // Create waveform data for this frame
        const leftData = new Uint8Array(samplesPerFrame);
        const rightData = new Uint8Array(samplesPerFrame);
        for (let i = 0; i < samplesPerFrame; i++) {
          const sampleIndex = currentSample + i;
          if (sampleIndex >= 0 && sampleIndex < totalSamples) {
            // Convert from -1..1 range to 0..255 range
            leftData[i] = Math.round(((leftChannel[sampleIndex] + 1) / 2) * 255);
            rightData[i] = Math.round(((rightChannel[sampleIndex] + 1) / 2) * 255);
          } else {
            leftData[i] = 128;
            rightData[i] = 128;
          }
        }
        // Clear canvas
        ctx.fillStyle = '#0a0f0a';
        ctx.fillRect(0, 0, width, height);
        // Draw the oscilloscope with this frame's waveform window
        try {
          drawFrame(ctx, width, height, leftData, rightData);
        } catch (drawError) {
          console.error('❌ Error in drawFrame:', drawError);
          // Fallback: simple waveform
          ctx.strokeStyle = '#00ff00';
          ctx.lineWidth = 2;
          ctx.beginPath();
          for (let x = 0; x < width; x += 4) {
            const sampleIndex = Math.floor((x / width) * samplesPerFrame);
            const value = sampleIndex < leftData.length ? leftData[sampleIndex] : 128;
            const y = height / 2 + ((value - 128) / 128) * (height / 4);
            if (x === 0) {
              ctx.moveTo(x, y);
            } else {
              ctx.lineTo(x, y);
            }
          }
          ctx.stroke();
        }
        // Add frame info
        ctx.fillStyle = '#ffffff';
        ctx.font = '16px monospace';
        ctx.fillText(`Frame ${frameIndex + 1}/${totalFrames}`, 20, 30);
        ctx.fillText(`Time: ${(frameIndex / fps).toFixed(1)}s`, 20, 50);
        const progress = 20 + Math.round((frameIndex / totalFrames) * 70);
        setState(prev => ({ ...prev, progress }));
        if (frameIndex % Math.max(1, Math.floor(totalFrames / 10)) === 0) {
          console.log(`📸 Frame ${frameIndex + 1}/${totalFrames} (${progress}%) - Time: ${(frameIndex / fps).toFixed(1)}s`);
        }
        // Frame timing: pace the loop at roughly one frame interval
        await new Promise(resolve => setTimeout(resolve, 1000 / fps));
      }
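      // Teardown: stopping the recorder triggers a final ondataavailable flush.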
      setState(prev => ({ ...prev, progress: 90 }));
      console.log('⏹️ Stopping recorder...');
      recorder.stop();
      try {
        recordingAudioSource.stop();
        recordingAudioContext.close();
      } catch (e) {
        console.warn('Error stopping audio:', e);
      }
      // Wait for the recorder to finish flushing
      await new Promise<void>((resolve) => {
        const checkInterval = setInterval(() => {
          if (recorder.state === 'inactive') {
            clearInterval(checkInterval);
            resolve();
          }
        }, 100);
      });
      if (chunks.length === 0) {
        throw new Error('No video chunks recorded');
      }
      const videoBlob = new Blob(chunks, { type: selectedCodec });
      console.log(`✅ Video created: ${(videoBlob.size / 1024 / 1024).toFixed(2)} MB`);
      setState({ isExporting: false, progress: 100, error: null, stage: 'complete', fps: 0 });
      return videoBlob;
    } catch (error) {
      console.error('❌ Export failed:', error);
      const message = error instanceof Error ? error.message : 'Export failed';
      setState({ isExporting: false, progress: 0, error: message, stage: 'idle', fps: 0 });
      return null;
    }
  }, []);
  return {
    ...state,
    generateVideoWithAudio,
    cancelExport,
    downloadBlob,
  };
};
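
/*
 * Minimal usage sketch (hypothetical component code; `renderOscilloscope` and
 * the file-input wiring are illustrative, not part of this module):
 *
 *   const { isExporting, progress, generateVideoWithAudio, downloadBlob } =
 *     useOfflineVideoExport();
 *
 *   const onExport = async (file: File) => {
 *     const blob = await generateVideoWithAudio(file, renderOscilloscope, {
 *       fps: 30, format: 'webm', width: 1280, height: 720, quality: 'high',
 *     });
 *     if (blob) downloadBlob(blob, 'oscilloscope.webm');
 *   };
 */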