// Source: mirror of https://github.com/JorySeverijnse/ui-fixer-supreme.git (synced 2026-01-29)
import { useState, useCallback, useRef } from 'react';

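/**
 * Offline (in-browser) video export for the oscilloscope visualizer.
 *
 * Decodes the uploaded audio file with the Web Audio API, renders an optional
 * intro clip plus per-frame waveforms onto an offscreen canvas via the
 * caller-supplied drawFrame callback, and records canvas + audio together with
 * MediaRecorder. Exposes export state (isExporting, progress, stage, error,
 * fps) alongside the generateVideoWithAudio, cancelExport, and downloadBlob
 * helpers.
 */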
export const useOfflineVideoExport = () => {
  const [state, setState] = useState({
    isExporting: false,
    progress: 0,
    error: null as string | null,
    stage: 'idle' as 'idle' | 'preparing' | 'rendering' | 'encoding' | 'complete',
    fps: 0,
  });

  const cancelledRef = useRef(false);

  // Trigger a browser download for the finished video blob.
  const downloadBlob = useCallback((blob: Blob, filename: string) => {
    const url = URL.createObjectURL(blob);
    const a = document.createElement('a');
    a.href = url;
    a.download = filename;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
    URL.revokeObjectURL(url);
  }, []);

  const cancelExport = useCallback(() => {
    console.log('Cancel export requested');
    // Cooperative cancellation: the render loops check this ref on every frame.
    cancelledRef.current = true;
    setState(prev => ({ ...prev, error: 'Cancelling...' }));
  }, []);

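  // Export pipeline: decode the uploaded audio, render the intro video and
  // oscilloscope frames to an offscreen canvas, capture canvas + audio with
  // MediaRecorder, and hand back the finished Blob (or null on cancel/failure).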
  const generateVideoWithAudio = useCallback(async (
    audioFile: File,
    drawFrame: (ctx: CanvasRenderingContext2D, width: number, height: number, leftData: Uint8Array, rightData: Uint8Array) => void,
    options: { fps: number; format: 'webm' | 'mp4'; width: number; height: number; quality?: 'low' | 'medium' | 'high'; }
  ): Promise<Blob | null> => {
    // Note: options.format is currently not used; the output container follows
    // whichever MIME type MediaRecorder reports as supported below.
    console.log('🚀 Starting video export with options:', options);
    cancelledRef.current = false;
    setState({ isExporting: true, progress: 0, error: null, stage: 'preparing', fps: 0 });

    try {
      const { fps, width, height, quality = 'medium' } = options;

      // Quality settings
      const qualitySettings = {
        low: { bitrateMultiplier: 0.5, samplesPerFrame: 1024 },
        medium: { bitrateMultiplier: 1.0, samplesPerFrame: 2048 },
        high: { bitrateMultiplier: 1.5, samplesPerFrame: 4096 }
      };

      const qualityConfig = qualitySettings[quality];

      // Create canvas for rendering
      const canvas = document.createElement('canvas');
      canvas.width = width;
      canvas.height = height;
      const ctx = canvas.getContext('2d');

      if (!ctx) {
        throw new Error('Canvas not supported');
      }

      setState(prev => ({ ...prev, stage: 'rendering', progress: 5 }));

      // Load intro video - always use webm
      console.log('📹 Loading intro video...');
      const introVideo = document.createElement('video');
      introVideo.muted = true;
      introVideo.playsInline = true;
      introVideo.preload = 'auto';
      introVideo.src = '/intro.webm';

      let introDuration = 0;

      // Wait for video to be fully loaded
      await new Promise<void>((resolve) => {
        introVideo.onloadeddata = () => {
          introDuration = introVideo.duration;
          console.log(`✅ Intro video loaded: ${introDuration.toFixed(2)}s, ${introVideo.videoWidth}x${introVideo.videoHeight}`);
          resolve();
        };
        introVideo.onerror = (e) => {
          // A missing intro is not fatal: introDuration stays 0 and the intro is skipped.
          console.error('❌ Failed to load intro video:', e);
          resolve();
        };
        introVideo.load();
      });

      setState(prev => ({ ...prev, progress: 10 }));

      // Get supported codecs
      const codecs = [
        'video/webm;codecs=vp9',
        'video/webm;codecs=vp8',
        'video/mp4;codecs=h264',
        'video/mp4',
        'video/webm'
      ];

      let selectedCodec: string | null = null;
      let videoBitsPerSecond = 2000000; // Default 2 Mbps

      for (const codec of codecs) {
        if (MediaRecorder.isTypeSupported(codec)) {
          selectedCodec = codec;
          console.log(`✅ Using codec: ${codec}`);

          // Adjust bitrate based on codec and quality setting
          if (codec.includes('vp9')) {
            videoBitsPerSecond = Math.floor(3000000 * qualityConfig.bitrateMultiplier);
          } else if (codec.includes('h264')) {
            videoBitsPerSecond = Math.floor(4000000 * qualityConfig.bitrateMultiplier);
          } else if (codec.includes('vp8')) {
            videoBitsPerSecond = Math.floor(2000000 * qualityConfig.bitrateMultiplier);
          }

          break;
        }
      }

      if (!selectedCodec) {
        throw new Error('No video codec supported');
      }

      // Use real audio data if available, otherwise generate mock data
      let audioBuffer: AudioBuffer;
      let sampleRate: number;
      let totalSamples: number;
      let duration: number;

      try {
        // Try to decode the actual uploaded audio file
        const arrayBuffer = await audioFile.arrayBuffer();
        const audioContext = new AudioContext();
        audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
        sampleRate = audioBuffer.sampleRate;
        totalSamples = audioBuffer.length;
        duration = totalSamples / sampleRate;
        console.log(`✅ Using real audio: ${duration.toFixed(1)}s, ${totalSamples} samples`);
      } catch (audioError) {
        console.warn('⚠️ Could not decode audio file, using mock data:', audioError);
        // Generate mock audio data
        duration = 5.0; // 5 seconds
        sampleRate = 44100;
        totalSamples = Math.floor(duration * sampleRate);

        // Create a proper AudioBuffer for mock data
        const mockAudioContext = new AudioContext();
        audioBuffer = mockAudioContext.createBuffer(2, totalSamples, sampleRate);

        // Fill with sine wave
        const leftChannel = audioBuffer.getChannelData(0);
        const rightChannel = audioBuffer.getChannelData(1);

        for (let i = 0; i < totalSamples; i++) {
          const time = i / sampleRate;
          const frequency = 440; // A4 note
          const value = Math.sin(2 * Math.PI * frequency * time) * 0.5;
          leftChannel[i] = value;
          rightChannel[i] = value;
        }
        console.log(`📊 Using mock audio: ${duration.toFixed(1)}s, ${totalSamples} samples`);
      }

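      // Route the decoded audio through a MediaStreamDestination so MediaRecorder
      // captures it alongside the canvas video track; the extra connection to
      // recordingAudioContext.destination also plays it audibly during the export.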
      // Create audio context for recording
      const recordingAudioContext = new AudioContext();

      // Resume audio context if suspended
      if (recordingAudioContext.state === 'suspended') {
        await recordingAudioContext.resume();
      }

      // Create audio source and destination
      const recordingAudioSource = recordingAudioContext.createBufferSource();
      recordingAudioSource.buffer = audioBuffer;
      recordingAudioSource.loop = false;

      const audioDestination = recordingAudioContext.createMediaStreamDestination();
      recordingAudioSource.connect(audioDestination);
      recordingAudioSource.connect(recordingAudioContext.destination);

      // Combine video and audio streams
      const combinedStream = new MediaStream();
      canvas.captureStream(fps).getVideoTracks().forEach(track => combinedStream.addTrack(track));
      audioDestination.stream.getAudioTracks().forEach(track => combinedStream.addTrack(track));

      console.log(`✅ Combined stream: ${combinedStream.getVideoTracks().length} video, ${combinedStream.getAudioTracks().length} audio tracks`);

      // Chunks array to collect recorded data
      const chunks: Blob[] = [];

      const recorder = new MediaRecorder(combinedStream, {
        mimeType: selectedCodec,
        videoBitsPerSecond,
      });

      recorder.ondataavailable = (e) => {
        if (e.data.size > 0) {
          chunks.push(e.data);
        }
      };

      console.log('✅ MediaRecorder created with audio and video');
      recorder.start(1000); // 1 second chunks

      // Calculate total frames including intro
      const introFrames = introDuration > 0 ? Math.ceil(introDuration * fps) : 0;
      const mainFrames = Math.ceil(duration * fps);
      const fadeFrames = Math.ceil(fps * 0.5); // 0.5 second fade
      const totalFrames = introFrames + mainFrames;
      const samplesPerFrame = Math.min(qualityConfig.samplesPerFrame, Math.floor(totalSamples / mainFrames));

      console.log(`🎬 Total frames: ${totalFrames} (intro: ${introFrames}, main: ${mainFrames}, fade: ${fadeFrames})`);

      // Render intro frames first
      if (introFrames > 0) {
        console.log('📹 Rendering intro frames...');

        for (let frameIndex = 0; frameIndex < introFrames; frameIndex++) {
          if (cancelledRef.current) {
            recorder.stop();
            setState({ isExporting: false, progress: 0, error: 'Cancelled', stage: 'idle', fps: 0 });
            return null;
          }

          // Seek to correct time and wait for frame
          const targetTime = frameIndex / fps;
          introVideo.currentTime = targetTime;

          // Wait for the seek to complete
          await new Promise<void>((resolve) => {
            const onSeeked = () => {
              introVideo.removeEventListener('seeked', onSeeked);
              resolve();
            };
            introVideo.addEventListener('seeked', onSeeked);
            // Fallback timeout
            setTimeout(resolve, 50);
          });

          // Draw intro video frame scaled to canvas
          ctx.fillStyle = '#0a0f0a';
          ctx.fillRect(0, 0, width, height);

          // Calculate aspect-ratio-correct scaling (letterbox/pillarbox as needed)
          const videoAspect = introVideo.videoWidth / introVideo.videoHeight;
          const canvasAspect = width / height;
          let drawWidth = width;
          let drawHeight = height;
          let drawX = 0;
          let drawY = 0;

          if (videoAspect > canvasAspect) {
            drawHeight = width / videoAspect;
            drawY = (height - drawHeight) / 2;
          } else {
            drawWidth = height * videoAspect;
            drawX = (width - drawWidth) / 2;
          }

          ctx.drawImage(introVideo, drawX, drawY, drawWidth, drawHeight);

          const progress = 10 + Math.round((frameIndex / introFrames) * 20);
          setState(prev => ({ ...prev, progress }));

          await new Promise(resolve => setTimeout(resolve, 1000 / fps));
        }

        console.log('✅ Intro frames complete');
      }

      // Start audio playback for main content
      recordingAudioSource.start(0);
      console.log('🔊 Audio playback started for recording');

      // Render main oscilloscope frames with fade-in from intro
      for (let frameIndex = 0; frameIndex < mainFrames; frameIndex++) {
        if (cancelledRef.current) {
          try {
            recordingAudioSource.stop();
            recordingAudioContext.close();
          } catch { /* best-effort cleanup */ }
          recorder.stop();
          setState({ isExporting: false, progress: 0, error: 'Cancelled', stage: 'idle', fps: 0 });
          return null;
        }

        // Calculate current audio position for this frame
        const currentSample = Math.min(frameIndex * samplesPerFrame, totalSamples - samplesPerFrame);

        // Get waveform data from actual audio buffer
        const leftChannel = audioBuffer.getChannelData(0);
        const rightChannel = audioBuffer.numberOfChannels > 1 ? audioBuffer.getChannelData(1) : leftChannel;

        // Create waveform data for this frame
        const leftData = new Uint8Array(samplesPerFrame);
        const rightData = new Uint8Array(samplesPerFrame);

        for (let i = 0; i < samplesPerFrame; i++) {
          const sampleIndex = currentSample + i;
          if (sampleIndex >= 0 && sampleIndex < totalSamples) {
            // Convert from -1..1 range to 0..255 range
            leftData[i] = Math.round(((leftChannel[sampleIndex] + 1) / 2) * 255);
            rightData[i] = Math.round(((rightChannel[sampleIndex] + 1) / 2) * 255);
          } else {
            // Out of range: write the midpoint (silence)
            leftData[i] = 128;
            rightData[i] = 128;
          }
        }

        // Clear canvas
        ctx.fillStyle = '#0a0f0a';
        ctx.fillRect(0, 0, width, height);

        // Draw oscilloscope with audio data
        try {
          drawFrame(ctx, width, height, leftData, rightData);
        } catch (drawError) {
          console.error('❌ Error in drawFrame:', drawError);
          // Fallback: simple waveform
          ctx.strokeStyle = '#00ff00';
          ctx.lineWidth = 2;
          ctx.beginPath();
          for (let x = 0; x < width; x += 4) {
            const sampleIndex = Math.floor((x / width) * samplesPerFrame);
            const value = sampleIndex < leftData.length ? leftData[sampleIndex] : 128;
            const y = height / 2 + ((value - 128) / 128) * (height / 4);
            if (x === 0) {
              ctx.moveTo(x, y);
            } else {
              ctx.lineTo(x, y);
            }
          }
          ctx.stroke();
        }

        // Apply fade-in effect from intro (first fadeFrames of main content)
        if (introDuration > 0 && frameIndex < fadeFrames) {
          const fadeProgress = frameIndex / fadeFrames;
          // Draw a semi-transparent black overlay that fades out
          ctx.fillStyle = `rgba(10, 15, 10, ${1 - fadeProgress})`;
          ctx.fillRect(0, 0, width, height);
        }

        // Add frame info
        ctx.fillStyle = '#ffffff';
        ctx.font = '16px monospace';
        ctx.fillText(`Frame ${introFrames + frameIndex + 1}/${totalFrames}`, 20, 30);
        ctx.fillText(`Time: ${(frameIndex / fps).toFixed(1)}s`, 20, 50);

        const progress = 30 + Math.round((frameIndex / mainFrames) * 60);
        setState(prev => ({ ...prev, progress }));

        if (frameIndex % Math.max(1, Math.floor(mainFrames / 10)) === 0) {
          console.log(`📸 Frame ${frameIndex + 1}/${mainFrames} (${progress}%) - Time: ${(frameIndex / fps).toFixed(1)}s`);
        }

        // Frame timing
        await new Promise(resolve => setTimeout(resolve, 1000 / fps));
      }

      setState(prev => ({ ...prev, progress: 90 }));

      console.log('⏹️ Stopping recorder...');
      recorder.stop();
      try {
        recordingAudioSource.stop();
        recordingAudioContext.close();
      } catch (e) {
        console.warn('Error stopping audio:', e);
      }

      // Wait for completion
      await new Promise<void>((resolve) => {
        const checkInterval = setInterval(() => {
          if (recorder.state === 'inactive') {
            clearInterval(checkInterval);
            resolve();
          }
        }, 100);
      });

      if (chunks.length === 0) {
        throw new Error('No video chunks recorded');
      }

      const videoBlob = new Blob(chunks, { type: selectedCodec });
      console.log(`✅ Video created: ${(videoBlob.size / 1024 / 1024).toFixed(2)} MB`);

      setState({ isExporting: false, progress: 100, error: null, stage: 'complete', fps: 0 });

      return videoBlob;
    } catch (error) {
      console.error('❌ Export failed:', error);
      const message = error instanceof Error ? error.message : 'Export failed';
      setState({ isExporting: false, progress: 0, error: message, stage: 'idle', fps: 0 });
      return null;
    }
  }, []);

  return {
    ...state,
    generateVideoWithAudio,
    cancelExport,
    downloadBlob,
  };
};
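
// Illustrative usage sketch (not part of the original file): a hypothetical
// component wiring the hook to an uploaded File and a minimal drawFrame.
// The component name, dimensions, and drawing logic below are assumptions
// shown for demonstration only.
//
// const ExportButton = () => {
//   const { isExporting, progress, generateVideoWithAudio, downloadBlob } = useOfflineVideoExport();
//
//   const exportFile = async (file: File) => {
//     const blob = await generateVideoWithAudio(
//       file,
//       (ctx, width, height, leftData, rightData) => {
//         // Minimal oscilloscope: trace the left channel across the canvas.
//         ctx.strokeStyle = '#00ff00';
//         ctx.beginPath();
//         leftData.forEach((v, i) => {
//           const x = (i / leftData.length) * width;
//           const y = height / 2 + ((v - 128) / 128) * (height / 4);
//           if (i === 0) ctx.moveTo(x, y); else ctx.lineTo(x, y);
//         });
//         ctx.stroke();
//       },
//       { fps: 30, format: 'webm', width: 1280, height: 720, quality: 'medium' }
//     );
//     if (blob) downloadBlob(blob, 'oscilloscope.webm');
//   };
//
//   return null; // render a file input and progress UI (isExporting, progress) as needed
// };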