import React, { useState, useEffect, useRef } from 'react';
import { Mic, Square, Users, Headphones } from 'lucide-react';
import { invoke } from "@tauri-apps/api/core";
import { listen } from '@tauri-apps/api/event';
import logo from '../assets/logo.png';

interface PromptTemplate {
  id: string;
  name: string;
  content: string;
  keywords?: string[];
}

interface HistoryItem {
  id: string;
  date: string;
  transcription: string;
  summary: string;
}

interface RecorderProps {
  apiKey: string;
  productId: string;
  prompts: PromptTemplate[];
  onOpenSettings: () => void;
  // Lifted state props (still passed for state management, though unused in this view)
  transcription: string;
  setTranscription: (val: string) => void;
  summary: string;
  setSummary: (val: string) => void;
  // History props
  history: HistoryItem[];
  onSaveToHistory: (t?: string, s?: string) => void;
  onDeleteHistory: (id: string) => void;
  onLoadHistory: (item: HistoryItem) => void;
  savePath: string | null;
  onRecordingComplete: () => void;
  autoStart?: boolean;
  recordingSubject?: string;
  onAutoStartHandled?: () => void;
  addToast: (msg: string, type: 'success' | 'error' | 'info', duration?: number) => void;
  selectedModel: string;
  onModelChange: (model: string) => void;
}

interface AudioDevice {
  id: string;
  name: string;
}

const Recorder: React.FC<RecorderProps> = ({
  apiKey,
  productId,
  prompts,
  setTranscription,
  setSummary,
  onSaveToHistory,
  savePath,
  onRecordingComplete,
  onOpenSettings,
  addToast,
  selectedModel,
  onModelChange,
  ...props
}) => {
  const [isRecording, setIsRecording] = useState(false);
  const [isStopping, setIsStopping] = useState(false); // Lock to prevent re-entrant stops
  const [isPaused, setIsPaused] = useState(false);
  const [isWaiting, setIsWaiting] = useState(false); // True while armed and waiting for speech (auto-start)
  const [autoStartEnabled, setAutoStartEnabled] = useState(false); // "Wait for speech" toggle
  const [status, setStatus] = useState('Ready to record');
  const [selectedDevice, setSelectedDevice] = useState('');
  const [selectedPromptId, setSelectedPromptId] = useState('');
  // selectedModel is now a prop
  const [recordingMode, setRecordingMode] = useState<'voice' | 'meeting'>('voice');
  const [devices, setDevices] = useState<AudioDevice[]>([]);
  const [availableModels, setAvailableModels] = useState<Array<{ id: string; name: string }>>([]);
  const [lastSpeechTime, setLastSpeechTime] = useState(Date.now());
  const [silenceDuration, setSilenceDuration] = useState(0);

  // Filter devices by mode: virtual loopback devices (Hearbit/BlackHole) are
  // only offered in meeting mode, physical microphones only in voice mode.
  const filteredDevices = devices.filter(d => {
    const isVirtual = d.name.toLowerCase().includes('hearbit') || d.name.toLowerCase().includes('blackhole');
    return recordingMode === 'meeting' ? isVirtual : !isVirtual;
  });
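  // Example of the filter above (device names are illustrative, not taken
  // from a real device list): given ["MacBook Pro Microphone", "Hearbit Audio",
  // "Hearbit Virtual Mic (BlackHole)"], voice mode offers only the physical
  // microphone, while meeting mode offers only the two virtual entries.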
  useEffect(() => {
    loadDevices();
    if (apiKey && productId) {
      loadModels();
    }
  }, [apiKey, productId]);

  const loadModels = async () => {
    try {
      const models = await invoke<Array<{ id: string; name: string }>>('get_available_models', { apiKey, productId });
      if (models && models.length > 0) {
        models.sort((a, b) => a.name.localeCompare(b.name));
        setAvailableModels(models);
      }
    } catch (e) {
      console.error("Failed to load models dynamically, using defaults:", e);
    }
  };

  // Keep the prompt selection valid: default to the first prompt, and fall
  // back to it if the currently selected prompt no longer exists.
  useEffect(() => {
    if (prompts.length > 0 && !selectedPromptId) {
      setSelectedPromptId(prompts[0].id);
    } else if (prompts.length > 0 && selectedPromptId) {
      if (!prompts.find(p => p.id === selectedPromptId)) {
        setSelectedPromptId(prompts[0].id);
      }
    }
  }, [prompts, selectedPromptId]);

  const loadDevices = async () => {
    try {
      const devList = await invoke<AudioDevice[]>('get_input_devices');
      // Alias BlackHole to a friendlier name
      const aliasedDevs = devList.map(d => ({
        ...d,
        name: d.name.includes('BlackHole') ? 'Hearbit Virtual Mic (BlackHole)' : d.name
      }));
      setDevices(aliasedDevs);

      // Smart auto-select based on mode if no selection was made yet:
      // prioritize "Hearbit Audio" (aggregate device) over "Hearbit Virtual Mic" (BlackHole).
      if (!selectedDevice) {
        const aggregateDev = aliasedDevs.find(d => d.name === 'Hearbit Audio');
        const virtualDev = aliasedDevs.find(d => d.name.includes('Hearbit Virtual'));
        if (aggregateDev) {
          setRecordingMode('meeting');
          setSelectedDevice(aggregateDev.id);
        } else if (virtualDev) {
          setRecordingMode('meeting');
          setSelectedDevice(virtualDev.id);
        } else {
          setRecordingMode('voice');
          if (aliasedDevs.length > 0) setSelectedDevice(aliasedDevs[0].id);
        }
      }
    } catch (e) {
      console.error('Failed to load devices', e);
    }
  };

  const openAudioSetup = async () => {
    try {
      await invoke('open_audio_midi_setup');
    } catch (e) {
      console.error(e);
      addToast('Failed to open Audio Setup', 'error');
      setStatus('Failed to open Audio Setup');
    }
  };

  const startRecording = async (deviceIdOverride?: string) => {
    try {
      setStatus('Starting...');
      // Use the override (avoids stale state during auto-start) or the current selection
      const targetDeviceId = deviceIdOverride || selectedDevice;
      // Tauri maps camelCase invoke keys to the Rust command's snake_case
      // parameters by default, so these keys reach the backend as device_id,
      // save_path, custom_filename and wait_for_speech.
      await invoke('start_recording', {
        deviceId: targetDeviceId,
        savePath: savePath || null,
        customFilename: props.recordingSubject || null,
        waitForSpeech: autoStartEnabled // Pass the toggle state
      });
      setIsRecording(true);
      setIsPaused(false);
      setTranscription('');
      setSummary('');
      if (autoStartEnabled) {
        setIsWaiting(true);
        setStatus('Waiting for audio...');
        addToast('Standing by for audio...', 'info', 3000);
      } else {
        setIsWaiting(false);
        setStatus('Recording...');
        addToast('Recording started', 'success', 2000);
      }
    } catch (e) {
      console.error(e);
      setStatus(`Error: ${e}`);
      addToast(`Error starting recording: ${e}`, 'error');
      setIsRecording(false);
    }
  };
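  // For reference, a matching Rust command could look like the sketch below.
  // The signature is an assumption (the backend is not shown in this file);
  // only the snake_case parameter names follow from the mapping noted above.
  //
  //   #[tauri::command]
  //   async fn start_recording(
  //       device_id: String,
  //       save_path: Option<String>,
  //       custom_filename: Option<String>,
  //       wait_for_speech: bool,
  //   ) -> Result<(), String> { /* ... */ }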
  // Refs for interval access, to avoid dependency cycles and stale closures
  const lastSpeechTimeRef = useRef(Date.now());
  const isStoppingRef = useRef(false);

  // Keep refs in sync when state changes
  useEffect(() => { lastSpeechTimeRef.current = lastSpeechTime; }, [lastSpeechTime]);
  useEffect(() => { isStoppingRef.current = isStopping; }, [isStopping]);

  // 1. Event listeners effect (runs once when recording starts)
  useEffect(() => {
    let unlistenVAD: () => void;
    let unlistenTrigger: () => void;

    const setupListeners = async () => {
      if (!isRecording) return;
      console.log("Setting up VAD listeners...");

      // VAD event listener: any speech frame resets the silence clock
      unlistenVAD = await listen<{ is_speech: boolean, probability: number }>('vad-event', (event) => {
        if (event.payload.is_speech) {
          setLastSpeechTime(Date.now());
          lastSpeechTimeRef.current = Date.now(); // Update the ref immediately
          setSilenceDuration(0);
        }
      });

      // Auto-start trigger listener
      unlistenTrigger = await listen('auto-recording-triggered', () => {
        console.log("Auto-Start Triggered from Backend!");
        // Only toast if we were actually waiting
        setIsWaiting((prev) => {
          if (prev) {
            addToast("Audio detected! Recording started.", 'success', 4000);
            return false;
          }
          return prev;
        });
        setStatus('Recording (Auto-Started)...');
        setLastSpeechTime(Date.now());
      });
    };

    if (isRecording) {
      setupListeners();
    }

    return () => {
      // Cleanup listeners
      if (unlistenVAD) unlistenVAD();
      if (unlistenTrigger) unlistenTrigger();
    };
  }, [isRecording, addToast]); // Dependencies for listener setup

  // 2. Auto-stop interval effect
  useEffect(() => {
    if (!isRecording || isPaused || isWaiting) return;

    const interval = setInterval(() => {
      const now = Date.now();
      const diff = (now - lastSpeechTimeRef.current) / 1000;
      setSilenceDuration(diff);
      // Auto-stop after 30 seconds of continuous silence
      if (diff > 30 && !isStoppingRef.current) {
        console.log("Auto-stopping due to silence");
        addToast("Auto-stopping (Silence detected)", "info", 3000);
        stopRecording();
      }
    }, 1000);

    return () => clearInterval(interval);
  }, [isRecording, isPaused, isWaiting, addToast]); // Dependencies for interval lifecycle
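  // Why read through refs in the interval above? setInterval's callback
  // closes over the render in which it was created; reading `lastSpeechTime`
  // from state there would see a stale value, and listing it as an effect
  // dependency would instead tear down and recreate the interval on every
  // speech frame. A minimal sketch of the pitfall (illustration only, not
  // part of this component):
  //
  //   useEffect(() => {
  //     const id = setInterval(() => console.log(lastSpeechTime), 1000);
  //     return () => clearInterval(id);
  //   }, []); // always logs the value captured at the first render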
  // Handle the autoStart prop (triggered by auto-join)
  useEffect(() => {
    if (props.autoStart && !isRecording && devices.length > 0) {
      // Force meeting mode for auto-joins
      if (recordingMode !== 'meeting') {
        setRecordingMode('meeting');
      }
      // Race condition fix: we can't rely on a setSelectedDevice call being
      // reflected in state immediately, so find the best device here and pass
      // its ID straight to startRecording.
      const aggregateDev = devices.find(d => d.name === 'Hearbit Audio');
      const virtualDev = devices.find(d => d.name.includes('Hearbit Virtual'));
      const bestDevice = aggregateDev || virtualDev;

      if (bestDevice) {
        setSelectedDevice(bestDevice.id); // Update UI state for consistency
        console.log("Auto-starting with device:", bestDevice.name);
        startRecording(bestDevice.id); // Pass the ID directly
      } else {
        console.warn("Auto-start: No meeting device found, trying default.");
        startRecording();
      }

      if (props.onAutoStartHandled) {
        props.onAutoStartHandled();
      }
    }
  }, [props.autoStart, devices]);

  // Handle custom event (legacy/fallback trigger)
  useEffect(() => {
    const handleStartReq = () => {
      if (!isRecording) {
        if (recordingMode !== 'meeting') {
          setRecordingMode('meeting');
        }
        startRecording();
      }
    };
    window.addEventListener('start-recording-req', handleStartReq);
    return () => window.removeEventListener('start-recording-req', handleStartReq);
  }, [isRecording, recordingMode]);

  const togglePause = async () => {
    try {
      if (isPaused) {
        await invoke('resume_recording');
        setIsPaused(false);
        setStatus('Recording...');
      } else {
        await invoke('pause_recording');
        setIsPaused(true);
        setStatus('Paused');
      }
    } catch (e) {
      console.error("Pause/Resume error:", e);
    }
  };

  const stopRecording = async () => {
    if (isStopping) return;
    setIsStopping(true);
    try {
      setIsRecording(false);
      setIsPaused(false);
      setIsWaiting(false); // Reset waiting state
      setStatus('Processing...');

      const filePath = await invoke<string>('stop_recording');
      // Give the backend a moment to flush the file to disk
      await new Promise(r => setTimeout(r, 500));

      setStatus('Transcribing (Infomaniak Whisper)...');
      const transText = await invoke<string>('transcribe_audio', { filePath, apiKey, productId });
      setTranscription(transText);

      // Bail out early if the transcription is empty or whitespace only
      if (!transText || transText.trim().length === 0) {
        setStatus('Done (No speech detected)');
        setTranscription('(No speech detected. Check your microphone settings.)');
        setTimeout(() => setStatus('Ready to record'), 3000);
        return;
      }

      // Smart prompt selection: start from the UI selection, then let keyword
      // matching override it. The prompt whose keywords appear most often in
      // the transcript wins.
      let activePrompt = prompts.find(p => p.id === selectedPromptId);
      const lowerText = transText.toLowerCase();
      let bestMatchId = selectedPromptId;
      let maxMatches = 0;
      for (const p of prompts) {
        if (!p.keywords) continue;
        let matches = 0;
        for (const kw of p.keywords) {
          if (lowerText.includes(kw.toLowerCase())) {
            matches++;
          }
        }
        if (matches > maxMatches) {
          maxMatches = matches;
          bestMatchId = p.id;
        }
      }

      if (bestMatchId !== selectedPromptId) {
        const newPrompt = prompts.find(p => p.id === bestMatchId);
        if (newPrompt) {
          console.log(`Smart Select: Switched to '${newPrompt.name}' with ${maxMatches} matches.`);
          setStatus(`Smart Select: Using "${newPrompt.name}"...`);
          addToast(`Smart Select: Switched to "${newPrompt.name}"`, 'success', 4000);
          activePrompt = newPrompt;
          // Reflect the override in the UI. For now the switch is silent;
          // asking the user to confirm is a possible future refinement.
          setSelectedPromptId(bestMatchId);
        }
      }

      const promptContent = activePrompt ? activePrompt.content : "Summarize this.";

      setStatus(`Summarizing (${selectedModel})...`);
      const sumText = await invoke<string>('summarize_text', {
        text: transText,
        apiKey,
        productId,
        prompt: promptContent,
        model: selectedModel
      });
      setSummary(sumText);

      // Auto-save to history
      onSaveToHistory(transText, sumText);

      setStatus('Done!');
      addToast('Transcription & Summary complete!', 'success', 4000);
      onRecordingComplete(); // Auto-switch tab
      setTimeout(() => setStatus('Ready to record'), 3000);
    } catch (e) {
      console.error(e);
      setStatus(`Error: ${e}`);
      addToast(`Error processing: ${e}`, 'error');
    } finally {
      setIsStopping(false);
    }
  };
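  // Worked example of the smart selection above (prompt names and keywords
  // are hypothetical): given prompt A with keywords ["sprint", "standup"]
  // and prompt B with keywords ["invoice"], a transcript mentioning both
  // "sprint" and "standup" scores A=2, B=0, so A replaces the UI selection
  // before summarization.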
  // NOTE: The original JSX markup was largely lost in extraction; the tree
  // below is a minimal reconstruction from the surviving fragments (comments,
  // labels, and conditional expressions). Class names, layout, and exact
  // widgets are placeholders, not the original design.
  return (
    <div className="recorder">
      {/* Fixed header - reduced padding */}
      <header className="recorder-header">
        <img src={logo} alt="Logo" />
        <button onClick={onOpenSettings}>Settings</button>
      </header>

      {/* Scrollable content - reduced spacing */}
      <main className="recorder-content">
        {/* Mode toggle: voice (physical mic) vs meeting (virtual device) */}
        <div className="mode-toggle">
          <button disabled={isRecording} onClick={() => setRecordingMode('voice')}>
            <Mic size={16} /> Voice
          </button>
          <button disabled={isRecording} onClick={() => setRecordingMode('meeting')}>
            <Users size={16} /> Meeting
          </button>
        </div>

        <h2>
          {isRecording
            ? (isWaiting ? 'Waiting for Audio...' : isPaused ? 'Paused' : 'Listening...')
            : 'Ready to Record'}
        </h2>

        <p>
          {status}
          {isRecording && !isPaused && !isWaiting && silenceDuration > 10 && (
            <span> Silence detected: {Math.floor(silenceDuration)}s</span>
          )}
        </p>

        {!isRecording ? (
          <>
            <button onClick={() => startRecording()}>
              <Mic /> Record
            </button>
            <label>
              <input
                type="checkbox"
                checked={autoStartEnabled}
                onChange={e => setAutoStartEnabled(e.target.checked)}
              />
              Wait for speech (auto-start)
            </label>
          </>
        ) : (
          <>
            {/* In waiting mode, Stop (acting as Cancel) is the only action */}
            {!isWaiting && (
              <button onClick={togglePause}>{isPaused ? 'Resume' : 'Pause'}</button>
            )}
            <button onClick={stopRecording} disabled={isStopping}>
              <Square /> Stop
            </button>
          </>
        )}

        {/* INPUT DEVICE SECTION */}
        <select value={selectedDevice} onChange={e => setSelectedDevice(e.target.value)}>
          {filteredDevices.map(d => (
            <option key={d.id} value={d.id}>{d.name}</option>
          ))}
        </select>
        <select value={selectedPromptId} onChange={e => setSelectedPromptId(e.target.value)}>
          {prompts.map(p => (
            <option key={p.id} value={p.id}>{p.name}</option>
          ))}
        </select>
        <select value={selectedModel} onChange={e => onModelChange(e.target.value)}>
          {availableModels.map(m => (
            <option key={m.id} value={m.id}>{m.name}</option>
          ))}
        </select>

        {recordingMode === 'meeting' && filteredDevices.length === 0 && (
          <button onClick={openAudioSetup}>
            <Headphones /> No virtual device found - open Audio MIDI Setup
          </button>
        )}
      </main>
    </div>
  );
};

export default Recorder;
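// Usage sketch (hypothetical parent wiring; handler names and bodies are
// illustrative, only the prop names come from RecorderProps):
//
//   <Recorder
//     apiKey={apiKey}
//     productId={productId}
//     prompts={prompts}
//     transcription={transcription}
//     setTranscription={setTranscription}
//     summary={summary}
//     setSummary={setSummary}
//     history={history}
//     onSaveToHistory={(t, s) => saveHistoryItem(t, s)}
//     onDeleteHistory={id => deleteHistoryItem(id)}
//     onLoadHistory={item => loadHistoryItem(item)}
//     savePath={savePath}
//     onRecordingComplete={() => setActiveTab('results')}
//     onOpenSettings={() => setActiveTab('settings')}
//     addToast={addToast}
//     selectedModel={selectedModel}
//     onModelChange={setSelectedModel}
//   />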