/*
 * Changelog (v1.1.0):
 * - New Import tab with drag-and-drop support for audio files
 * - Support for 8 formats: MP3, MP4, WAV, M4A, FLAC, OGG, AAC, WMA
 * - File metadata display (duration, size, format)
 * - Editable meeting titles
 * - Progress tracking with visual indicators
 * - Smart template selection
 * - Auto-navigation to Transcription view
 * - Updated README with BlackHole requirement and Teams config
 * - Added get_audio_metadata Rust command
 * - Version bump to 1.1.0
 */
import React, { useState, useEffect, useRef } from 'react';
|
|
import { Mic, Square, Users, Headphones } from 'lucide-react';
|
|
import { invoke } from "@tauri-apps/api/core";
|
|
import { listen } from '@tauri-apps/api/event';
|
|
import logo from '../assets/logo.png'; // Import logo
|
|
|
|
/** A summary template the user can select (or Smart Select can pick). */
interface PromptTemplate {
  id: string;
  name: string;
  /** Prompt text sent to the LLM when summarizing a transcription. */
  content: string;
  /** Optional keywords; templates with the most transcript matches win Smart Select. */
  keywords?: string[];
}
|
|
|
|
/** One saved recording result: a transcription plus its generated summary. */
interface HistoryItem {
  id: string;
  date: string;
  transcription: string;
  summary: string;
}
|
|
|
|
/** Props for the Recorder view. Most state is lifted to the parent (App). */
interface RecorderProps {
  /** Credentials for the transcription/summarization backend (Infomaniak, per status text). */
  apiKey: string;
  productId: string;
  /** Available summary templates. */
  prompts: PromptTemplate[];
  /** Opens the settings view (used e.g. to create the "Hearbit Audio" device). */
  onOpenSettings: () => void;
  // Lifted State Props (still passed for state management, though unused in view)
  transcription: string;
  setTranscription: (val: string) => void;
  summary: string;
  setSummary: (val: string) => void;
  // History Props
  history: HistoryItem[];
  /** Persists a finished transcription/summary pair. */
  onSaveToHistory: (t?: string, s?: string) => void;
  onDeleteHistory: (id: string) => void;
  onLoadHistory: (item: HistoryItem) => void;
  /** Directory recordings are written to; null defers to the backend default. */
  savePath: string | null;

  /** Called after a successful run so the app can switch to the result view. */
  onRecordingComplete: () => void;
  /** When true, start a meeting-mode recording automatically. */
  autoStart?: boolean;
  /** Optional title used as the recording's custom filename. */
  recordingSubject?: string;
  /** Invoked once an autoStart request has been consumed. */
  onAutoStartHandled?: () => void;
  /** Toast notifier shared with the rest of the app. */
  addToast: (msg: string, type: 'success' | 'error' | 'info', duration?: number) => void;
  /** Currently selected LLM model id (persisted by the parent). */
  selectedModel: string;
  onModelChange: (model: string) => void;
}
|
|
|
|
/** An audio input device as reported by the `get_input_devices` backend command. */
interface AudioDevice {
  id: string;
  name: string;
}
|
|
|
|
const Recorder: React.FC<RecorderProps> = ({
  apiKey, productId, prompts,
  setTranscription, setSummary,
  onSaveToHistory, savePath, onRecordingComplete,
  onOpenSettings, addToast, selectedModel, onModelChange, ...props
}) => {
  // Session state machine: recording -> (optionally waiting/paused) -> stopping.
  const [isRecording, setIsRecording] = useState(false);
  const [isStopping, setIsStopping] = useState(false); // New lock state: prevents double stop/transcribe
  const [isPaused, setIsPaused] = useState(false);
  const [isWaiting, setIsWaiting] = useState(false); // New state for Auto-Start: armed but no audio yet
  const [autoStartEnabled, setAutoStartEnabled] = useState(false); // Toggle state for "wait for speech"

  const [status, setStatus] = useState<string>('Ready to record');
  const [selectedDevice, setSelectedDevice] = useState<string>('');
  const [selectedPromptId, setSelectedPromptId] = useState<string>('');
  // selectedModel is now a prop
  // 'voice' = plain microphone memo; 'meeting' = capture via virtual/aggregate device.
  const [recordingMode, setRecordingMode] = useState<'voice' | 'meeting'>('voice');
  const [devices, setDevices] = useState<AudioDevice[]>([]);
  const [availableModels, setAvailableModels] = useState<Array<{ id: string, name: string }>>([]);
  // Timestamp of the last VAD speech event; drives the silence auto-stop.
  const [lastSpeechTime, setLastSpeechTime] = useState<number>(Date.now());
  const [silenceDuration, setSilenceDuration] = useState(0);

  // Filtered devices based on mode: meeting mode shows only the Hearbit/BlackHole
  // virtual devices, voice mode shows everything else.
  const filteredDevices = devices.filter(d => {
    const isVirtual = d.name.toLowerCase().includes('hearbit') || d.name.toLowerCase().includes('blackhole');
    return recordingMode === 'meeting' ? isVirtual : !isVirtual;
  });
|
|
|
|
// On mount (and whenever credentials change): enumerate input devices, and
// fetch the LLM model list once both API credentials are available.
useEffect(() => {
  loadDevices();
  if (apiKey && productId) {
    loadModels();
  }
}, [apiKey, productId]);
|
|
|
|
const loadModels = async () => {
|
|
try {
|
|
const models = await invoke<Array<{ id: string, name: string }>>('get_available_models', { apiKey, productId });
|
|
if (models && models.length > 0) {
|
|
models.sort((a, b) => a.name.localeCompare(b.name));
|
|
setAvailableModels(models);
|
|
}
|
|
} catch (e) {
|
|
console.error("Failed to load models dynamically, using defaults:", e);
|
|
}
|
|
};
|
|
|
|
// Set default prompt selection
|
|
useEffect(() => {
|
|
if (prompts.length > 0 && !selectedPromptId) {
|
|
setSelectedPromptId(prompts[0].id);
|
|
} else if (prompts.length > 0 && selectedPromptId) {
|
|
if (!prompts.find(p => p.id === selectedPromptId)) {
|
|
setSelectedPromptId(prompts[0].id);
|
|
}
|
|
}
|
|
}, [prompts, selectedPromptId]);
|
|
|
|
const loadDevices = async () => {
|
|
try {
|
|
const devList = await invoke<AudioDevice[]>('get_input_devices');
|
|
// Alias BlackHole
|
|
const aliasedDevs = devList.map(d => ({
|
|
...d,
|
|
name: d.name.includes('BlackHole') ? 'Hearbit Virtual Mic (BlackHole)' : d.name
|
|
}));
|
|
setDevices(aliasedDevs);
|
|
|
|
// Select Hearbit mic by default if available and no selection made
|
|
// Smart Auto-select based on mode
|
|
if (!selectedDevice) {
|
|
// Prioritize "Hearbit Audio" (Aggregate) over "Hearbit Virtual Mic" (BlackHole)
|
|
const aggregateDev = aliasedDevs.find(d => d.name === 'Hearbit Audio');
|
|
const virtualDev = aliasedDevs.find(d => d.name.includes('Hearbit Virtual'));
|
|
|
|
if (aggregateDev) {
|
|
setRecordingMode('meeting');
|
|
setSelectedDevice(aggregateDev.id);
|
|
} else if (virtualDev) {
|
|
setRecordingMode('meeting');
|
|
setSelectedDevice(virtualDev.id);
|
|
} else {
|
|
setRecordingMode('voice');
|
|
if (aliasedDevs.length > 0) setSelectedDevice(aliasedDevs[0].id);
|
|
}
|
|
}
|
|
} catch (e) {
|
|
console.error('Failed to load devices', e);
|
|
}
|
|
};
|
|
|
|
const openAudioSetup = async () => {
|
|
try {
|
|
await invoke('open_audio_midi_setup');
|
|
} catch (e) {
|
|
console.error(e);
|
|
addToast('Failed to open Audio Setup', 'error');
|
|
setStatus('Failed to open Audio Setup');
|
|
}
|
|
};
|
|
|
|
const startRecording = async (deviceIdOverride?: string) => {
|
|
try {
|
|
setStatus('Starting...');
|
|
// Check override or state
|
|
const targetDeviceId = deviceIdOverride || selectedDevice;
|
|
|
|
// Pass customFilename (camelCase key maps to snake_case in Rust automatically or we need to check Tauri mapping, usually it maps camel to camel? Rust expects snake. Let's use snake_case in invoke args to be safe)
|
|
await invoke('start_recording', {
|
|
deviceId: targetDeviceId,
|
|
savePath: savePath || null,
|
|
customFilename: props.recordingSubject || null,
|
|
waitForSpeech: autoStartEnabled // Pass the toggle state
|
|
});
|
|
|
|
setIsRecording(true);
|
|
setIsPaused(false);
|
|
setTranscription('');
|
|
setSummary('');
|
|
|
|
if (autoStartEnabled) {
|
|
setIsWaiting(true);
|
|
setStatus('Waiting for audio...');
|
|
addToast('Standing by for audio...', 'info', 3000);
|
|
} else {
|
|
setIsWaiting(false);
|
|
setStatus('Recording...');
|
|
addToast('Recording started', 'success', 2000);
|
|
}
|
|
|
|
} catch (e) {
|
|
console.error(e);
|
|
setStatus(`Error: ${e}`);
|
|
addToast(`Error starting recording: ${e}`, 'error');
|
|
setIsRecording(false);
|
|
}
|
|
};
|
|
|
|
// Refs for interval access to avoid dependency cycles: the auto-stop interval
// callback reads these refs instead of state, so the interval does not need to
// be recreated on every VAD event.
const lastSpeechTimeRef = useRef<number>(Date.now());
const isStoppingRef = useRef(false);

// Update refs when state changes (keeps ref and state views consistent).
useEffect(() => {
  lastSpeechTimeRef.current = lastSpeechTime;
}, [lastSpeechTime]);

useEffect(() => {
  isStoppingRef.current = isStopping;
}, [isStopping]);
|
|
|
|
// 1. Event Listeners Effect (Run ONCE when recording starts)
// Subscribes to backend events for the lifetime of a recording session:
//  - 'vad-event': voice-activity updates; speech resets the silence tracking.
//  - 'auto-recording-triggered': fired when wait-for-speech mode detects audio.
useEffect(() => {
  let unlistenVAD: () => void;
  let unlistenTrigger: () => void;

  const setupListeners = async () => {
    if (!isRecording) return;

    console.log("Setting up VAD listeners...");
    // VAD Event Listener
    unlistenVAD = await listen<{ is_speech: boolean, probability: number }>('vad-event', (event) => {
      if (event.payload.is_speech) {
        setLastSpeechTime(Date.now());
        lastSpeechTimeRef.current = Date.now(); // Update ref immediately so the interval sees it this tick
        setSilenceDuration(0);
      }
    });

    // Auto-Start Trigger Listener
    unlistenTrigger = await listen('auto-recording-triggered', () => {
      console.log("Auto-Start Triggered from Backend!");
      // Only trigger if we are actually waiting: the functional update reads
      // the latest isWaiting without adding it to the effect deps.
      // NOTE(review): the toast runs inside the state updater, which React may
      // invoke more than once (e.g. StrictMode) — confirm this is acceptable.
      setIsWaiting((prev) => {
        if (prev) {
          addToast("Audio detected! Recording started.", 'success', 4000);
          return false;
        }
        return prev;
      });
      setStatus('Recording (Auto-Started)...');
      setLastSpeechTime(Date.now());
    });
  };

  if (isRecording) {
    setupListeners();
  }

  return () => {
    // Cleanup listeners (they may be undefined if setup never ran/completed).
    if (unlistenVAD) unlistenVAD();
    if (unlistenTrigger) unlistenTrigger();
  };
}, [isRecording, addToast]); // Dependencies for listener setup
|
|
|
|
// Auto-Stop Interval Effect: while actively recording (not paused/waiting),
// poll once per second, publish the current silence duration for the UI, and
// stop automatically after 30s without speech.
useEffect(() => {
  if (!isRecording || isPaused || isWaiting) return;

  const interval = setInterval(() => {
    const now = Date.now();
    // Read through the ref to avoid recreating the interval on every VAD event.
    const diff = (now - lastSpeechTimeRef.current) / 1000;
    setSilenceDuration(diff);

    // Auto-stop after 30 seconds of silence; isStoppingRef guards re-entry.
    // NOTE(review): stopRecording is captured from the render that created this
    // interval (it is not in the deps array) — the ref guard appears to cover
    // the double-stop case, but confirm no other stale state is read.
    if (diff > 30 && !isStoppingRef.current) {
      console.log("Auto-stopping due to silence");
      addToast("Auto-stopping (Silence detected)", "info", 3000);
      stopRecording();
    }
  }, 1000);

  return () => clearInterval(interval);
}, [isRecording, isPaused, isWaiting, addToast]); // Dependencies for interval lifecycle
|
|
|
|
// Handle Auto Start Prop: when the parent requests an automatic start (e.g.
// meeting auto-join), force meeting mode, pick the best meeting device and
// start recording with its id passed directly (bypassing async state updates).
useEffect(() => {
  if (props.autoStart && !isRecording && devices.length > 0) {
    // Force meeting mode for auto-joins
    if (recordingMode !== 'meeting') {
      setRecordingMode('meeting');
    }

    // Find best device (Race condition fix: we can't rely on selectedDevice state update being instant)
    const aggregateDev = devices.find(d => d.name === 'Hearbit Audio');
    const virtualDev = devices.find(d => d.name.includes('Hearbit Virtual'));
    const bestDevice = aggregateDev || virtualDev;

    if (bestDevice) {
      setSelectedDevice(bestDevice.id); // Update UI state for consistency
      console.log("Auto-starting with device:", bestDevice.name);
      startRecording(bestDevice.id); // Pass ID directly
    } else {
      console.warn("Auto-start: No meeting device found, trying default.");
      startRecording();
    }

    // Tell the parent the request was consumed so it isn't replayed.
    if (props.onAutoStartHandled) {
      props.onAutoStartHandled();
    }
  }
}, [props.autoStart, devices]);
|
|
|
|
// Handle Custom Event (Legacy/Fallback)
|
|
useEffect(() => {
|
|
const handleStartReq = () => {
|
|
if (!isRecording) {
|
|
if (recordingMode !== 'meeting') {
|
|
setRecordingMode('meeting');
|
|
}
|
|
startRecording();
|
|
}
|
|
};
|
|
window.addEventListener('start-recording-req', handleStartReq);
|
|
return () => window.removeEventListener('start-recording-req', handleStartReq);
|
|
}, [isRecording, recordingMode]);
|
|
|
|
const togglePause = async () => {
|
|
try {
|
|
if (isPaused) {
|
|
await invoke('resume_recording');
|
|
setIsPaused(false);
|
|
setStatus('Recording...');
|
|
} else {
|
|
await invoke('pause_recording');
|
|
setIsPaused(true);
|
|
setStatus('Paused');
|
|
}
|
|
} catch (e) {
|
|
console.error("Pause/Resume error:", e);
|
|
}
|
|
};
|
|
|
|
// Stop the session and run the full pipeline:
// stop backend capture -> transcribe the file -> smart-select a template by
// keyword matches -> summarize -> save to history -> switch to the result view.
// The isStopping flag makes this re-entrancy-safe (manual Stop vs auto-stop).
const stopRecording = async () => {
  if (isStopping) return;
  setIsStopping(true);

  try {
    setIsRecording(false);
    setIsPaused(false);
    setIsWaiting(false); // Reset waiting state
    setStatus('Processing...');
    const filePath = await invoke<string>('stop_recording');

    // Wait a moment for file flush (safety)
    await new Promise(r => setTimeout(r, 500));

    setStatus('Transcribing (Infomaniak Whisper)...');
    const transText = await invoke<string>('transcribe_audio', {
      filePath,
      apiKey,
      productId
    });
    setTranscription(transText);

    // Check if transcription is empty or just whitespace — if so, bail out
    // early without summarizing or saving to history.
    if (!transText || transText.trim().length === 0) {
      setStatus('Done (No speech detected)');
      setTranscription('(No speech detected. Check your microphone settings.)');
      setTimeout(() => setStatus('Ready to record'), 3000);
      return;
    }

    // Find selected prompt content - SMART SELECTION
    let activePrompt = prompts.find(p => p.id === selectedPromptId);

    // Smart Auto-Select based on keywords: count keyword hits per template in
    // the lowercased transcript; the template with the most hits (at least 1)
    // overrides the user's current selection for this run.
    const lowerText = transText.toLowerCase();
    let bestMatchId = selectedPromptId;
    let maxMatches = 0;

    for (const p of prompts) {
      if (!p.keywords) continue;
      let matches = 0;
      for (const kw of p.keywords) {
        if (lowerText.includes(kw.toLowerCase())) {
          matches++;
        }
      }
      if (matches > maxMatches) {
        maxMatches = matches;
        bestMatchId = p.id;
      }
    }

    if (bestMatchId !== selectedPromptId) {
      const newPrompt = prompts.find(p => p.id === bestMatchId);
      if (newPrompt) {
        console.log(`Smart Select: Switched to '${newPrompt.name}' with ${maxMatches} matches.`);
        setStatus(`Smart Select: Using "${newPrompt.name}"...`);
        addToast(`Smart Select: Switched to "${newPrompt.name}"`, 'success', 4000);
        activePrompt = newPrompt;
        // Optional: Update UI selection? setSelectedPromptId(bestMatchId);
        // Let's verify with user preference? For now, we override as "Magic".
      }
    }

    // Fallback prompt when no template is selected/found.
    const promptContent = activePrompt ? activePrompt.content : "Summarize this.";

    setStatus(`Summarizing (${selectedModel})...`);
    const sumText = await invoke<string>('summarize_text', {
      text: transText,
      apiKey,
      productId,
      prompt: promptContent,
      model: selectedModel
    });
    setSummary(sumText);

    // Auto-save to history
    onSaveToHistory(transText, sumText);

    setStatus('Done!');
    addToast('Transcription & Summary complete!', 'success', 4000);
    onRecordingComplete(); // Auto-switch tab
    setTimeout(() => setStatus('Ready to record'), 3000);
  } catch (e) {
    console.error(e);
    setStatus(`Error: ${e}`);
    addToast(`Error processing: ${e}`, 'error');
  } finally {
    // Always release the stop lock, including the early empty-transcript return.
    setIsStopping(false);
  }
};
|
|
|
|
|
|
|
|
return (
  <div className="flex flex-col w-full h-full bg-background relative">
    {/* Fixed Header - Reduced padding */}
    <div className="w-full flex justify-center items-center p-4 shrink-0">
      <img src={logo} alt="Logo" className="h-10 object-contain" />
    </div>

    {/* Scrollable Content - Reduced spacing */}
    <div className="flex-1 overflow-y-auto px-6 pb-6 flex flex-col items-center">
      {/* Recording indicator: blue while waiting, yellow when paused, pulsing red while live */}
      <div className="mb-4 relative shrink-0">
        <div className={`w-24 h-24 rounded-full flex items-center justify-center transition-all duration-300 ${isRecording ? (isWaiting ? 'bg-blue-500/20' : isPaused ? 'bg-yellow-500/10' : 'bg-red-500/10 animate-pulse') : 'bg-secondary'}`}>
          {isRecording ? (
            <div className={`w-16 h-16 rounded-full flex items-center justify-center shadow-[0_0_20px_rgba(239,68,68,0.5)] ${isWaiting ? 'bg-blue-500 animate-pulse' : isPaused ? 'bg-yellow-500' : 'bg-red-500'}`}>
              <Mic size={32} className="text-white animate-bounce" />
            </div>
          ) : (
            <div className="w-16 h-16 rounded-full bg-primary flex items-center justify-center">
              <Mic size={32} className="text-primary-foreground" />
            </div>
          )}
        </div>
      </div>

      <h1 className="text-xl font-bold mb-1 text-foreground">
        {isRecording ? (isWaiting ? 'Waiting for Audio...' : isPaused ? 'Paused' : 'Listening...') : 'Ready to Record'}
      </h1>

      {/* Status line, plus a silence counter once quiet for more than 10s */}
      <p className="text-muted-foreground mb-4 text-center text-xs h-5">
        {status}
        {isRecording && !isPaused && !isWaiting && silenceDuration > 10 && (
          <span className="block text-xs text-yellow-500 mt-0.5 opacity-80">
            Silence detected: {Math.floor(silenceDuration)}s
          </span>
        )}
      </p>

      <div className="w-full max-w-sm space-y-3 mb-4 shrink-0">
        {!isRecording ? (
          <>
            {/* Idle: start button plus the auto-start toggle */}
            <button
              onClick={() => startRecording()}
              disabled={!apiKey || !productId}
              className="w-full py-3 text-base font-semibold bg-primary text-primary-foreground rounded-lg hover:bg-primary/90 disabled:opacity-50 disabled:cursor-not-allowed transition-all shadow-md hover:shadow-lg"
            >
              {!apiKey ? 'Configure API Key First' : (autoStartEnabled ? 'Standby (Auto-Start)' : 'Start Recording')}
            </button>
            <div className="flex items-center justify-center gap-2 mt-2">
              <label className="flex items-center gap-2 cursor-pointer select-none">
                <input
                  type="checkbox"
                  checked={autoStartEnabled}
                  onChange={(e) => setAutoStartEnabled(e.target.checked)}
                  className="w-4 h-4 accent-primary rounded cursor-pointer"
                />
                <span className="text-xs text-muted-foreground font-medium">Auto-start when audio detected</span>
              </label>
            </div>
          </>
        ) : (
          <div className="flex gap-2 w-full">
            {/* In Waiting mode, we can only Stop (Cancel) */}
            {!isWaiting && (
              <button
                onClick={togglePause}
                className={`flex-1 py-4 text-lg font-semibold rounded-lg transition-all shadow-md hover:shadow-lg flex items-center justify-center gap-2 ${isPaused
                  ? 'bg-blue-600 text-white hover:bg-blue-700'
                  : 'bg-yellow-500 text-white hover:bg-yellow-600'
                  }`}
              >
                {isPaused ? 'Resume' : 'Pause'}
              </button>
            )}
            <button
              onClick={stopRecording}
              className="flex-1 py-4 text-lg font-semibold bg-destructive text-destructive-foreground rounded-lg hover:bg-destructive/90 transition-all shadow-md hover:shadow-lg flex items-center justify-center gap-2"
            >
              <Square size={20} fill="currentColor" />
              {isWaiting ? 'Cancel' : 'Stop'}
            </button>
          </div>
        )}

        <div className="grid grid-cols-2 gap-4 pt-2">
        </div>

        {/* INPUT DEVICE SECTION: mode toggle + device picker */}
        <div className="col-span-2">
          <div className="flex bg-secondary p-1 rounded-lg mb-2">
            <button
              onClick={() => { setRecordingMode('voice'); setSelectedDevice(''); }}
              className={`flex-1 flex items-center justify-center gap-2 py-1.5 text-xs font-semibold rounded-md transition-all ${recordingMode === 'voice' ? 'bg-background shadow text-foreground' : 'text-muted-foreground hover:text-foreground'}`}
            >
              <Headphones size={14} /> Voice Memo
            </button>
            <button
              onClick={() => { setRecordingMode('meeting'); setSelectedDevice(''); }}
              className={`flex-1 flex items-center justify-center gap-2 py-1.5 text-xs font-semibold rounded-md transition-all ${recordingMode === 'meeting' ? 'bg-background shadow text-foreground' : 'text-muted-foreground hover:text-foreground'}`}
            >
              <Users size={14} /> Meeting
            </button>
          </div>

          <select
            value={selectedDevice}
            onChange={(e) => setSelectedDevice(e.target.value)}
            className="w-full p-2 text-sm bg-secondary rounded border border-border outline-none focus:ring-2 focus:ring-primary"
            disabled={isRecording}
          >
            {filteredDevices.map(d => (
              <option key={d.id} value={d.id}>{d.name}</option>
            ))}
            {filteredDevices.length === 0 && (
              <option value="">
                {recordingMode === 'meeting' ? 'No Meeting Device (Create in Settings)' : 'No Microphone Found'}
              </option>
            )}
          </select>
        </div>

        {/* LLM model picker */}
        <div className="col-span-2 grid grid-cols-2 gap-4">
          <div>
            <label className="text-xs font-semibold text-muted-foreground uppercase tracking-wider block mb-1">
              LLM Model
            </label>
            <select
              value={selectedModel}
              onChange={(e) => {
                onModelChange(e.target.value);
                // localStorage handled in App.tsx
              }}
              className="w-full p-2 text-sm bg-secondary rounded border border-border outline-none focus:ring-2 focus:ring-primary"
              // Allow changing model while recording (since it's used for summary after)
              disabled={false}
            >
              {availableModels.map(m => (
                <option key={m.id} value={m.id}>{m.name}</option>
              ))}
            </select>
          </div>
        </div>

        {/* Summary template picker */}
        <div className="w-full">
          <label className="text-xs font-semibold text-muted-foreground uppercase tracking-wider block mb-1">
            AI Template
          </label>
          <select
            value={selectedPromptId}
            onChange={(e) => setSelectedPromptId(e.target.value)}
            className="w-full p-2 text-sm bg-secondary rounded border border-border outline-none focus:ring-2 focus:ring-primary"
            // Allow changing template while recording
            disabled={prompts.length === 0}
          >
            {prompts.map(p => (
              <option key={p.id} value={p.id}>{p.name}</option>
            ))}
            {prompts.length === 0 && <option value="">No templates</option>}
          </select>
        </div>

        {/* Setup helpers: create the meeting device / open Audio MIDI Setup */}
        <div className="flex flex-col gap-2 mt-2 w-full">
          {recordingMode === 'meeting' && filteredDevices.length === 0 && (
            <button
              onClick={onOpenSettings}
              className="text-xs bg-primary/10 text-primary hover:bg-primary/20 w-full text-center border border-primary/20 rounded p-2 mb-2 font-semibold"
            >
              🪄 Create "Hearbit Audio" Device
            </button>
          )}
          <button
            onClick={openAudioSetup}
            className="text-xs text-muted-foreground hover:text-foreground w-full text-center border border-dashed border-border/50 rounded p-1"
          >
            Open Audio MIDI Setup
          </button>
        </div>
      </div>
    </div>
  </div>
);
};
|
|
|
|
export default Recorder;
|