Release 1.1.0: Add Import Audio Files feature
- New Import tab with drag-and-drop support for audio files
- Support for 8 formats: MP3, MP4, WAV, M4A, FLAC, OGG, AAC, WMA
- File metadata display (duration, size, format)
- Editable meeting titles
- Progress tracking with visual indicators
- Smart template selection
- Auto-navigation to Transcription view
- Updated README with BlackHole requirement and Teams config
- Added get_audio_metadata Rust command
- Version bump to 1.1.0
This commit is contained in:
@@ -1,4 +1,4 @@
|
||||
import React, { useState, useEffect } from 'react';
|
||||
import React, { useState, useEffect, useRef } from 'react';
|
||||
import { Mic, Square, Users, Headphones } from 'lucide-react';
|
||||
import { invoke } from "@tauri-apps/api/core";
|
||||
import { listen } from '@tauri-apps/api/event';
|
||||
@@ -58,6 +58,10 @@ const Recorder: React.FC<RecorderProps> = ({
|
||||
const [isRecording, setIsRecording] = useState(false);
|
||||
const [isStopping, setIsStopping] = useState(false); // New lock state
|
||||
const [isPaused, setIsPaused] = useState(false);
|
||||
const [isWaiting, setIsWaiting] = useState(false); // New state for Auto-Start
|
||||
const [autoStartEnabled, setAutoStartEnabled] = useState(false); // Toggle state
|
||||
|
||||
|
||||
const [status, setStatus] = useState<string>('Ready to record');
|
||||
const [selectedDevice, setSelectedDevice] = useState<string>('');
|
||||
const [selectedPromptId, setSelectedPromptId] = useState<string>('');
|
||||
@@ -149,19 +153,33 @@ const Recorder: React.FC<RecorderProps> = ({
|
||||
|
||||
const startRecording = async (deviceIdOverride?: string) => {
|
||||
try {
|
||||
setStatus('Starting...');
|
||||
setStatus('Starting...');
|
||||
// Check override or state
|
||||
const targetDeviceId = deviceIdOverride || selectedDevice;
|
||||
|
||||
// NOTE(review): Tauri v2 converts camelCase invoke-arg keys (deviceId, savePath, customFilename)
// to the snake_case parameter names of the Rust command, so camelCase keys are intentional here —
// confirm against the `start_recording` command signature in the Rust backend.
|
||||
await invoke('start_recording', { deviceId: targetDeviceId, savePath: savePath || null, customFilename: props.recordingSubject || null });
|
||||
await invoke('start_recording', {
|
||||
deviceId: targetDeviceId,
|
||||
savePath: savePath || null,
|
||||
customFilename: props.recordingSubject || null,
|
||||
waitForSpeech: autoStartEnabled // Pass the toggle state
|
||||
});
|
||||
|
||||
setIsRecording(true);
|
||||
setIsPaused(false);
|
||||
setTranscription('');
|
||||
setSummary('');
|
||||
setStatus('Recording...');
|
||||
addToast('Recording started', 'success', 2000);
|
||||
|
||||
if (autoStartEnabled) {
|
||||
setIsWaiting(true);
|
||||
setStatus('Waiting for audio...');
|
||||
addToast('Standing by for audio...', 'info', 3000);
|
||||
} else {
|
||||
setIsWaiting(false);
|
||||
setStatus('Recording...');
|
||||
addToast('Recording started', 'success', 2000);
|
||||
}
|
||||
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
setStatus(`Error: ${e}`);
|
||||
@@ -170,43 +188,83 @@ const Recorder: React.FC<RecorderProps> = ({
|
||||
}
|
||||
};
|
||||
|
||||
// VAD & Auto-Stop Logic
|
||||
useEffect(() => {
|
||||
let unlisten: () => void;
|
||||
// Refs for interval access to avoid dependency cycles
|
||||
const lastSpeechTimeRef = useRef<number>(Date.now());
|
||||
const isStoppingRef = useRef(false);
|
||||
|
||||
const setupListener = async () => {
|
||||
unlisten = await listen<{ is_speech: boolean, probability: number }>('vad-event', (event) => {
|
||||
// Update refs when state changes
|
||||
useEffect(() => {
|
||||
lastSpeechTimeRef.current = lastSpeechTime;
|
||||
}, [lastSpeechTime]);
|
||||
|
||||
useEffect(() => {
|
||||
isStoppingRef.current = isStopping;
|
||||
}, [isStopping]);
|
||||
|
||||
// 1. Event Listeners Effect (Run ONCE when recording starts)
|
||||
useEffect(() => {
|
||||
let unlistenVAD: () => void;
|
||||
let unlistenTrigger: () => void;
|
||||
|
||||
const setupListeners = async () => {
|
||||
if (!isRecording) return;
|
||||
|
||||
console.log("Setting up VAD listeners...");
|
||||
// VAD Event Listener
|
||||
unlistenVAD = await listen<{ is_speech: boolean, probability: number }>('vad-event', (event) => {
|
||||
if (event.payload.is_speech) {
|
||||
setLastSpeechTime(Date.now());
|
||||
lastSpeechTimeRef.current = Date.now(); // Update ref immediately
|
||||
setSilenceDuration(0);
|
||||
}
|
||||
});
|
||||
|
||||
// Auto-Start Trigger Listener
|
||||
unlistenTrigger = await listen('auto-recording-triggered', () => {
|
||||
console.log("Auto-Start Triggered from Backend!");
|
||||
// Only trigger if we are actually waiting
|
||||
setIsWaiting((prev) => {
|
||||
if (prev) {
|
||||
addToast("Audio detected! Recording started.", 'success', 4000);
|
||||
return false;
|
||||
}
|
||||
return prev;
|
||||
});
|
||||
setStatus('Recording (Auto-Started)...');
|
||||
setLastSpeechTime(Date.now());
|
||||
});
|
||||
};
|
||||
|
||||
if (isRecording && !isPaused) {
|
||||
setupListener();
|
||||
setLastSpeechTime(Date.now()); // Reset on start
|
||||
if (isRecording) {
|
||||
setupListeners();
|
||||
}
|
||||
|
||||
const interval = setInterval(() => {
|
||||
if (isRecording && !isPaused) {
|
||||
const diff = (Date.now() - lastSpeechTime) / 1000;
|
||||
setSilenceDuration(diff);
|
||||
return () => {
|
||||
// Cleanup listeners
|
||||
if (unlistenVAD) unlistenVAD();
|
||||
if (unlistenTrigger) unlistenTrigger();
|
||||
};
|
||||
}, [isRecording, addToast]); // Dependencies for listener setup
|
||||
|
||||
// Auto-stop after 30 seconds of silence
|
||||
if (diff > 30 && !isStopping) { // Check lock
|
||||
console.log("Auto-stopping due to silence");
|
||||
addToast("Auto-stopping (Silence detected)", "info", 3000);
|
||||
stopRecording();
|
||||
}
|
||||
// Auto-Stop Interval Effect
|
||||
useEffect(() => {
|
||||
if (!isRecording || isPaused || isWaiting) return;
|
||||
|
||||
const interval = setInterval(() => {
|
||||
const now = Date.now();
|
||||
const diff = (now - lastSpeechTimeRef.current) / 1000;
|
||||
setSilenceDuration(diff);
|
||||
|
||||
// Auto-stop after 30 seconds of silence
|
||||
if (diff > 30 && !isStoppingRef.current) {
|
||||
console.log("Auto-stopping due to silence");
|
||||
addToast("Auto-stopping (Silence detected)", "info", 3000);
|
||||
stopRecording();
|
||||
}
|
||||
}, 1000);
|
||||
|
||||
return () => {
|
||||
if (unlisten) unlisten();
|
||||
clearInterval(interval);
|
||||
};
|
||||
}, [isRecording, isPaused, lastSpeechTime]);
|
||||
return () => clearInterval(interval);
|
||||
}, [isRecording, isPaused, isWaiting, addToast]); // Dependencies for interval lifecycle
|
||||
|
||||
// Handle Auto Start Prop
|
||||
useEffect(() => {
|
||||
@@ -273,6 +331,7 @@ const Recorder: React.FC<RecorderProps> = ({
|
||||
try {
|
||||
setIsRecording(false);
|
||||
setIsPaused(false);
|
||||
setIsWaiting(false); // Reset waiting state
|
||||
setStatus('Processing...');
|
||||
const filePath = await invoke<string>('stop_recording');
|
||||
|
||||
@@ -357,6 +416,8 @@ const Recorder: React.FC<RecorderProps> = ({
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
||||
return (
|
||||
<div className="flex flex-col w-full h-full bg-background relative">
|
||||
{/* Fixed Header - Reduced padding */}
|
||||
@@ -367,9 +428,9 @@ const Recorder: React.FC<RecorderProps> = ({
|
||||
{/* Scrollable Content - Reduced spacing */}
|
||||
<div className="flex-1 overflow-y-auto px-6 pb-6 flex flex-col items-center">
|
||||
<div className="mb-4 relative shrink-0">
|
||||
<div className={`w-24 h-24 rounded-full flex items-center justify-center transition-all duration-300 ${isRecording ? (isPaused ? 'bg-yellow-500/10' : 'bg-red-500/10 animate-pulse') : 'bg-secondary'}`}>
|
||||
<div className={`w-24 h-24 rounded-full flex items-center justify-center transition-all duration-300 ${isRecording ? (isWaiting ? 'bg-blue-500/20' : isPaused ? 'bg-yellow-500/10' : 'bg-red-500/10 animate-pulse') : 'bg-secondary'}`}>
|
||||
{isRecording ? (
|
||||
<div className={`w-16 h-16 rounded-full flex items-center justify-center shadow-[0_0_20px_rgba(239,68,68,0.5)] ${isPaused ? 'bg-yellow-500' : 'bg-red-500'}`}>
|
||||
<div className={`w-16 h-16 rounded-full flex items-center justify-center shadow-[0_0_20px_rgba(239,68,68,0.5)] ${isWaiting ? 'bg-blue-500 animate-pulse' : isPaused ? 'bg-yellow-500' : 'bg-red-500'}`}>
|
||||
<Mic size={32} className="text-white animate-bounce" />
|
||||
</div>
|
||||
) : (
|
||||
@@ -381,12 +442,12 @@ const Recorder: React.FC<RecorderProps> = ({
|
||||
</div>
|
||||
|
||||
<h1 className="text-xl font-bold mb-1 text-foreground">
|
||||
{isRecording ? (isPaused ? 'Paused' : 'Listening...') : 'Ready to Record'}
|
||||
{isRecording ? (isWaiting ? 'Waiting for Audio...' : isPaused ? 'Paused' : 'Listening...') : 'Ready to Record'}
|
||||
</h1>
|
||||
|
||||
<p className="text-muted-foreground mb-4 text-center text-xs h-5">
|
||||
{status}
|
||||
{isRecording && !isPaused && silenceDuration > 10 && (
|
||||
{isRecording && !isPaused && !isWaiting && silenceDuration > 10 && (
|
||||
<span className="block text-xs text-yellow-500 mt-0.5 opacity-80">
|
||||
Silence detected: {Math.floor(silenceDuration)}s
|
||||
</span>
|
||||
@@ -395,30 +456,46 @@ const Recorder: React.FC<RecorderProps> = ({
|
||||
|
||||
<div className="w-full max-w-sm space-y-3 mb-4 shrink-0">
|
||||
{!isRecording ? (
|
||||
<button
|
||||
onClick={() => startRecording()}
|
||||
disabled={!apiKey || !productId}
|
||||
className="w-full py-3 text-base font-semibold bg-primary text-primary-foreground rounded-lg hover:bg-primary/90 disabled:opacity-50 disabled:cursor-not-allowed transition-all shadow-md hover:shadow-lg"
|
||||
>
|
||||
{!apiKey ? 'Configure API Key First' : 'Start Recording'}
|
||||
</button>
|
||||
<>
|
||||
<button
|
||||
onClick={() => startRecording()}
|
||||
disabled={!apiKey || !productId}
|
||||
className="w-full py-3 text-base font-semibold bg-primary text-primary-foreground rounded-lg hover:bg-primary/90 disabled:opacity-50 disabled:cursor-not-allowed transition-all shadow-md hover:shadow-lg"
|
||||
>
|
||||
{!apiKey ? 'Configure API Key First' : (autoStartEnabled ? 'Standby (Auto-Start)' : 'Start Recording')}
|
||||
</button>
|
||||
<div className="flex items-center justify-center gap-2 mt-2">
|
||||
<label className="flex items-center gap-2 cursor-pointer select-none">
|
||||
<input
|
||||
type="checkbox"
|
||||
checked={autoStartEnabled}
|
||||
onChange={(e) => setAutoStartEnabled(e.target.checked)}
|
||||
className="w-4 h-4 accent-primary rounded cursor-pointer"
|
||||
/>
|
||||
<span className="text-xs text-muted-foreground font-medium">Auto-start when audio detected</span>
|
||||
</label>
|
||||
</div>
|
||||
</>
|
||||
) : (
|
||||
<div className="flex gap-2 w-full">
|
||||
<button
|
||||
onClick={togglePause}
|
||||
className={`flex-1 py-4 text-lg font-semibold rounded-lg transition-all shadow-md hover:shadow-lg flex items-center justify-center gap-2 ${isPaused
|
||||
? 'bg-blue-600 text-white hover:bg-blue-700'
|
||||
: 'bg-yellow-500 text-white hover:bg-yellow-600'
|
||||
}`}
|
||||
>
|
||||
{isPaused ? 'Resume' : 'Pause'}
|
||||
</button>
|
||||
{/* In Waiting mode, we can only Stop (Cancel) */}
|
||||
{!isWaiting && (
|
||||
<button
|
||||
onClick={togglePause}
|
||||
className={`flex-1 py-4 text-lg font-semibold rounded-lg transition-all shadow-md hover:shadow-lg flex items-center justify-center gap-2 ${isPaused
|
||||
? 'bg-blue-600 text-white hover:bg-blue-700'
|
||||
: 'bg-yellow-500 text-white hover:bg-yellow-600'
|
||||
}`}
|
||||
>
|
||||
{isPaused ? 'Resume' : 'Pause'}
|
||||
</button>
|
||||
)}
|
||||
<button
|
||||
onClick={stopRecording}
|
||||
className="flex-1 py-4 text-lg font-semibold bg-destructive text-destructive-foreground rounded-lg hover:bg-destructive/90 transition-all shadow-md hover:shadow-lg flex items-center justify-center gap-2"
|
||||
>
|
||||
<Square size={20} fill="currentColor" />
|
||||
Stop
|
||||
{isWaiting ? 'Cancel' : 'Stop'}
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
|
||||
Reference in New Issue
Block a user