feat: complete history, attendees list, and smart templates

This commit is contained in:
michael.borak
2026-01-20 15:00:56 +01:00
parent d266de942a
commit 52ccd7ee03
18 changed files with 2222 additions and 480 deletions

View File

@@ -10,7 +10,11 @@
* **🧠 Powered by Infomaniak AI**:
* **Precision Transcription**: Standard-compliant formatting with **second-by-second timestamps** (e.g., `[00:12]`).
* **Smart Summaries**: Uses advanced LLMs (Mixtral, Llama 3) to create actionable meeting notes.
* **📝 Professional Templates**: Comes with 3 built-in expert prompts:
* **🔇 Smart VAD (Voice Activity Detection)**: Automatically filters out silence and background noise, ensuring your transcripts are clean and focused.
* **📅 Microsoft 365 Integration**:
* **Upcoming Meetings Panel**: View your daily schedule directly in the app.
* **One-Click Join & Record**: Instantly launch Teams/Zoom meetings and start recording with a single click.
* **📝 Professional Templates**: Comes with 3 built-in expert prompts:
* **Meeting Protocol**: For general business meetings.
* **1:1 / Jour Fixe**: For confidential personnel discussions.
* **Client Meeting**: For official, client-ready documentation.
@@ -34,25 +38,15 @@
## 🎧 Recording System Audio (Teams, Zoom, etc.)
To record clear meeting audio from other applications, you need a "virtual cable". We recommend **BlackHole 2ch**.
We've made this easy! Hearbit AI includes a built-in helper to set up your audio devices.
1. **Install BlackHole**: Download and install [BlackHole 2ch](https://existential.audio/blackhole/).
2. **Create a Multi-Output Device** (So you can hear the audio too!):
* Open **Audio MIDI Setup** on your Mac.
* Create a "Multi-Output Device".
* Select both **BlackHole 2ch** AND your **Headphones/Speakers**.
* *Tip: Use this Multi-Output Device as your SPEAKER in Teams/Zoom.*
![Multi-Output Device Setup](docs/screenshots/multi_output_setup.png)
3. **Create Aggregate Device (Optional)**:
* If you want to record BOTH your Microphone and System Audio, create an **Aggregate Device**.
* Select **BlackHole 2ch** AND your **Microphone**.
![Aggregate Device Setup](docs/screenshots/aggregate_device_setup.png)
4. **Select Input in Hearbit AI**:
* In Hearbit AI, select **BlackHole 2ch** (or your new Aggregate Device) as the **Input Device**.
1. **Open Audio MIDI Setup**: Click the "Open Audio MIDI Setup" button in the recorder view.
2. **Create "Hearbit Audio" Device**:
* If you don't have a virtual device, click **"🪄 Create Hearbit Audio Device"** in the app (appears in Meeting mode if no device is found).
* This will automatically configure a Multi-Output Device so you can record and hear at the same time.
3. **Select "Hearbit Audio" in Teams/Zoom**:
* In your meeting app settings (Teams/Zoom), set your **Speaker** to **Hearbit Audio**.
* In Hearbit AI, select **Hearbit Audio** (or BlackHole) as your input.
---
@@ -61,14 +55,20 @@ To record clear meeting audio from other applications, you need a "virtual cable
1. **Configuration**:
* Click the **Settings** (gear icon).
* Enter your **Infomaniak API Key** and **Product ID**.
* (Optional) Customize where recordings are saved.
2. **Recording**:
2. **Connect M365 (Optional)**:
* Copy the **Application (client) ID**.
* Click the **Meetings** tab.
* Enter your **Client ID** and click "Connect".
* Proceed with MS login.
* View your upcoming meetings.
3. **Recording**:
* Choose your **Template** (e.g., "Meeting Protocol").
* Select your **Input Device**.
* Click **Start Recording**.
3. **Processing**:
4. **Processing**:
* Click **Stop** when finished.
* The app will transcribe the audio (with timestamps!) and generate a summary based on your selected template.
* You will be automatically taken to the **Transcription** tab to review the results.

10
package-lock.json generated
View File

@@ -11,6 +11,7 @@
"@tailwindcss/postcss": "^4.1.18",
"@tauri-apps/api": "^2",
"@tauri-apps/plugin-dialog": "^2.6.0",
"@tauri-apps/plugin-fs": "^2.4.5",
"@tauri-apps/plugin-opener": "^2",
"jimp": "^1.6.0",
"lucide-react": "^0.562.0",
@@ -2086,6 +2087,15 @@
"@tauri-apps/api": "^2.8.0"
}
},
"node_modules/@tauri-apps/plugin-fs": {
"version": "2.4.5",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-fs/-/plugin-fs-2.4.5.tgz",
"integrity": "sha512-dVxWWGE6VrOxC7/jlhyE+ON/Cc2REJlM35R3PJX3UvFw2XwYhLGQVAIyrehenDdKjotipjYEVc4YjOl3qq90fA==",
"license": "MIT OR Apache-2.0",
"dependencies": {
"@tauri-apps/api": "^2.8.0"
}
},
"node_modules/@tauri-apps/plugin-opener": {
"version": "2.5.3",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-opener/-/plugin-opener-2.5.3.tgz",

View File

@@ -13,6 +13,7 @@
"@tailwindcss/postcss": "^4.1.18",
"@tauri-apps/api": "^2",
"@tauri-apps/plugin-dialog": "^2.6.0",
"@tauri-apps/plugin-fs": "^2.4.5",
"@tauri-apps/plugin-opener": "^2",
"jimp": "^1.6.0",
"lucide-react": "^0.562.0",

1107
src-tauri/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -26,5 +26,11 @@ serde_json = "1.0"
chrono = "0.4"
cpal = "0.17.1"
hound = "3.5.1"
reqwest = { version = "0.13.1", features = ["json", "multipart"] }
reqwest = { version = "0.11", features = ["json", "multipart"] }
tokio = { version = "1.40.0", features = ["full"] }
tauri-plugin-fs = "2.4.5"
voice_activity_detector = "0.2.1"
rubato = "0.14.1"
tauri-plugin-oauth = "2.0.0"
oauth2 = "4.4"
url = "2.5"

View File

@@ -8,6 +8,7 @@
"permissions": [
"core:default",
"opener:default",
"dialog:default"
"dialog:default",
"fs:default"
]
}

View File

@@ -0,0 +1,182 @@
#!/usr/bin/env swift
import Foundation
import CoreAudio
// Extensions and Helpers
// Extensions and Helpers
extension Int32 {
    /// Renders this value as a big-endian four-character code (FourCC), the
    /// conventional way to read Core Audio OSStatus codes (e.g. 'what').
    /// Non-printable bytes come through as their raw Unicode scalars.
    var fourCC: String {
        let codeUnits = stride(from: 24, through: 0, by: -8).map { shift in
            UInt16((self >> shift) & 0xFF)
        }
        return String(utf16CodeUnits: codeUnits, count: 4)
    }
}
// Safer Property Getter
// Safer Property Getter
/// Reads a fixed-size Core Audio property of the given object into a value of
/// type `T`. Returns nil if the HAL reports any error.
func getPropertyData<T>(objectID: AudioObjectID, selector: AudioObjectPropertySelector, initialValue: T) -> T? {
    var address = AudioObjectPropertyAddress(
        mSelector: selector,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )
    var result = initialValue
    var byteCount = UInt32(MemoryLayout<T>.size)
    guard AudioObjectGetPropertyData(objectID, &address, 0, nil, &byteCount, &result) == noErr else {
        return nil
    }
    return result
}
// CFString Helper
// CFString Helper
/// Reads a CFString-valued Core Audio property (name, UID, …) and bridges it
/// to a Swift String. Returns nil on any HAL error or if no value came back.
func getStringProperty(objectID: AudioObjectID, selector: AudioObjectPropertySelector) -> String? {
    var address = AudioObjectPropertyAddress(
        mSelector: selector,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )
    // A CFStringRef is just a pointer, so request exactly one pointer's worth.
    var cfValue: Unmanaged<CFString>?
    var byteCount = UInt32(MemoryLayout<Unmanaged<CFString>?>.size)
    guard AudioObjectGetPropertyData(objectID, &address, 0, nil, &byteCount, &cfValue) == noErr,
          let cf = cfValue else {
        return nil
    }
    // Take ownership of the returned reference (same retain balance as the
    // original implementation's takeRetainedValue()).
    return cf.takeRetainedValue() as String
}
/// Returns the IDs of every audio device the HAL currently knows about,
/// or an empty array if enumeration fails.
/// (Shared by the two lookups below, which previously duplicated this code.)
private func allAudioDeviceIDs() -> [AudioObjectID] {
    // System Object is 1
    let systemID = AudioObjectID(kAudioObjectSystemObject)
    var address = AudioObjectPropertyAddress(
        mSelector: kAudioHardwarePropertyDevices,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )
    var size: UInt32 = 0
    guard AudioObjectGetPropertyDataSize(systemID, &address, 0, nil, &size) == noErr else { return [] }
    let count = Int(size) / MemoryLayout<AudioObjectID>.size
    var deviceIDs = [AudioObjectID](repeating: 0, count: count)
    guard AudioObjectGetPropertyData(systemID, &address, 0, nil, &size, &deviceIDs) == noErr else { return [] }
    return deviceIDs
}

/// Finds a device whose human-readable name matches `name` exactly,
/// or nil if none does (or enumeration failed).
func findDeviceByName(_ name: String) -> AudioObjectID? {
    allAudioDeviceIDs().first {
        getStringProperty(objectID: $0, selector: kAudioDevicePropertyDeviceNameCFString) == name
    }
}

/// Finds a device by its unique identifier (stable across launches),
/// or nil if none matches (or enumeration failed).
func findDeviceByUID(_ uid: String) -> AudioObjectID? {
    allAudioDeviceIDs().first {
        getStringProperty(objectID: $0, selector: kAudioDevicePropertyDeviceUID) == uid
    }
}
/// Creates (or recreates) the "Hearbit Audio" aggregate device that combines
/// BlackHole 2ch with the user's current default input, so system audio and
/// the microphone can be recorded together.
/// Exits the process with status 0 on success and 1 on any failure, printing
/// progress to stdout (the Rust side captures and logs this output).
func createAggregateDevice() {
    print("Searching for devices...")
    // BlackHole must already be installed; it is the virtual loopback leg.
    guard let blackHoleID = findDeviceByName("BlackHole 2ch") else {
        print("Error: BlackHole 2ch not found. Please install it first.")
        exit(1)
    }
    print("Found BlackHole 2ch (ID: \(blackHoleID))")
    // Default Input: ask the HAL which device currently captures the mic.
    var defaultInputID: AudioObjectID = 0
    var size = UInt32(MemoryLayout<AudioObjectID>.size)
    var address = AudioObjectPropertyAddress(
        mSelector: kAudioHardwarePropertyDefaultInputDevice,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )
    if AudioObjectGetPropertyData(AudioObjectID(kAudioObjectSystemObject), &address, 0, nil, &size, &defaultInputID) != noErr {
        print("Error: Could not find default input.")
        exit(1)
    }
    print("Found Default Input (ID: \(defaultInputID))")
    // A stable UID lets us find and replace our own aggregate across runs
    // instead of accumulating stale copies.
    let targetUID = "hearbit_audio_aggregate_v1"
    if let existingID = findDeviceByUID(targetUID) {
        print("Found existing Hearbit Audio device (ID: \(existingID)). Destroying to recreate...")
        if AudioHardwareDestroyAggregateDevice(existingID) != noErr {
            print("Warning: Failed to destroy existing device.")
        } else {
            print("Existing device destroyed.")
        }
        // Give the HAL a moment to settle before re-creating the device.
        Thread.sleep(forTimeInterval: 0.5)
    }
    // Build SubDevice List: the aggregate is described by its members' UIDs.
    guard let bhUID = getStringProperty(objectID: blackHoleID, selector: kAudioDevicePropertyDeviceUID) else {
        print("Error: Could not get BlackHole UID.")
        exit(1)
    }
    guard let micUID = getStringProperty(objectID: defaultInputID, selector: kAudioDevicePropertyDeviceUID) else {
        print("Error: Could not get Default Input UID.")
        exit(1)
    }
    // Dedup: if the default input IS BlackHole (user set it as default),
    // don't list the same sub-device twice.
    var subDevicesUIDs = [bhUID]
    if micUID != bhUID {
        subDevicesUIDs.append(micUID)
    }
    let subDevicesArray = subDevicesUIDs.map {
        [kAudioSubDeviceUIDKey: $0]
    }
    // Not private (visible in Audio MIDI Setup) and not stacked (multi-output).
    let desc: [String: Any] = [
        kAudioAggregateDeviceNameKey: "Hearbit Audio",
        kAudioAggregateDeviceUIDKey: targetUID,
        kAudioAggregateDeviceIsPrivateKey: Int(0),
        kAudioAggregateDeviceIsStackedKey: Int(0),
        kAudioAggregateDeviceSubDeviceListKey: subDevicesArray
    ]
    print("Creating Aggregate Device with UIDs: \(subDevicesUIDs)")
    var outID: AudioObjectID = 0
    let err = AudioHardwareCreateAggregateDevice(desc as CFDictionary, &outID)
    if err == noErr {
        print("Success! Created 'Hearbit Audio' with ID: \(outID)")
        exit(0)
    } else {
        // fourCC makes OSStatus codes like 'what' human-readable.
        print("Failed to create device. Error code: \(err) (\(err.fourCC))")
        exit(1)
    }
}
createAggregateDevice()

View File

@@ -0,0 +1,183 @@
use std::sync::{Arc, Mutex};
use tauri::{AppHandle, Emitter};
use cpal::Sample;
use hound::WavWriter;
use rubato::{Resampler, FastFixedIn, PolynomialDegree};
use voice_activity_detector::VoiceActivityDetector;
/// Streams mono f32 audio through a Silero-style VAD and writes only speech
/// (plus a short hangover tail) to the WAV writer, emitting periodic
/// "vad-event" notifications to the frontend.
pub struct AudioProcessor {
    // VAD
    vad: VoiceActivityDetector,   // detector fed fixed-size 16 kHz chunks
    vad_chunk_size: usize,        // samples per prediction (512 @ 16 kHz)
    vad_buffer: Vec<f32>,         // resampled samples waiting for a full chunk
    // Resampler
    resampler: FastFixedIn<f32>,  // device rate -> 16 kHz, fixed input size
    resample_input_buffer: Vec<f32>,   // device-rate samples awaiting resampling
    resample_output_buffer: Vec<f32>,  // NOTE(review): currently unused
    // State
    is_speech_active: bool,       // gate: are we currently writing audio?
    last_speech_time: u64,        // sample index (device rate) of last detected speech
    hangover_samples: u64,        // keep writing this many samples after speech ends
    // Ring Buffer (for pre-roll)
    ring_buffer: Vec<f32>,        // last ~1 s of raw audio (pre-roll; not yet flushed anywhere)
    ring_pos: usize,              // next write position in ring_buffer
    ring_size: usize,             // ring_buffer capacity in samples
    // Output
    writer: Arc<Mutex<WavWriter<std::io::BufWriter<std::fs::File>>>>,
    sample_rate: u32,             // device sample rate of incoming data
    total_processed_samples: u64, // running count of samples seen
    // Event Emission
    app_handle: Option<AppHandle>,          // target for "vad-event" emissions
    last_event_time: std::time::Instant,    // throttles events to ~2/s
}
impl AudioProcessor {
    /// Builds a processor that resamples `sample_rate` mono input down to
    /// 16 kHz for the VAD and writes gated audio to `writer`.
    ///
    /// Returns an error string if the VAD or resampler fails to initialize.
    pub fn new(
        sample_rate: u32,
        writer: Arc<Mutex<WavWriter<std::io::BufWriter<std::fs::File>>>>,
        app_handle: AppHandle
    ) -> Result<Self, String> {
        // Silero expects 16 kHz and ~32 ms windows: 16000 * 0.032 = 512 samples.
        let vad_sample_rate = 16000;
        let vad_chunk_size = 512;
        let vad = VoiceActivityDetector::builder()
            .sample_rate(vad_sample_rate as u32)
            .chunk_size(vad_chunk_size)
            .build()
            .map_err(|e| format!("Failed to init VAD: {:?}", e))?;
        // FastFixedIn consumes a fixed number of input frames per call (1024)
        // and produces a ratio-scaled number of output frames.
        // Args: (resample_ratio, max_relative_ratio, polynomial degree, chunk, channels)
        let resampler = FastFixedIn::<f32>::new(
            16000.0 / sample_rate as f64,
            1.0,
            PolynomialDegree::Septic,
            1024,
            1
        ).map_err(|e| format!("Failed to init Resampler: {:?}", e))?;
        // Pre-roll ring buffer: keep ~1 s of device-rate audio for a future
        // "dump on speech onset" feature (see the TODO in process()).
        let ring_curr_seconds = 1.0;
        let ring_size = (sample_rate as f32 * ring_curr_seconds) as usize;
        Ok(Self {
            vad,
            vad_chunk_size,
            vad_buffer: Vec::new(),
            resampler,
            resample_input_buffer: Vec::new(),
            resample_output_buffer: Vec::new(),
            is_speech_active: false,
            last_speech_time: 0,
            hangover_samples: (sample_rate as f32 * 1.5) as u64, // 1.5 s hangover
            ring_buffer: vec![0.0; ring_size],
            ring_pos: 0,
            ring_size,
            writer,
            sample_rate,
            total_processed_samples: 0,
            app_handle: Some(app_handle),
            last_event_time: std::time::Instant::now(),
        })
    }

    /// Feeds one audio callback's worth of mono f32 samples through the VAD
    /// gate; samples are written to the WAV file only while speech is active
    /// or within the hangover window after it.
    pub fn process(&mut self, data: &[f32]) {
        // 1. Always keep the most recent second of raw audio (pre-roll).
        for &sample in data {
            self.ring_buffer[self.ring_pos] = sample;
            self.ring_pos = (self.ring_pos + 1) % self.ring_size;
        }

        // 2. Resample to 16 kHz for the VAD. FastFixedIn requires exactly
        //    `input_frames_next()` frames per call, so buffer until available.
        self.resample_input_buffer.extend_from_slice(data);
        let needed = self.resampler.input_frames_next();
        while self.resample_input_buffer.len() >= needed {
            let chunk: Vec<f32> = self.resample_input_buffer.drain(0..needed).collect();
            let waves_in = vec![chunk];
            // Output capacity: ratio-scaled frame count plus a little slack.
            let out_capacity = (needed as f64 * (16000.0 / self.sample_rate as f64)).ceil() as usize + 10;
            let mut waves_out = vec![vec![0.0; out_capacity]; 1];
            if let Ok((_in_len, out_len)) = self.resampler.process_into_buffer(&waves_in, &mut waves_out, None) {
                if out_len > 0 {
                    self.vad_buffer.extend_from_slice(&waves_out[0][0..out_len]);
                }
            }
        }

        // 3. Run the VAD over complete chunks; a probability > 0.5 counts as speech.
        while self.vad_buffer.len() >= self.vad_chunk_size {
            let vad_chunk: Vec<f32> = self.vad_buffer.drain(0..self.vad_chunk_size).collect();
            let probability = self.vad.predict(vad_chunk);
            let is_speech = probability > 0.5;
            if is_speech {
                self.is_speech_active = true;
                self.last_speech_time = self.total_processed_samples;
            }
            // Throttle frontend notifications to one every 500 ms.
            if self.last_event_time.elapsed().as_millis() > 500 {
                if let Some(app) = &self.app_handle {
                    #[derive(serde::Serialize, Clone)]
                    struct VadEvent {
                        probability: f32,
                        is_speech: bool,
                    }
                    let _ = app.emit("vad-event", VadEvent { probability, is_speech });
                }
                self.last_event_time = std::time::Instant::now();
            }
        }

        // 4. Gate the writer.
        //    BUG FIX: previously `is_speech_active` was never reset, so after
        //    the first detected speech the gate stayed open forever (and the
        //    raw hangover test opened it before any speech at all). Now the
        //    gate opens on speech and closes once the hangover expires.
        let time_since_speech = self.total_processed_samples.saturating_sub(self.last_speech_time);
        if self.is_speech_active && time_since_speech >= self.hangover_samples {
            self.is_speech_active = false;
        }
        if self.is_speech_active {
            // TODO: when the gate (re)opens, flush the ring buffer first so
            // the onset of speech isn't clipped.
            let mut guard = self.writer.lock().unwrap();
            for &sample in data {
                let amplitude = i16::MAX as f32;
                guard.write_sample((sample * amplitude) as i16).ok();
            }
        }
        self.total_processed_samples += data.len() as u64;
    }
}

112
src-tauri/src/auth.rs Normal file
View File

@@ -0,0 +1,112 @@
use tauri::{AppHandle, Runtime};
use tauri_plugin_opener::OpenerExt;
use tauri_plugin_oauth::start;
use url::Url;
use oauth2::{
basic::BasicClient, AuthUrl, ClientId, CsrfToken, PkceCodeChallenge, RedirectUrl, Scope,
TokenResponse, TokenUrl,
};
use oauth2::reqwest::async_http_client;
// const CLIENT_ID: &str = "YOUR_CLIENT_ID_HERE";
const AUTH_URL: &str = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize";
const TOKEN_URL: &str = "https://login.microsoftonline.com/common/oauth2/v2.0/token";
/// Runs the Microsoft identity platform Authorization Code + PKCE flow for a
/// public client (no client secret): spins up a temporary localhost redirect
/// server, opens the system browser to the consent page, waits for the
/// redirect, and exchanges the returned code for an access token.
///
/// Returns the raw access token string; the frontend is responsible for
/// storing it (MVP — OS keychain storage would be safer).
#[tauri::command]
pub async fn start_auth_flow<R: Runtime>(app: AppHandle<R>, client_id: String) -> Result<String, String> {
    // 1. Start Localhost Server
    let (tx, rx) = std::sync::mpsc::channel();
    // tauri-plugin-oauth's start() picks a free port and shuts the server
    // down after the first (non-favicon) request is received.
    let port = start(move |url| {
        // Browsers request /favicon.ico; ignore it so it can't consume the callback.
        if url.contains("favicon.ico") {
            return;
        }
        let _ = tx.send(url);
    })
    .map_err(|e| format!("Failed to start oauth server: {}", e))?;
    // This redirect URI must match one registered on the Azure app registration.
    let redirect_uri = format!("http://localhost:{}/auth/callback", port);
    // 2. Setup OAuth Client
    let client = BasicClient::new(
        ClientId::new(client_id),
        None, // No client secret for PKCE public client
        AuthUrl::new(AUTH_URL.to_string()).map_err(|e| e.to_string())?,
        Some(TokenUrl::new(TOKEN_URL.to_string()).map_err(|e| e.to_string())?),
    )
    .set_redirect_uri(RedirectUrl::new(redirect_uri.clone()).map_err(|e| e.to_string())?);
    // 3. Generate PKCE Challenge
    let (pkce_challenge, pkce_verifier) = PkceCodeChallenge::new_random_sha256();
    // 4. Generate Auth URL
    // NOTE(review): the CSRF state token is discarded and never compared with
    // the callback's `state` parameter — consider validating it.
    let (auth_url, _csrf_token) = client
        .authorize_url(CsrfToken::new_random)
        .add_scope(Scope::new("User.Read".to_string()))
        .add_scope(Scope::new("Calendars.Read".to_string()))
        .set_pkce_challenge(pkce_challenge)
        .url();
    // 5. Open Browser
    app.opener().open_url(auth_url.as_str(), None::<&str>)
        .map_err(|e| format!("Failed to open browser: {}", e))?;
    // 6. Wait for Callback
    // NOTE(review): this is a blocking mpsc recv() inside an async fn; it
    // parks an executor thread until the user finishes logging in — confirm
    // this is acceptable for the app's runtime.
    let received_url_str = rx.recv().map_err(|e| format!("Failed to receive auth code: {}", e))?;
    // 7. Parse Code from URL (the code/error arrive as query parameters).
    let parsed_url = Url::parse(&received_url_str).map_err(|e| e.to_string())?;
    let pairs: std::collections::HashMap<_, _> = parsed_url.query_pairs().into_owned().collect();
    if let Some(err) = pairs.get("error") {
        let desc = pairs.get("error_description").map(|s| s.as_str()).unwrap_or("No description");
        return Err(format!("OAuth Error: {} ({})", err, desc));
    }
    let code = pairs.get("code").ok_or_else(|| format!("No code in redirect callback. Received URL: {}", received_url_str))?;
    // 8. Exchange Code for Token (PKCE verifier proves we initiated the flow).
    let token_result = client
        .exchange_code(oauth2::AuthorizationCode::new(code.clone()))
        .set_pkce_verifier(pkce_verifier)
        .request_async(async_http_client)
        .await
        .map_err(|e| format!("Failed to exchange token: {}", e))?;
    let access_token = token_result.access_token().secret();
    // Returned to the caller rather than persisted here.
    Ok(access_token.clone())
}
/// Fetches the next 7 days of the signed-in user's calendar from the
/// Microsoft Graph `calendarView` endpoint, returning the raw event objects.
/// Errors (network or JSON decoding) are surfaced as strings for the frontend.
#[tauri::command]
pub async fn get_calendar_events(token: String) -> Result<Vec<serde_json::Value>, String> {
    let window_start = chrono::Utc::now();
    let window_end = window_start + chrono::Duration::days(7);
    let response = reqwest::Client::new()
        .get("https://graph.microsoft.com/v1.0/me/calendarView")
        .bearer_auth(token)
        .query(&[
            ("startDateTime", window_start.to_rfc3339()),
            ("endDateTime", window_end.to_rfc3339()),
            ("$select", "id,subject,start,end,location,onlineMeeting,bodyPreview,body,attendees".to_string())
        ])
        // Ask Graph to normalize event times to UTC.
        .header("Prefer", "outlook.timezone=\"UTC\"")
        .send()
        .await
        .map_err(|e| e.to_string())?
        .json::<serde_json::Value>()
        .await
        .map_err(|e| e.to_string())?;
    // Graph wraps results in a top-level "value" array; treat anything else
    // as an empty calendar.
    match response.get("value").and_then(|v| v.as_array()) {
        Some(events) => Ok(events.clone()),
        None => Ok(vec![]),
    }
}

View File

@@ -5,6 +5,10 @@ use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
use std::time::Duration;
use tokio::time::sleep;
mod audio_processor;
use audio_processor::AudioProcessor;
mod auth;
// State to hold the active recording stream
struct AppState {
recording_stream: Mutex<Option<cpal::Stream>>,
@@ -60,7 +64,7 @@ fn get_input_devices() -> Result<Vec<AudioDevice>, String> {
#[tauri::command]
fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String, save_path: Option<String>) -> Result<(), String> {
fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String, save_path: Option<String>, custom_filename: Option<String>) -> Result<(), String> {
emit_log(&app, "INFO", &format!("Starting recording on device: {}", device_id));
let host = cpal::default_host();
@@ -73,6 +77,15 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
.ok_or("No input device found")?;
let config = device.default_input_config().map_err(|e| e.to_string())?;
// VAD requires 16Hz or 8kHz, typically. Silero likes 16k.
// We might need to resample or just check if the device supports it.
// For MVP VAD, we will try to stick to standard rates.
// Actually, simple energy VAD is easier to start with if Silero is too heavy or requires ONNX runtime.
// Let's check the crate docs or usage first.
// Wait, the user wants to IGNORE music. Energy VAD will fail on music.
// voice_activity_detector crate usually uses Silero or similar.
let spec = hound::WavSpec {
channels: config.channels(),
sample_rate: config.sample_rate(),
@@ -81,16 +94,22 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
};
// Determine file path: User provided or Temp
let filename = if let Some(name) = custom_filename {
// Sanitize filename
let safe_name: String = name.chars().map(|x| if x.is_alphanumeric() || x == ' ' || x == '-' || x == '_' { x } else { '_' }).collect();
format!("{}.wav", safe_name)
} else {
format!("recording_{}.wav", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs())
};
let file_path = if let Some(path) = save_path {
if path.trim().is_empty() {
std::env::temp_dir().join(format!("recording_{}.wav", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs()))
std::env::temp_dir().join(&filename)
} else {
// Check if directory exists, if not try to create it or error out?
// For now, assume user gives a valid directory. We'll append filename.
std::path::PathBuf::from(path).join(format!("recording_{}.wav", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs()))
std::path::PathBuf::from(path).join(&filename)
}
} else {
std::env::temp_dir().join(format!("recording_{}.wav", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs()))
std::env::temp_dir().join(&filename)
};
let file_path_str = file_path.to_string_lossy().to_string();
@@ -99,6 +118,19 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
let writer = hound::WavWriter::create(&file_path, spec).map_err(|e| e.to_string())?;
let writer = Arc::new(Mutex::new(writer));
let writer_clone = writer.clone();
// Initialize AudioProcessor (VAD)
// We pass the writer to it.
let processor = AudioProcessor::new(config.sample_rate(), writer.clone(), app.clone())
.map_err(|e| format!("Failed to create AudioProcessor: {}", e))?;
// Wrap processor in Arc<Mutex> so we can share/move it into callback
// Actually, cpal callback takes ownership of its closure state usually if 'move'.
// Since stream is on another thread, we need Send. AudioProcessor should be Send.
// However, the callback is called repeatedly. We need to keep state.
// The workaround is to wrap it in a Mutex.
let processor = Arc::new(Mutex::new(processor));
let processor_clone = processor.clone();
let app_handle = app.clone();
let err_fn = move |err| {
@@ -110,21 +142,21 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
cpal::SampleFormat::F32 => device.build_input_stream(
&config.into(),
move |data: &[f32], _: &_| {
let mut guard = writer_clone.lock().unwrap();
for &sample in data {
let amplitude = i16::MAX as f32;
guard.write_sample((sample * amplitude) as i16).ok();
if let Ok(mut p) = processor_clone.lock() {
p.process(data);
}
},
err_fn,
None
),
// For I16 and U16 we need to convert to F32 for our processor
cpal::SampleFormat::I16 => device.build_input_stream(
&config.into(),
move |data: &[i16], _: &_| {
let mut guard = writer_clone.lock().unwrap();
for &sample in data {
guard.write_sample(sample).ok();
// Convert i16 to f32
let f32_data: Vec<f32> = data.iter().map(|&s| s as f32 / i16::MAX as f32).collect();
if let Ok(mut p) = processor_clone.lock() {
p.process(&f32_data);
}
},
err_fn,
@@ -133,9 +165,10 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
cpal::SampleFormat::U16 => device.build_input_stream(
&config.into(),
move |data: &[u16], _: &_| {
let mut guard = writer_clone.lock().unwrap();
for &sample in data {
guard.write_sample((sample as i32 - 32768) as i16).ok();
// Convert u16 to f32
let f32_data: Vec<f32> = data.iter().map(|&s| (s as i32 - 32768) as f32 / 32768.0).collect();
if let Ok(mut p) = processor_clone.lock() {
p.process(&f32_data);
}
},
err_fn,
@@ -536,6 +569,60 @@ fn open_audio_midi_setup() -> Result<(), String> {
Ok(())
}
/// Tauri command: runs the bundled `create_hearbit_audio.swift` helper via the
/// system `swift` interpreter to create the "Hearbit Audio" aggregate device.
/// Requires a Swift toolchain on the host. Returns a success message, or an
/// error combining the script's stdout/stderr.
#[tauri::command]
fn create_hearbit_audio_device(app: AppHandle) -> Result<String, String> {
    emit_log(&app, "INFO", "Attempting to create Hearbit Audio device...");
    // Resolve resource path inside the bundled app resources.
    let resource_path = app.path().resource_dir()
        .map_err(|e| e.to_string())?
        .join("resources/create_hearbit_audio.swift");
    if !resource_path.exists() {
        // Fallback for dev environment where resources might not be bundled yet or different path.
        // NOTE(review): only a warning is logged; the command below still runs
        // with the missing path and the failure surfaces through its output.
        emit_log(&app, "WARN", &format!("Resource script not found at {:?}. Trying local src-tauri path.", resource_path));
    }
    let output = Command::new("swift")
        .arg(resource_path)
        .output()
        .map_err(|e| e.to_string())?;
    let stdout = String::from_utf8_lossy(&output.stdout).to_string();
    let stderr = String::from_utf8_lossy(&output.stderr).to_string();
    emit_log(&app, "DEBUG", &format!("Script Output: {}", stdout));
    if !stderr.is_empty() {
        emit_log(&app, "WARN", &format!("Script Stderr: {}", stderr));
    }
    // The script exits 0 on success, 1 on failure (see its print output).
    if output.status.success() {
        emit_log(&app, "SUCCESS", "Hearbit Audio device created successfully.");
        Ok("Device created successfully".to_string())
    } else {
        emit_log(&app, "ERROR", "Failed to create device.");
        Err(format!("Failed to create device: {} {}", stdout, stderr))
    }
}
/// Tauri command: writes `content` to `path`, logging the outcome to the
/// frontend log stream. Returns the OS error message on failure.
#[tauri::command]
async fn save_text_file(app: AppHandle, path: String, content: String) -> Result<(), String> {
    emit_log(&app, "INFO", &format!("Saving text file to: {}", path));
    if let Err(e) = std::fs::write(&path, content) {
        emit_log(&app, "ERROR", &format!("Failed to save file: {}", e));
        return Err(e.to_string());
    }
    emit_log(&app, "SUCCESS", "File saved successfully.");
    Ok(())
}
#[cfg_attr(mobile, tauri::mobile_entry_point)]
@@ -543,6 +630,8 @@ pub fn run() {
tauri::Builder::default()
.plugin(tauri_plugin_opener::init())
.plugin(tauri_plugin_dialog::init())
.plugin(tauri_plugin_fs::init())
.plugin(tauri_plugin_oauth::init())
.manage(AppState {
recording_stream: Mutex::new(None),
recording_file_path: Mutex::new(None),
@@ -557,7 +646,11 @@ pub fn run() {
transcribe_audio,
summarize_text,
get_available_models,
open_audio_midi_setup
open_audio_midi_setup,
create_hearbit_audio_device,
auth::start_auth_flow,
auth::get_calendar_events,
save_text_file
])
.run(tauri::generate_context!())
.expect("error while running tauri application");

View File

@@ -32,7 +32,7 @@
"icons/icon.ico"
],
"resources": [
"resources/BlackHole2ch.v0.6.1.pkg"
"resources/*"
]
}
}

View File

@@ -6,18 +6,37 @@ import Recorder from "./components/Recorder";
import LogViewer, { LogEntry } from "./components/LogViewer";
import TranscriptionView from "./components/TranscriptionView";
import Tabs from "./components/Tabs";
import MeetingsView from "./components/MeetingsView";
import HistoryView from "./components/HistoryView";
import ToastContainer, { ToastMessage, ToastType } from "./components/ui/Toast";
export interface PromptTemplate {
id: string;
name: string;
content: string;
keywords?: string[];
}
function App() {
const [view, setView] = useState<'recorder' | 'logs' | 'settings' | 'transcription'>('recorder');
// Keep track of the *previous* tab to return to from settings
const [lastTab, setLastTab] = useState<'recorder' | 'logs' | 'transcription'>('recorder');
const [view, setView] = useState<'recorder' | 'logs' | 'settings' | 'transcription' | 'meetings' | 'history'>('recorder');
const [lastTab, setLastTab] = useState<'recorder' | 'logs' | 'transcription' | 'meetings' | 'history'>('recorder');
// Auto-start recording state to handle "Join & Record" transition
const [autoStartRecording, setAutoStartRecording] = useState(false);
const [recordingSubject, setRecordingSubject] = useState<string>('');
// Toast State
const [toasts, setToasts] = useState<ToastMessage[]>([]);
const addToast = (message: string, type: ToastType = 'info', duration = 3000) => {
const id = Date.now().toString() + Math.random().toString();
setToasts(prev => [...prev, { id, message, type, duration }]);
};
const removeToast = (id: string) => {
setToasts(prev => prev.filter(t => t.id !== id));
};
const [apiKey, setApiKey] = useState(localStorage.getItem('infomaniak_api_key') || '');
const [productId, setProductId] = useState(localStorage.getItem('infomaniak_product_id') || '');
const [savePath, setSavePath] = useState(localStorage.getItem('infomaniak_save_path') || '');
@@ -61,7 +80,8 @@ Kurze Stichpunkte zu Themen, die besprochen, aber noch nicht final geklärt wurd
| [Aufgabe 2] | [Name] | [Datum] |
## 5. Nächste Schritte / Nächstes Meeting
Kurze Info zum weiteren Vorgehen.`
Kurze Info zum weiteren Vorgehen.`,
keywords: ['protokoll', 'meeting', 'team', 'daily', 'weekly']
},
{
id: '2',
@@ -96,7 +116,8 @@ Thema B: [Kurze Zusammenfassung]
| Wer? | Was ist zu tun / zu beachten? | Bis wann? |
| :--- | :--- | :--- |
| [Name] | [Aufgabe] | [Datum] |
| [Name] | [Aufgabe] | [Datum] |`
| [Name] | [Aufgabe] | [Datum] |`,
keywords: ['personal', 'privat', 'vertraulich', 'entwicklungsgespräch', 'feedback', 'unter vier augen']
},
{
id: '3',
@@ -138,7 +159,8 @@ Teilnehmer: [Namen Kunden] & [Namen Intern]
[ ] [Aufgabe, z.B. Zugangdaten senden, Design freigeben] (bis [Datum])
## 5. Nächster Termin / Timeline
Wann findet das nächste Meeting statt oder was ist der nächste Meilenstein?`
Wann findet das nächste Meeting statt oder was ist der nächste Meilenstein?`,
keywords: ['beratung', 'kunde', 'client', 'angebot', 'projekt', 'extern']
}
];
@@ -168,6 +190,8 @@ Wann findet das nächste Meeting statt oder was ist der nächste Meilenstein?`
date: string;
transcription: string;
summary: string;
subject?: string;
filename?: string;
}
const [history, setHistory] = useState<HistoryItem[]>(() => {
@@ -179,16 +203,39 @@ Wann findet das nächste Meeting statt oder was ist der nächste Meilenstein?`
const transToSave = t !== undefined ? t : transcription;
const sumToSave = s !== undefined ? s : summary;
// Sanitize subject for filename
const safeSubject = recordingSubject
? recordingSubject.replace(/[^a-zA-Z0-9_-]/g, '_')
: `Meeting_${Date.now()}`;
const filename = `${safeSubject}.md`;
if (!transToSave && !sumToSave) return;
const newItem: HistoryItem = {
id: Date.now().toString(),
date: new Date().toLocaleString(),
transcription: transToSave,
summary: sumToSave
summary: sumToSave,
subject: recordingSubject || "Untitled Recording",
filename: filename
};
const newHistory = [newItem, ...history];
setHistory(newHistory);
localStorage.setItem('infomaniak_history', JSON.stringify(newHistory));
// Persist to Disk (Markdown)
const content = `# ${newItem.subject}\nDate: ${newItem.date}\n\n## Summary\n${sumToSave}\n\n## Transcription\n${transToSave}`;
// If savePath is set, we use it. If not, backend defaults to temp. Here we want to save text.
// Let's assume savePath is set or we default to Documents/Hearbit (if we could).
// For now, if savePath is set, use it.
if (savePath) {
// We need invoke to save text
import("@tauri-apps/api/core").then(({ invoke }) => {
invoke('save_text_file', { path: `${savePath}/${filename}`, content })
.then(() => addToast('Transcript saved to file', 'success'))
.catch(e => addToast(`Failed to save file: ${e}`, 'error'));
});
}
};
const handleDeleteHistory = (id: string) => {
@@ -200,7 +247,7 @@ Wann findet das nächste Meeting statt oder was ist der nächste Meilenstein?`
const handleLoadHistory = (item: HistoryItem) => {
setTranscription(item.transcription);
setSummary(item.summary);
setView('recorder'); // Ensure we go back to recorder to see it
setView('transcription'); // Switch to Transcription view to see content
};
// Logs State
@@ -224,7 +271,7 @@ Wann findet das nächste Meeting statt oder was ist der nächste Meilenstein?`
<div className="absolute right-4 top-4">
<button
onClick={() => {
setLastTab(view === 'logs' ? 'logs' : 'recorder');
setLastTab(view === 'logs' || view === 'history' ? view : 'recorder');
setView('settings');
}}
className="p-2 text-muted-foreground hover:text-foreground hover:bg-secondary rounded-full transition-colors"
@@ -234,53 +281,79 @@ Wann findet das nächste Meeting statt oder was ist der nächste Meilenstein?`
</div>
<Tabs
currentTab={view as 'recorder' | 'logs' | 'transcription'}
currentTab={view as 'recorder' | 'logs' | 'transcription' | 'meetings' | 'history'}
onTabChange={(t) => setView(t)}
/>
</div>
)}
<div className="flex-1 flex flex-col h-full overflow-hidden relative">
{view === 'recorder' && (
<Recorder
apiKey={apiKey}
productId={productId}
prompts={prompts}
onOpenSettings={() => {
setLastTab('recorder');
setView('settings');
}}
transcription={transcription}
setTranscription={setTranscription}
summary={summary}
setSummary={setSummary}
history={history}
onSaveToHistory={handleSaveToHistory}
onDeleteHistory={handleDeleteHistory}
onLoadHistory={handleLoadHistory}
savePath={savePath}
onRecordingComplete={() => setView('transcription')}
/>
)}
<div className="flex-1 flex h-full overflow-hidden relative">
<div className="flex-1 flex flex-col h-full overflow-hidden relative">
{view === 'recorder' && (
<Recorder
apiKey={apiKey}
productId={productId}
prompts={prompts}
onOpenSettings={() => {
setLastTab('recorder');
setView('settings');
}}
transcription={transcription}
setTranscription={setTranscription}
summary={summary}
setSummary={setSummary}
history={history}
onSaveToHistory={handleSaveToHistory}
onDeleteHistory={handleDeleteHistory}
onLoadHistory={handleLoadHistory}
savePath={savePath}
{view === 'transcription' && (
<TranscriptionView transcription={transcription} summary={summary} />
)}
onRecordingComplete={() => setView('transcription')}
autoStart={autoStartRecording}
recordingSubject={recordingSubject}
onAutoStartHandled={() => setAutoStartRecording(false)}
addToast={addToast}
/>
)}
{view === 'logs' && (
<LogViewer logs={logs} />
)}
{view === 'transcription' && (
<TranscriptionView transcription={transcription} summary={summary} />
)}
{view === 'settings' && (
<Settings
onSave={handleSaveSettings}
onClose={() => setView(lastTab)}
apiKey={apiKey}
productId={productId}
prompts={prompts}
savePath={savePath}
/>
)}
{view === 'history' && (
<HistoryView
history={history}
onLoad={handleLoadHistory}
onDelete={handleDeleteHistory}
/>
)}
{view === 'meetings' && (
<MeetingsView
onStartRecording={(subject) => {
setView('recorder');
setRecordingSubject(subject || '');
setAutoStartRecording(true);
}}
/>
)}
{view === 'logs' && (
<LogViewer logs={logs} />
)}
{view === 'settings' && (
<Settings
onSave={handleSaveSettings}
onClose={() => setView(lastTab)}
apiKey={apiKey}
productId={productId}
prompts={prompts}
savePath={savePath}
/>
)}
</div>
<ToastContainer toasts={toasts} removeToast={removeToast} />
</div>
</div>
);

View File

@@ -0,0 +1,67 @@
import { FileText, Trash2, Calendar } from 'lucide-react';
// One saved recording session, supplied by the parent (App) which owns persistence.
interface HistoryItem {
  id: string;
  // Locale-formatted creation time (not ISO) — fine for display, not for sorting.
  date: string;
  // Full transcript text. NOTE(review): might be raw text or a file path — confirm with the producer.
  transcription: string;
  summary: string;
  // Meeting subject; the UI falls back to "Untitled Recording" when absent.
  subject?: string;
  // Markdown filename the transcript was exported as, when saved to disk.
  filename?: string;
}
// Props for HistoryView; both callbacks are owned by the parent component.
interface HistoryViewProps {
  history: HistoryItem[];
  // Open an item in the main view.
  onLoad: (item: HistoryItem) => void;
  // Remove an item by its id.
  onDelete: (id: string) => void;
}
// Read-only list of saved recording sessions. Clicking a card loads it into the
// main view; the trash button (revealed on hover) deletes it.
export default function HistoryView({ history, onLoad, onDelete }: HistoryViewProps) {
  // Short summary preview. Fix: only append an ellipsis when the text was
  // actually truncated (the old code appended "..." to every summary).
  const previewOf = (summary: string): string => {
    if (!summary) return "No summary available.";
    return summary.length > 150 ? summary.substring(0, 150) + "..." : summary;
  };

  return (
    <div className="flex flex-col w-full h-full bg-background p-6">
      <h1 className="text-2xl font-bold mb-6 flex items-center gap-2">
        <FileText className="w-8 h-8" />
        Recording History
      </h1>
      {history.length === 0 ? (
        <div className="flex-1 flex flex-col items-center justify-center text-muted-foreground">
          <FileText size={48} className="mb-4 opacity-20" />
          <p>No history found.</p>
        </div>
      ) : (
        <div className="flex-1 overflow-y-auto space-y-3 pr-2">
          {history.map(item => (
            <div key={item.id} className="bg-card border border-border rounded-xl p-4 hover:shadow-md transition-all group">
              <div className="flex justify-between items-start">
                <div
                  className="flex-1 cursor-pointer"
                  onClick={() => onLoad(item)}
                >
                  <h3 className="text-lg font-semibold group-hover:text-primary transition-colors mb-1">
                    {item.subject || "Untitled Recording"}
                  </h3>
                  <div className="flex items-center gap-2 text-xs text-muted-foreground mb-2">
                    <Calendar size={12} />
                    {item.date}
                    {item.filename && <span className="bg-secondary px-1.5 py-0.5 rounded text-[10px] font-mono">{item.filename}</span>}
                  </div>
                  <p className="text-sm text-foreground/70 line-clamp-2">
                    {previewOf(item.summary)}
                  </p>
                </div>
                <button
                  onClick={(e) => { e.stopPropagation(); onDelete(item.id); }}
                  className="text-muted-foreground hover:text-destructive p-2 rounded-lg hover:bg-destructive/10 transition-colors opacity-0 group-hover:opacity-100"
                  title="Delete"
                >
                  <Trash2 size={18} />
                </button>
              </div>
            </div>
          ))}
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,289 @@
import { useState, useEffect } from 'react';
import { invoke } from '@tauri-apps/api/core';
import { Calendar, RefreshCw, LogIn, Video } from 'lucide-react';
import { openUrl } from '@tauri-apps/plugin-opener';
// Shape of one calendar event as returned by the Rust `get_calendar_events`
// command. Field names mirror the Microsoft Graph "event" resource —
// NOTE(review): assumes the backend passes Graph payloads through unchanged; confirm.
interface CalendarEvent {
  id: string;
  subject: string;
  start: { dateTime: string, timeZone: string };
  end: { dateTime: string, timeZone: string };
  // Present only for online (Teams/Zoom) meetings.
  onlineMeeting?: { joinUrl: string };
  location?: { displayName: string };
  bodyPreview?: string; // Text preview
  body?: { content: string, contentType: string }; // Full HTML/Text
  attendees?: { emailAddress: { name: string, address: string }, type: string, status: { response: string } }[];
}
// Props for MeetingsView.
interface MeetingsViewProps {
  // Called with the meeting subject when the user clicks "Join & Record";
  // the parent is expected to switch to the recorder.
  onStartRecording: (subject?: string) => void;
}
// Microsoft 365 "Upcoming Meetings" panel: runs an interactive auth flow via the
// Rust backend, lists upcoming calendar events, and lets the user open a meeting
// link and immediately hand off to the recorder.
//
// SECURITY NOTE(review): the access token is cached in plain localStorage, and
// event body HTML is injected with dangerouslySetInnerHTML below (XSS surface for
// untrusted calendar content). Both should be revisited — consider secure storage
// and HTML sanitization before rendering remote markup.
export default function MeetingsView({ onStartRecording }: MeetingsViewProps) {
  const [isAuthenticated, setIsAuthenticated] = useState(false);
  // Token survives reloads via localStorage; empty string means "signed out".
  const [token, setToken] = useState(localStorage.getItem('m365_token') || '');
  const [clientId, setClientId] = useState(localStorage.getItem('m365_client_id') || '');
  const [events, setEvents] = useState<CalendarEvent[]>([]);
  const [loading, setLoading] = useState(false);
  const [error, setError] = useState('');
  // IDs of events whose detail section is currently expanded.
  const [expandedIds, setExpandedIds] = useState<Set<string>>(new Set());

  // Toggle one event card's expanded state (fresh Set so React sees a new reference).
  const toggleExpand = (id: string) => {
    const newSet = new Set(expandedIds);
    if (newSet.has(id)) {
      newSet.delete(id);
    } else {
      newSet.add(id);
    }
    setExpandedIds(newSet);
  };

  // Fetch whenever a token appears (initial mount with a cached token, or login).
  // NOTE(review): handleLogin also calls fetchEvents directly, so a fresh login
  // triggers two fetches — harmless but redundant; confirm before changing.
  useEffect(() => {
    if (token) {
      setIsAuthenticated(true);
      fetchEvents(token);
    }
  }, [token]);

  // Run the interactive OAuth flow in the backend and cache the resulting token.
  const handleLogin = async () => {
    if (!clientId) {
      setError("Please enter a Client ID");
      return;
    }
    localStorage.setItem('m365_client_id', clientId);
    setLoading(true);
    setError('');
    try {
      const accessToken = await invoke<string>('start_auth_flow', { clientId });
      setToken(accessToken);
      localStorage.setItem('m365_token', accessToken);
      setIsAuthenticated(true);
      fetchEvents(accessToken);
    } catch (err) {
      console.error("Auth failed", err);
      setError(String(err)); // Use String() to safely convert error object
    } finally {
      setLoading(false);
    }
  };

  // Ask the backend for calendar events and store them sorted by start time.
  const fetchEvents = async (authToken: string) => {
    setLoading(true);
    setError('');
    try {
      const data = await invoke<CalendarEvent[]>('get_calendar_events', { token: authToken });
      // Sort by start time
      const sorted = data.sort((a, b) => new Date(a.start.dateTime).getTime() - new Date(b.start.dateTime).getTime());
      setEvents(sorted);
    } catch (err) {
      console.error("Fetch failed", err);
      setError(`Fetch failed: ${err}`);
      // If error is 401, logout
      if (String(err).includes('401')) {
        logout();
      }
    } finally {
      setLoading(false);
    }
  };

  // Drop the cached token and clear all account-derived state.
  const logout = () => {
    setToken('');
    localStorage.removeItem('m365_token');
    setIsAuthenticated(false);
    setEvents([]);
  };

  // Open the meeting URL in the default browser, then hand off to the recorder.
  const handleJoin = async (joinUrl?: string, subject?: string) => {
    if (!joinUrl) return;
    try {
      // 1. Open URL
      await openUrl(joinUrl);
      // 2. Start Recording (wait a sec for app focus switch?)
      // Actually user might want to confirm recording? Protocol says "one-click".
      onStartRecording(subject);
    } catch (e) {
      console.error("Failed to join", e);
    }
  };

  // "14:05"-style local time for an ISO timestamp.
  const formatTime = (isoString: string) => {
    return new Date(isoString).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
  };

  // "Today" for today's events, otherwise a short localized date.
  const formatDate = (isoString: string) => {
    const date = new Date(isoString);
    const today = new Date();
    if (date.toDateString() === today.toDateString()) return "Today";
    return date.toLocaleDateString([], { weekday: 'short', month: 'short', day: 'numeric' });
  };

  return (
    <div className="flex flex-col w-full h-full bg-background p-6">
      <h1 className="text-2xl font-bold mb-6 flex items-center gap-2">
        <Calendar className="w-8 h-8" />
        Upcoming Meetings
      </h1>
      {/* Auth Section */}
      {!isAuthenticated ? (
        <div className="flex flex-col items-center justify-center flex-1 gap-6 text-center max-w-md mx-auto">
          <div className="bg-secondary/30 p-8 rounded-xl border border-border">
            <Calendar className="w-16 h-16 text-muted-foreground mx-auto mb-4" />
            <h2 className="text-lg font-semibold mb-2">Connect Microsoft 365</h2>
            <p className="text-sm text-muted-foreground mb-6">
              Connect your account to see upcoming Teams & Zoom meetings and join them with one click.
            </p>
            <div className="flex flex-col gap-3">
              <input
                type="text"
                placeholder="Client ID (Dynamics/Azure)"
                value={clientId}
                onChange={(e) => setClientId(e.target.value)}
                className="text-sm p-2 rounded border border-input bg-background w-full"
              />
              <button
                onClick={handleLogin}
                disabled={loading || !clientId}
                className="bg-primary text-primary-foreground px-4 py-2 rounded-md text-sm flex items-center justify-center gap-2 hover:opacity-90 disabled:opacity-50 w-full transition-all"
              >
                {loading ? <RefreshCw className="animate-spin" size={16} /> : <LogIn size={16} />}
                Connect Account
              </button>
            </div>
            {error && (
              <div className="mt-4 p-3 bg-destructive/10 text-destructive text-xs rounded-md text-left break-all">
                <strong>Error:</strong> {error}
              </div>
            )}
            <p className="text-[10px] text-muted-foreground mt-4 px-2">
              Note: Requires an Azure App Registration (Multitenant) with redirect URI: <br />
              <code className="bg-secondary px-1 rounded">http://localhost:14200/auth/callback</code>
            </p>
          </div>
        </div>
      ) : (
        <div className="flex flex-col flex-1 overflow-hidden">
          <div className="flex justify-between items-center mb-4 px-1">
            <span className="text-sm text-muted-foreground font-medium">Next 7 Days</span>
            <div className="flex gap-2">
              <button onClick={() => fetchEvents(token)} disabled={loading} className="text-muted-foreground hover:text-foreground p-1 rounded hover:bg-secondary transition-colors" title="Refresh">
                <RefreshCw size={16} className={loading ? 'animate-spin' : ''} />
              </button>
              <button onClick={logout} className="text-muted-foreground hover:text-destructive p-1 rounded hover:bg-destructive/10 transition-colors" title="Logout">
                <LogIn size={16} className="rotate-180" />
              </button>
            </div>
          </div>
          {events.length === 0 && !loading && (
            <div className="flex-1 flex flex-col items-center justify-center text-muted-foreground">
              {/* No meetings empty state (only if no error) */}
              <Calendar size={48} className="mb-4 opacity-20" />
              <p>No upcoming meetings found for the next 7 days.</p>
            </div>
          )}
          {error && (
            <div className="m-4 p-3 bg-destructive/10 text-destructive text-sm rounded-md flex items-center justify-between">
              <span>{error}</span>
              <button onClick={() => fetchEvents(token)} className="underline hover:no-underline ml-2">Retry</button>
            </div>
          )}
          <div className="flex-1 overflow-y-auto pr-2 space-y-3">
            {events.map(event => (
              <div key={event.id} className="bg-card border border-border rounded-xl p-4 hover:shadow-md transition-all group">
                <div className="flex justify-between items-start">
                  <div className="flex flex-col gap-1">
                    <div className="flex items-baseline gap-2">
                      <span className="text-sm font-bold text-primary bg-primary/10 px-2 py-0.5 rounded">
                        {formatDate(event.start.dateTime)}
                      </span>
                      <span className="text-lg font-mono font-medium">
                        {formatTime(event.start.dateTime)}
                      </span>
                    </div>
                    <h3 className="text-lg font-semibold group-hover:text-primary transition-colors">
                      {event.subject}
                    </h3>
                    {event.location?.displayName && (
                      <div className="text-sm text-muted-foreground">
                        📍 {event.location.displayName}
                      </div>
                    )}
                  </div>
                  {event.onlineMeeting?.joinUrl ? (
                    <button
                      onClick={() => handleJoin(event.onlineMeeting?.joinUrl, event.subject)}
                      className="bg-green-600 hover:bg-green-700 text-white px-4 py-2 rounded-lg flex items-center gap-2 shadow-sm hover:shadow transition-all"
                    >
                      <Video size={18} />
                      <span className="font-semibold">Join & Record</span>
                    </button>
                  ) : (
                    <div className="px-3 py-1.5 bg-secondary text-muted-foreground text-xs rounded-lg italic">
                      No online link
                    </div>
                  )}
                </div>
                {/* Expand/Collapse Button */}
                <button
                  onClick={() => toggleExpand(event.id)}
                  className="text-xs text-muted-foreground hover:text-primary mt-2 flex items-center gap-1 transition-colors w-full justify-center py-1 bg-secondary/30 hover:bg-secondary/50 rounded"
                >
                  {expandedIds.has(event.id) ? "Hide Details" : "Show Details"}
                </button>
                {/* Expanded Content */}
                {expandedIds.has(event.id) && (
                  <div className="mt-3 text-sm text-foreground/80 bg-background/50 p-3 rounded border border-border/50 animate-in fade-in slide-in-from-top-1">
                    {event.body?.content ? (
                      <div
                        className="prose prose-sm dark:prose-invert max-w-none break-words"
                        dangerouslySetInnerHTML={{ __html: event.body.content }}
                      />
                    ) : (
                      <p className="whitespace-pre-wrap">{event.bodyPreview || "No details available."}</p>
                    )}
                    {event.attendees && event.attendees.length > 0 && (
                      <div className="mt-4 pt-4 border-t border-border/50">
                        <h4 className="text-sm font-semibold text-foreground mb-3 flex items-center gap-2">
                          👥 Attendees
                          <span className="text-xs font-normal text-muted-foreground bg-secondary px-1.5 py-0.5 rounded-full">
                            {event.attendees.length}
                          </span>
                        </h4>
                        <div className="flex flex-wrap gap-2">
                          {event.attendees.map((att, i) => (
                            <div key={i} className="flex items-center gap-2 bg-secondary/50 border border-border/50 px-3 py-1.5 rounded-lg text-sm transition-colors hover:bg-secondary hover:border-border">
                              <div className={`w-2 h-2 rounded-full shrink-0 ${att.status.response === 'accepted' ? 'bg-green-500 shadow-[0_0_4px_rgba(34,197,94,0.4)]' :
                                att.status.response === 'declined' ? 'bg-red-500' : 'bg-yellow-500'
                                }`} title={`Status: ${att.status.response}`} />
                              <span className="font-medium truncate max-w-[200px]" title={att.emailAddress.address}>
                                {att.emailAddress.name || att.emailAddress.address}
                              </span>
                            </div>
                          ))}
                        </div>
                      </div>
                    )}
                  </div>
                )}
              </div>
            ))}
          </div>
        </div>
      )}
    </div>
  );
}

View File

@@ -1,12 +1,14 @@
import React, { useState, useEffect } from 'react';
import { Mic, Square } from 'lucide-react';
import { Mic, Square, Users, Headphones } from 'lucide-react';
import { invoke } from "@tauri-apps/api/core";
import { listen } from '@tauri-apps/api/event';
import logo from '../assets/logo.png'; // Import logo
interface PromptTemplate {
id: string;
name: string;
content: string;
keywords?: string[];
}
interface HistoryItem {
@@ -32,7 +34,12 @@ interface RecorderProps {
onDeleteHistory: (id: string) => void;
onLoadHistory: (item: HistoryItem) => void;
savePath: string | null;
onRecordingComplete: () => void;
autoStart?: boolean;
recordingSubject?: string;
onAutoStartHandled?: () => void;
addToast: (msg: string, type: 'success' | 'error' | 'info', duration?: number) => void;
}
interface AudioDevice {
@@ -43,7 +50,8 @@ interface AudioDevice {
const Recorder: React.FC<RecorderProps> = ({
apiKey, productId, prompts,
setTranscription, setSummary,
onSaveToHistory, savePath, onRecordingComplete
onSaveToHistory, savePath, onRecordingComplete,
onOpenSettings, addToast, ...props
}) => {
const [isRecording, setIsRecording] = useState(false);
const [isPaused, setIsPaused] = useState(false);
@@ -51,8 +59,17 @@ const Recorder: React.FC<RecorderProps> = ({
const [selectedDevice, setSelectedDevice] = useState<string>('');
const [selectedPromptId, setSelectedPromptId] = useState<string>('');
const [selectedModel, setSelectedModel] = useState<string>('mixtral');
const [recordingMode, setRecordingMode] = useState<'voice' | 'meeting'>('voice');
const [devices, setDevices] = useState<AudioDevice[]>([]);
const [availableModels, setAvailableModels] = useState<Array<{ id: string, name: string }>>([]);
const [lastSpeechTime, setLastSpeechTime] = useState<number>(Date.now());
const [silenceDuration, setSilenceDuration] = useState(0);
// Filtered devices based on mode
const filteredDevices = devices.filter(d => {
const isVirtual = d.name.toLowerCase().includes('hearbit') || d.name.toLowerCase().includes('blackhole');
return recordingMode === 'meeting' ? isVirtual : !isVirtual;
});
useEffect(() => {
loadDevices();
@@ -95,12 +112,21 @@ const Recorder: React.FC<RecorderProps> = ({
setDevices(aliasedDevs);
// Select Hearbit mic by default if available and no selection made
// Smart Auto-select based on mode
if (!selectedDevice) {
const vb = aliasedDevs.find(d => d.name.includes('Hearbit Virtual Mic'));
if (vb) {
setSelectedDevice(vb.id);
} else if (aliasedDevs.length > 0) {
setSelectedDevice(aliasedDevs[0].id);
// Prioritize "Hearbit Audio" (Aggregate) over "Hearbit Virtual Mic" (BlackHole)
const aggregateDev = aliasedDevs.find(d => d.name === 'Hearbit Audio');
const virtualDev = aliasedDevs.find(d => d.name.includes('Hearbit Virtual'));
if (aggregateDev) {
setRecordingMode('meeting');
setSelectedDevice(aggregateDev.id);
} else if (virtualDev) {
setRecordingMode('meeting');
setSelectedDevice(virtualDev.id);
} else {
setRecordingMode('voice');
if (aliasedDevs.length > 0) setSelectedDevice(aliasedDevs[0].id);
}
}
} catch (e) {
@@ -113,26 +139,114 @@ const Recorder: React.FC<RecorderProps> = ({
await invoke('open_audio_midi_setup');
} catch (e) {
console.error(e);
addToast('Failed to open Audio Setup', 'error');
setStatus('Failed to open Audio Setup');
}
};
const startRecording = async () => {
const startRecording = async (deviceIdOverride?: string) => {
try {
setStatus('Starting...');
await invoke('start_recording', { deviceId: selectedDevice, savePath: savePath || null });
setStatus('Starting...');
// Check override or state
const targetDeviceId = deviceIdOverride || selectedDevice;
// Pass customFilename (camelCase key maps to snake_case in Rust automatically or we need to check Tauri mapping, usually it maps camel to camel? Rust expects snake. Let's use snake_case in invoke args to be safe)
await invoke('start_recording', { deviceId: targetDeviceId, savePath: savePath || null, customFilename: props.recordingSubject || null });
setIsRecording(true);
setIsPaused(false);
setTranscription('');
setSummary('');
setStatus('Recording...');
addToast('Recording started', 'success', 2000);
} catch (e) {
console.error(e);
setStatus(`Error: ${e}`);
addToast(`Error starting recording: ${e}`, 'error');
setIsRecording(false);
}
};
// VAD & Auto-Stop Logic
// Subscribes to backend voice-activity events while recording and auto-stops the
// recording after 30s of continuous silence. The 1s interval re-derives silence
// duration from lastSpeechTime so it is robust across re-renders.
// NOTE(review): setupListener is async — if the effect cleans up before `listen`
// resolves, `unlisten` is still undefined and that listener leaks; confirm and
// consider guarding with an "active" flag. Also, having lastSpeechTime in the
// dependency list re-creates the interval and listener on every speech event.
useEffect(() => {
  let unlisten: () => void;
  const setupListener = async () => {
    unlisten = await listen<{ is_speech: boolean, probability: number }>('vad-event', (event) => {
      if (event.payload.is_speech) {
        setLastSpeechTime(Date.now());
        setSilenceDuration(0);
      }
    });
  };
  if (isRecording && !isPaused) {
    setupListener();
    setLastSpeechTime(Date.now()); // Reset on start
  }
  const interval = setInterval(() => {
    if (isRecording && !isPaused) {
      const diff = (Date.now() - lastSpeechTime) / 1000;
      setSilenceDuration(diff);
      // Auto-stop after 30 seconds of silence
      if (diff > 30) { // 30 seconds
        console.log("Auto-stopping due to silence");
        setStatus("Auto-stopping (Silence detected)...");
        stopRecording();
      }
    }
  }, 1000);
  return () => {
    if (unlisten) unlisten();
    clearInterval(interval);
  };
}, [isRecording, isPaused, lastSpeechTime]);
// Handle Auto Start Prop
useEffect(() => {
if (props.autoStart && !isRecording && devices.length > 0) {
// Force meeting mode for auto-joins
if (recordingMode !== 'meeting') {
setRecordingMode('meeting');
}
// Find best device (Race condition fix: we can't rely on selectedDevice state update being instant)
const aggregateDev = devices.find(d => d.name === 'Hearbit Audio');
const virtualDev = devices.find(d => d.name.includes('Hearbit Virtual'));
const bestDevice = aggregateDev || virtualDev;
if (bestDevice) {
setSelectedDevice(bestDevice.id); // Update UI state for consistency
console.log("Auto-starting with device:", bestDevice.name);
startRecording(bestDevice.id); // Pass ID directly
} else {
console.warn("Auto-start: No meeting device found, trying default.");
startRecording();
}
if (props.onAutoStartHandled) {
props.onAutoStartHandled();
}
}
}, [props.autoStart, devices]);
// Handle Custom Event (Legacy/Fallback)
useEffect(() => {
const handleStartReq = () => {
if (!isRecording) {
if (recordingMode !== 'meeting') {
setRecordingMode('meeting');
}
startRecording();
}
};
window.addEventListener('start-recording-req', handleStartReq);
return () => window.removeEventListener('start-recording-req', handleStartReq);
}, [isRecording, recordingMode]);
const togglePause = async () => {
try {
if (isPaused) {
@@ -172,8 +286,40 @@ const Recorder: React.FC<RecorderProps> = ({
return;
}
// Find selected prompt content
const activePrompt = prompts.find(p => p.id === selectedPromptId);
// Find selected prompt content - SMART SELECTION
let activePrompt = prompts.find(p => p.id === selectedPromptId);
// Smart Auto-Select based on keywords
const lowerText = transText.toLowerCase();
let bestMatchId = selectedPromptId;
let maxMatches = 0;
for (const p of prompts) {
if (!p.keywords) continue;
let matches = 0;
for (const kw of p.keywords) {
if (lowerText.includes(kw.toLowerCase())) {
matches++;
}
}
if (matches > maxMatches) {
maxMatches = matches;
bestMatchId = p.id;
}
}
if (bestMatchId !== selectedPromptId) {
const newPrompt = prompts.find(p => p.id === bestMatchId);
if (newPrompt) {
console.log(`Smart Select: Switched to '${newPrompt.name}' with ${maxMatches} matches.`);
setStatus(`Smart Select: Using "${newPrompt.name}"...`);
addToast(`Smart Select: Switched to "${newPrompt.name}"`, 'success', 4000);
activePrompt = newPrompt;
// Optional: Update UI selection? setSelectedPromptId(bestMatchId);
// Let's verify with user preference? For now, we override as "Magic".
}
}
const promptContent = activePrompt ? activePrompt.content : "Summarize this.";
setStatus(`Summarizing (${selectedModel})...`);
@@ -190,11 +336,13 @@ const Recorder: React.FC<RecorderProps> = ({
onSaveToHistory(transText, sumText);
setStatus('Done!');
addToast('Transcription & Summary complete!', 'success', 4000);
onRecordingComplete(); // Auto-switch tab
setTimeout(() => setStatus('Ready to record'), 3000);
} catch (e) {
console.error(e);
setStatus(`Error: ${e}`);
addToast(`Error processing: ${e}`, 'error');
}
};
@@ -227,12 +375,17 @@ const Recorder: React.FC<RecorderProps> = ({
<p className="text-muted-foreground mb-6 text-center text-sm h-6">
{status}
{isRecording && !isPaused && silenceDuration > 10 && (
<span className="block text-xs text-yellow-500 mt-1 opacity-80">
Silence detected: {Math.floor(silenceDuration)}s (Auto-stop in {30 - Math.floor(silenceDuration)}s)
</span>
)}
</p>
<div className="w-full max-w-sm space-y-4 mb-6 shrink-0">
{!isRecording ? (
<button
onClick={startRecording}
onClick={() => startRecording()}
disabled={!apiKey || !productId}
className="w-full py-4 text-lg font-semibold bg-primary text-primary-foreground rounded-lg hover:bg-primary/90 disabled:opacity-50 disabled:cursor-not-allowed transition-all shadow-md hover:shadow-lg"
>
@@ -260,22 +413,43 @@ const Recorder: React.FC<RecorderProps> = ({
)}
<div className="grid grid-cols-2 gap-4 pt-2">
<div>
<label className="text-xs font-semibold text-muted-foreground uppercase tracking-wider block mb-1">
Input Device
</label>
<select
value={selectedDevice}
onChange={(e) => setSelectedDevice(e.target.value)}
className="w-full p-2 text-sm bg-secondary rounded border border-border outline-none focus:ring-2 focus:ring-primary"
disabled={isRecording}
</div>
{/* INPUT DEVICE SECTION */}
<div className="col-span-2">
<div className="flex bg-secondary p-1 rounded-lg mb-2">
<button
onClick={() => { setRecordingMode('voice'); setSelectedDevice(''); }}
className={`flex-1 flex items-center justify-center gap-2 py-1.5 text-xs font-semibold rounded-md transition-all ${recordingMode === 'voice' ? 'bg-background shadow text-foreground' : 'text-muted-foreground hover:text-foreground'}`}
>
{devices.map(d => (
<option key={d.id} value={d.id}>{d.name}</option>
))}
{devices.length === 0 && <option value="">Loading devices...</option>}
</select>
<Headphones size={14} /> Voice Memo
</button>
<button
onClick={() => { setRecordingMode('meeting'); setSelectedDevice(''); }}
className={`flex-1 flex items-center justify-center gap-2 py-1.5 text-xs font-semibold rounded-md transition-all ${recordingMode === 'meeting' ? 'bg-background shadow text-foreground' : 'text-muted-foreground hover:text-foreground'}`}
>
<Users size={14} /> Meeting
</button>
</div>
<select
value={selectedDevice}
onChange={(e) => setSelectedDevice(e.target.value)}
className="w-full p-2 text-sm bg-secondary rounded border border-border outline-none focus:ring-2 focus:ring-primary"
disabled={isRecording}
>
{filteredDevices.map(d => (
<option key={d.id} value={d.id}>{d.name}</option>
))}
{filteredDevices.length === 0 && (
<option value="">
{recordingMode === 'meeting' ? 'No Meeting Device (Create in Settings)' : 'No Microphone Found'}
</option>
)}
</select>
</div>
<div className="col-span-2 grid grid-cols-2 gap-4">
<div>
<label className="text-xs font-semibold text-muted-foreground uppercase tracking-wider block mb-1">
LLM Model
@@ -311,11 +485,19 @@ const Recorder: React.FC<RecorderProps> = ({
</div>
<div className="flex flex-col gap-2 mt-2 w-full">
{recordingMode === 'meeting' && filteredDevices.length === 0 && (
<button
onClick={onOpenSettings}
className="text-xs bg-primary/10 text-primary hover:bg-primary/20 w-full text-center border border-primary/20 rounded p-2 mb-2 font-semibold"
>
🪄 Create "Hearbit Audio" Device
</button>
)}
<button
onClick={openAudioSetup}
className="text-xs text-muted-foreground hover:text-foreground w-full text-center border border-dashed border-border/50 rounded p-1"
>
Open Audio MIDI Setup (Configure Multi-Output)
Open Audio MIDI Setup
</button>
</div>
</div>

View File

@@ -1,6 +1,8 @@
import React, { useState } from 'react';
import { Save, FolderOpen, Lock, Upload, Download, Eye, EyeOff } from 'lucide-react';
import { open } from '@tauri-apps/plugin-dialog';
import { save, open } from '@tauri-apps/plugin-dialog';
import { writeTextFile } from '@tauri-apps/plugin-fs';
import { invoke } from '@tauri-apps/api/core';
import { encryptData, decryptData } from '../utils/backup';
import { PromptTemplate } from '../App';
@@ -72,16 +74,19 @@ const Settings: React.FC<SettingsProps> = ({ apiKey, productId, prompts, savePat
savePath: localSavePath
};
const encrypted = await encryptData(data, backupPassword);
const blob = new Blob([encrypted], { type: 'text/plain' });
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = `hearbit_backup_${new Date().toISOString().slice(0, 10)}.conf`;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
URL.revokeObjectURL(url);
setStatusIdx('Configuration exported successfully!');
const filePath = await save({
defaultPath: `hearbit_backup_${new Date().toISOString().slice(0, 10)}.conf`,
filters: [{
name: 'Hearbit Config',
extensions: ['conf']
}]
});
if (filePath) {
await writeTextFile(filePath, encrypted);
setStatusIdx(`Configuration exported to: ${filePath}`);
}
} catch (e) {
console.error(e);
setStatusIdx('Export failed.');
@@ -131,6 +136,17 @@ const Settings: React.FC<SettingsProps> = ({ apiKey, productId, prompts, savePat
}
};
// Ask the Rust backend to create the "Hearbit Audio" virtual device used for
// meeting recordings, reporting progress and failures via the status line.
const handleCreateDevice = async () => {
  try {
    setStatusIdx('Creating Hearbit Audio device...');
    await invoke('create_hearbit_audio_device');
    setStatusIdx('Success! "Hearbit Audio" device created.');
  } catch (e) {
    console.error(e);
    setStatusIdx(`Error: ${e}`);
  }
};
return (
<div className="flex flex-col h-full bg-background font-mono text-sm relative">
{/* Import Password Modal */}
@@ -179,7 +195,7 @@ const Settings: React.FC<SettingsProps> = ({ apiKey, productId, prompts, savePat
<div className="p-4 border-b border-border/40 bg-secondary/20 flex justify-between items-center">
<span className="text-xs uppercase tracking-wider text-muted-foreground font-semibold">Settings</span>
<button onClick={handleSave} className="flex items-center gap-1 text-primary hover:text-primary/80 transition-colors font-semibold">
<button onClick={handleSave} className="flex items-center gap-1 text-primary hover:text-primary/80 transition-all font-semibold active:scale-95">
<Save size={16} /> Save
</button>
</div>
@@ -220,13 +236,25 @@ const Settings: React.FC<SettingsProps> = ({ apiKey, productId, prompts, savePat
/>
<button
onClick={handleSelectFolder}
className="p-2 aspect-square flex items-center justify-center bg-secondary hover:bg-secondary/80 border border-border rounded text-foreground transition-colors"
className="p-2 aspect-square flex items-center justify-center bg-secondary hover:bg-secondary/80 border border-border rounded text-foreground transition-all active:scale-95"
title="Pick Folder"
>
<FolderOpen size={16} />
</button>
</div>
</div>
<div className="pt-2 border-t border-border/50 mt-2">
<label className="block text-sm font-medium mb-1 text-foreground">Advanced Audio Setup</label>
<p className="text-xs text-muted-foreground mb-2">
For automatic recording in Teams, create a virtual device combining your Mic and computer audio.
</p>
<button
onClick={handleCreateDevice}
className="bg-secondary hover:bg-secondary/80 text-xs px-3 py-2 rounded border border-border transition-all active:scale-95 flex items-center gap-2"
>
<span>🪄</span> Create "Hearbit Audio" Device
</button>
</div>
</div>
<div className="space-y-4 border rounded p-4 border-border/50">
@@ -256,13 +284,13 @@ const Settings: React.FC<SettingsProps> = ({ apiKey, productId, prompts, savePat
<div className="flex gap-2 pt-2">
<button
onClick={handleExport}
className="flex-1 flex items-center justify-center gap-2 py-2 px-4 rounded bg-secondary hover:bg-secondary/80 border border-border text-foreground transition-all text-xs font-semibold"
className="flex-1 flex items-center justify-center gap-2 py-2 px-4 rounded bg-secondary hover:bg-secondary/80 border border-border text-foreground transition-all text-xs font-semibold active:scale-95"
>
<Download size={14} /> Export Config
</button>
<button
onClick={triggerImport}
className="flex-1 flex items-center justify-center gap-2 py-2 px-4 rounded bg-secondary hover:bg-secondary/80 border border-border text-foreground transition-all text-xs font-semibold"
className="flex-1 flex items-center justify-center gap-2 py-2 px-4 rounded bg-secondary hover:bg-secondary/80 border border-border text-foreground transition-all text-xs font-semibold active:scale-95"
>
<Upload size={14} /> Import Config
</button>
@@ -279,7 +307,7 @@ const Settings: React.FC<SettingsProps> = ({ apiKey, productId, prompts, savePat
<div className="space-y-4 border rounded p-4 border-border/50">
<div className="flex justify-between items-center">
<h3 className="text-foreground font-semibold">Prompts</h3>
<button onClick={addPrompt} className="text-xs bg-primary text-primary-foreground px-2 py-1 rounded hover:bg-primary/90">
<button onClick={addPrompt} className="text-xs bg-primary text-primary-foreground px-2 py-1 rounded hover:bg-primary/90 transition-all active:scale-95">
+ Add Prompt
</button>
</div>

View File

@@ -1,8 +1,9 @@
import React from 'react';
import { Mic, Terminal, FileText } from 'lucide-react';
import { Mic, Terminal, FileText, Calendar } from 'lucide-react';
interface TabsProps {
currentTab: 'recorder' | 'logs' | 'transcription' | 'settings';
onTabChange: (tab: 'recorder' | 'logs' | 'transcription' | 'settings') => void;
currentTab: 'recorder' | 'logs' | 'transcription' | 'settings' | 'meetings' | 'history';
onTabChange: (tab: 'recorder' | 'logs' | 'transcription' | 'settings' | 'meetings' | 'history') => void;
}
const Tabs: React.FC<TabsProps> = ({ currentTab, onTabChange }) => {
@@ -22,6 +23,20 @@ const Tabs: React.FC<TabsProps> = ({ currentTab, onTabChange }) => {
<FileText size={16} />
Transcription
</button>
<button
onClick={() => onTabChange('meetings')}
className={`flex items-center gap-2 px-4 py-2 rounded-lg text-sm font-medium transition-colors ${currentTab === 'meetings' ? 'bg-secondary text-foreground' : 'text-muted-foreground hover:text-foreground hover:bg-secondary/50'}`}
>
<Calendar size={16} />
Meetings
</button>
<button
onClick={() => onTabChange('history')}
className={`flex items-center gap-2 px-4 py-2 rounded-lg text-sm font-medium transition-colors ${currentTab === 'history' ? 'bg-secondary text-foreground' : 'text-muted-foreground hover:text-foreground hover:bg-secondary/50'}`}
>
<FileText size={16} />
History
</button>
<button
onClick={() => onTabChange('logs')}
className={`flex items-center gap-2 px-4 py-1.5 rounded-full text-sm font-medium transition-all duration-200 ${currentTab === 'logs'

View File

@@ -0,0 +1,81 @@
import React, { useEffect, useState } from 'react';
import { X, CheckCircle, AlertCircle, Info } from 'lucide-react';
// Visual/semantic category of a toast; selects its background color and icon.
export type ToastType = 'success' | 'error' | 'info';

// Payload describing a single toast notification.
export interface ToastMessage {
  id: string;          // unique key; passed back to onClose so the container can remove this toast
  message: string;     // text shown in the toast body
  type: ToastType;     // picks color + icon (success/error/info)
  duration?: number;   // auto-dismiss delay in ms; the component falls back to 3000 when absent
}

// Props for the single-toast component.
interface ToastProps {
  toast: ToastMessage;
  onClose: (id: string) => void;  // called (after the exit animation) to remove the toast by id
}
/**
 * Renders one toast notification that slides/fades in, auto-dismisses after
 * `toast.duration` ms (default 3000), and can be closed manually via the X button.
 * Removal is delayed 300 ms after hiding so the exit CSS transition can finish.
 */
const Toast: React.FC<ToastProps> = ({ toast, onClose }) => {
  // Drives the slide/fade transition; starts hidden so the first paint can animate in.
  const [isVisible, setIsVisible] = useState(false);

  useEffect(() => {
    // Animate in on the next frame so the initial "hidden" classes are
    // committed first and the CSS transition actually runs.
    requestAnimationFrame(() => setIsVisible(true));

    // Auto-dismiss after the configured duration; the nested timeout waits
    // for the 300 ms exit animation before asking the container to remove us.
    let exitTimer: ReturnType<typeof setTimeout> | undefined;
    const dismissTimer = setTimeout(() => {
      setIsVisible(false);
      exitTimer = setTimeout(() => onClose(toast.id), 300);
    }, toast.duration || 3000);

    // Clear BOTH timers on unmount. The original cleanup cleared only the
    // outer timer, leaking the exit timer so onClose could fire (and update
    // parent state) after this component was already unmounted.
    return () => {
      clearTimeout(dismissTimer);
      if (exitTimer !== undefined) clearTimeout(exitTimer);
    };
  }, [toast, onClose]);

  const bgColors = {
    success: 'bg-green-600',
    error: 'bg-destructive',
    info: 'bg-blue-600'
  };
  const icon = {
    success: <CheckCircle size={18} />,
    error: <AlertCircle size={18} />,
    info: <Info size={18} />
  };

  return (
    <div
      className={`
        flex items-center gap-3 px-4 py-3 rounded-lg shadow-lg text-white mb-2 transition-all duration-300 transform
        ${bgColors[toast.type]}
        ${isVisible ? 'translate-x-0 opacity-100' : 'translate-x-full opacity-0'}
      `}
      style={{ minWidth: '300px', maxWidth: '400px' }}
    >
      <div className="shrink-0">
        {icon[toast.type]}
      </div>
      <p className="text-sm font-medium flex-1">{toast.message}</p>
      {/* Manual close: hide immediately, then remove after the exit animation. */}
      <button
        onClick={() => { setIsVisible(false); setTimeout(() => onClose(toast.id), 300); }}
        className="text-white/80 hover:text-white"
      >
        <X size={16} />
      </button>
    </div>
  );
};
export const ToastContainer: React.FC<{ toasts: ToastMessage[], removeToast: (id: string) => void }> = ({ toasts, removeToast }) => {
return (
<div className="fixed bottom-4 right-4 z-50 flex flex-col items-end pointer-events-none">
<div className="pointer-events-auto">
{toasts.map(t => (
<Toast key={t.id} toast={t} onClose={removeToast} />
))}
</div>
</div>
);
};
export default ToastContainer;