From 79db6adf45303413a985470f602e7be7a2f4eb69 Mon Sep 17 00:00:00 2001
From: "michael.borak"
Date: Wed, 21 Jan 2026 10:14:16 +0100
Subject: [PATCH] feat: v1.1.0 - Long meeting support, email in history, custom
logo
- Add MP3 conversion and chunking for files >18MB
- Support meetings up to 2+ hours
- Add email button directly in history view
- Implement custom logo upload in Settings for white-labeling
- Add read_image_as_base64 Rust command
- Update README with new features and ffmpeg requirement
---
README.md | 36 +++++++++-
src-tauri/Cargo.lock | 1 +
src-tauri/Cargo.toml | 1 +
src-tauri/src/lib.rs | 116 +++++++++++++++++++++++++++++++++
src/App.tsx | 3 +
src/components/HistoryView.tsx | 46 ++++++++++---
src/components/Import.tsx | 108 ++++++++++++++++++------------
src/components/Recorder.tsx | 77 ++++++++++++++++++----
src/components/Settings.tsx | 56 ++++++++++++++++
9 files changed, 378 insertions(+), 66 deletions(-)
diff --git a/README.md b/README.md
index 1946255..fe4b921 100644
--- a/README.md
+++ b/README.md
@@ -8,6 +8,10 @@
* **🎙️ Dual-Channel Recording**: seamlessly capture your voice and meeting audio from apps like Microsoft Teams, Zoom, or Google Meet.
* **📁 Import Audio Files**: Upload existing recordings (MP3, MP4, WAV, M4A, FLAC, OGG, AAC, WMA) for transcription and summarization.
+* **⏱️ Long Meeting Support**: Record meetings up to 2+ hours with automatic MP3 conversion and chunking.
+* **🎵 Smart Auto-Stop**:
+ * **Voice Memo Mode**: Automatically stops after 20 seconds of silence
+ * **Meeting Mode**: No auto-stop to capture full discussions
* **📅 Microsoft 365 Integration**:
* **Upcoming Meetings**: View your daily schedule and join with **one click**.
* **Meeting Details**: View full agenda and **invited attendee status** (Accepted/Declined).
@@ -23,9 +27,16 @@
## 🚀 Getting Started
-### 1. Prerequisites
-* **macOS** (Apple Silicon or Intel).
-* **BlackHole 2ch Driver** (Mandatory): Download from [existential.audio](https://existential.audio/blackhole/) or run `brew install blackhole-2ch`.
+### 1. Prerequisites
+
+* **macOS** (tested on macOS Monterey and later)
+* **BlackHole 2ch Driver** ([Download here](https://existential.audio/blackhole/))
+ * **MANDATORY** for system audio capture (MS Teams, Zoom, etc.)
+ * Without this, you can only record microphone input
+* **ffmpeg** for audio processing
+ ```bash
+ brew install ffmpeg
+ ```
* **Infomaniak AI Account**: You need an API Key and Product ID from the [Infomaniak Developer Portal](https://manager.infomaniak.com/).
### 2. Installation
@@ -93,6 +104,25 @@ This is a standard macOS warning for apps not signed with an Apple Developer Cer
3. Enter your password.
4. Open the app again.
+### Long Meetings (> 1 hour)
+
+**Automatic Handling**: The app automatically handles long recordings:
+- **MP3 Conversion**: All recordings are converted to MP3 (64kbps) for 10x compression
+- **Chunking**: Files ≥18 MB are automatically split into 10-minute segments
+- **Processing**: Each segment is transcribed separately and merged with timestamps
+
+**Example**: A 2-hour meeting:
+1. Records as WAV (~120 MB)
+2. Converts to MP3 (~12 MB)
+3. Stays under limit → No chunking needed!
+
+**Very long meetings** (e.g., all-day workshops):
+- Automatically chunks into segments
+- Shows progress: "Processing chunk 1/15..."
+- Merges all transcriptions seamlessly
+
+### No Audio / Can't Hear Meeting Participants
+
---
## 👨‍💻 Development
diff --git a/src-tauri/Cargo.lock b/src-tauri/Cargo.lock
index 261f789..ec58168 100644
--- a/src-tauri/Cargo.lock
+++ b/src-tauri/Cargo.lock
@@ -1741,6 +1741,7 @@ checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
name = "hearbit-ai"
version = "0.1.2"
dependencies = [
+ "base64 0.22.1",
"chrono",
"cpal",
"hound",
diff --git a/src-tauri/Cargo.toml b/src-tauri/Cargo.toml
index 9e2ac21..c274acf 100644
--- a/src-tauri/Cargo.toml
+++ b/src-tauri/Cargo.toml
@@ -37,3 +37,4 @@ url = "2.5"
lettre = { version = "0.11", features = ["tokio1", "tokio1-native-tls", "builder"] }
tauri-plugin-log = "2.0.0"
tauri-plugin-shell = "2.3.4"
+base64 = "0.22"
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index a4f8698..cdfc876 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -9,6 +9,7 @@ use std::process::Command;
use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
use std::time::Duration;
use tokio::time::sleep;
+use base64::Engine;
mod audio_processor;
use audio_processor::AudioProcessor;
@@ -627,6 +628,118 @@ fn get_audio_metadata(app: AppHandle, file_path: String) -> Result Result {
+ emit_log(&app, "INFO", &format!("Converting to MP3: {}", wav_path));
+
+ let mp3_path = wav_path.replace(".wav", ".mp3");
+
+ let output = Command::new("ffmpeg")
+ .args([
+ "-i", &wav_path,
+ "-codec:a", "libmp3lame",
+ "-b:a", "64k",
+ "-y", // overwrite
+ &mp3_path
+ ])
+ .output()
+ .map_err(|e| format!("Failed to execute ffmpeg: {}", e))?;
+
+ if output.status.success() {
+ emit_log(&app, "SUCCESS", &format!("MP3 created: {}", mp3_path));
+ Ok(mp3_path)
+ } else {
+ let error = String::from_utf8_lossy(&output.stderr);
+ emit_log(&app, "ERROR", &format!("MP3 conversion failed: {}", error));
+ Err(format!("MP3 conversion failed: {}", error))
+ }
+}
+
+/// Split an audio file into fixed-length segments with ffmpeg so each piece
+/// stays under the transcription API's upload limit.
+///
+/// `chunk_minutes` is the target segment length; the last chunk may be
+/// shorter. Returns the ordered list of chunk file paths, or an error string
+/// if ffprobe/ffmpeg fails or the duration cannot be determined.
+#[tauri::command]
+fn chunk_audio(app: AppHandle, file_path: String, chunk_minutes: u32) -> Result<Vec<String>, String> {
+    emit_log(&app, "INFO", &format!("Chunking audio: {} ({}min chunks)", file_path, chunk_minutes));
+
+    // Guard against a zero-length chunk size, which would otherwise divide
+    // by zero when computing the chunk count below.
+    if chunk_minutes == 0 {
+        return Err("chunk_minutes must be greater than 0".to_string());
+    }
+    let chunk_seconds = chunk_minutes * 60;
+
+    // Probe the total duration (fractional seconds) using ffprobe.
+    let duration_output = Command::new("ffprobe")
+        .args([
+            "-v", "error",
+            "-show_entries", "format=duration",
+            "-of", "default=noprint_wrappers=1:nokey=1",
+            &file_path,
+        ])
+        .output()
+        .map_err(|e| format!("Failed to get duration: {}", e))?;
+
+    // Check the exit status before parsing stdout; on failure ffprobe prints
+    // nothing useful to stdout and the parse error would hide the real cause.
+    if !duration_output.status.success() {
+        let error = String::from_utf8_lossy(&duration_output.stderr);
+        return Err(format!("ffprobe failed: {}", error));
+    }
+
+    let duration_str = String::from_utf8_lossy(&duration_output.stdout);
+    let duration: f64 = duration_str.trim().parse()
+        .map_err(|_| format!("Failed to parse duration: {:?}", duration_str.trim()))?;
+
+    let num_chunks = (duration / chunk_seconds as f64).ceil() as usize;
+    emit_log(&app, "INFO", &format!("Total duration: {}s, creating {} chunks", duration, num_chunks));
+
+    // Derive the chunk basename with `strip_suffix`; unlike `replace`, it only
+    // removes a trailing ".mp3" and cannot corrupt paths that contain ".mp3"
+    // elsewhere in a directory name.
+    let base_path = file_path.strip_suffix(".mp3").unwrap_or(&file_path);
+
+    let mut chunk_paths = Vec::with_capacity(num_chunks);
+    for i in 0..num_chunks {
+        let start_time = i as u32 * chunk_seconds;
+        let chunk_path = format!("{}_chunk_{}.mp3", base_path, i);
+
+        // `-c copy` avoids re-encoding; cut points snap to the nearest frame,
+        // which is acceptable for transcription segments.
+        let output = Command::new("ffmpeg")
+            .args([
+                "-i", &file_path,
+                "-ss", &start_time.to_string(),
+                "-t", &chunk_seconds.to_string(),
+                "-c", "copy",
+                "-y",
+                &chunk_path,
+            ])
+            .output()
+            .map_err(|e| format!("Failed to create chunk {}: {}", i, e))?;
+
+        if !output.status.success() {
+            let error = String::from_utf8_lossy(&output.stderr);
+            return Err(format!("Chunk {} failed: {}", i, error));
+        }
+
+        chunk_paths.push(chunk_path);
+    }
+
+    emit_log(&app, "SUCCESS", &format!("Created {} chunks", chunk_paths.len()));
+    Ok(chunk_paths)
+}
+
+/// Read an image file from disk and return it as a `data:` URL
+/// (base64-encoded), suitable for direct use in an `<img src=...>` attribute
+/// in the frontend (custom logo white-labeling).
+///
+/// The MIME type is inferred from the file extension; unknown extensions
+/// fall back to `image/png`.
+#[tauri::command]
+fn read_image_as_base64(app: AppHandle, file_path: String) -> Result<String, String> {
+    emit_log(&app, "INFO", &format!("Reading image as base64: {}", file_path));
+
+    let bytes = std::fs::read(&file_path)
+        .map_err(|e| format!("Failed to read file: {}", e))?;
+
+    // Detect image type from the (case-insensitive) file extension.
+    let extension = std::path::Path::new(&file_path)
+        .extension()
+        .and_then(|e| e.to_str())
+        .unwrap_or("png")
+        .to_lowercase();
+
+    let mime_type = match extension.as_str() {
+        "jpg" | "jpeg" => "image/jpeg",
+        "png" => "image/png",
+        "svg" => "image/svg+xml",
+        "gif" => "image/gif",
+        _ => "image/png",
+    };
+
+    let base64_str = base64::prelude::BASE64_STANDARD.encode(&bytes);
+    let data_url = format!("data:{};base64,{}", mime_type, base64_str);
+
+    // Log the original file size; the base64 string is ~33% larger than the
+    // file, so reporting its length as "bytes" would be misleading.
+    emit_log(&app, "SUCCESS", &format!("Image converted to base64 ({} bytes)", bytes.len()));
+    Ok(data_url)
+}
+
#[tauri::command]
fn open_audio_midi_setup() -> Result<(), String> {
Command::new("open")
@@ -781,6 +894,9 @@ pub fn run() {
save_text_file,
read_log_file,
get_audio_metadata,
+ convert_to_mp3,
+ chunk_audio,
+ read_image_as_base64,
email::send_smtp_email
])
.run(tauri::generate_context!())
diff --git a/src/App.tsx b/src/App.tsx
index 40ed9de..fa862dc 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -420,6 +420,9 @@ Thanks!`
onLoad={handleLoadHistory}
onDelete={handleDeleteHistory}
onRename={handleRenameHistory}
+ smtpConfig={smtpConfig}
+ emailTemplates={emailTemplates}
+ addToast={addToast}
/>
)}
diff --git a/src/components/HistoryView.tsx b/src/components/HistoryView.tsx
index ef115ad..cb28e73 100644
--- a/src/components/HistoryView.tsx
+++ b/src/components/HistoryView.tsx
@@ -1,5 +1,9 @@
-import { FileText, Trash2, Calendar, Pencil, Check, X } from 'lucide-react';
+import { FileText, Trash2, Calendar, Pencil, Check, X, Mail } from 'lucide-react';
import { useState } from 'react';
+import EmailPreviewModal from './EmailPreviewModal';
+import { SmtpConfig } from './Settings';
+import { EmailTemplate } from '../App';
+import { ToastType } from './ui/Toast';
interface HistoryItem {
id: string;
@@ -15,11 +19,15 @@ interface HistoryViewProps {
onLoad: (item: HistoryItem) => void;
onDelete: (id: string) => void;
onRename: (id: string, newSubject: string) => void;
+ smtpConfig: SmtpConfig;
+ emailTemplates: EmailTemplate[];
+ addToast: (message: string, type: ToastType, duration?: number) => void;
}
-export default function HistoryView({ history, onLoad, onDelete, onRename }: HistoryViewProps) {
+export default function HistoryView({ history, onLoad, onDelete, onRename, smtpConfig, emailTemplates, addToast }: HistoryViewProps) {
const [editingId, setEditingId] = useState(null);
const [editValue, setEditValue] = useState("");
+ const [emailModalItem, setEmailModalItem] = useState(null);
const startEditing = (item: HistoryItem) => {
setEditingId(item.id);
@@ -104,18 +112,38 @@ export default function HistoryView({ history, onLoad, onDelete, onRename }: His
-
+
+
+
+
))}
)}
+
+ setEmailModalItem(null)}
+ initialRecipients={[]}
+ initialSubject={emailModalItem?.subject || "Meeting Summary"}
+ initialBody={emailModalItem?.summary || ""}
+ emailTemplates={emailTemplates}
+ smtpConfig={smtpConfig ? { ...smtpConfig, port: Number(smtpConfig.port) } : null}
+ addToast={addToast}
+ />
);
}
diff --git a/src/components/Import.tsx b/src/components/Import.tsx
index e862b80..a1eed82 100644
--- a/src/components/Import.tsx
+++ b/src/components/Import.tsx
@@ -1,4 +1,4 @@
-import React, { useState, useCallback } from 'react';
+import React, { useState } from 'react';
import { Upload, FileAudio, X, Check, Loader2 } from 'lucide-react';
import { invoke } from "@tauri-apps/api/core";
import { open } from '@tauri-apps/plugin-dialog';
@@ -44,7 +44,6 @@ const Import: React.FC = ({
setTranscription,
setSummary
}) => {
- const [isDragging, setIsDragging] = useState(false);
const [selectedFile, setSelectedFile] = useState(null);
const [metadata, setMetadata] = useState(null);
const [meetingTitle, setMeetingTitle] = useState('');
@@ -94,35 +93,17 @@ const Import: React.FC = ({
setMeetingTitle(extractFilename(filePath));
try {
- // Get metadata (we'll need to implement this in Rust backend)
const meta = await invoke('get_audio_metadata', { filePath });
setMetadata(meta);
setStage('idle');
addToast('File loaded successfully', 'success', 2000);
} catch (e) {
console.error('Metadata error:', e);
- // Even if metadata fails, allow processing
setMetadata(null);
setStage('idle');
}
};
- const handleDrop = useCallback((e: React.DragEvent) => {
- e.preventDefault();
- setIsDragging(false);
-
- const files = Array.from(e.dataTransfer.files);
- if (files.length > 0) {
- // @ts-ignore - Tauri provides path on File object
- const filePath = files[0].path;
- if (filePath) {
- handleFileSelect(filePath);
- } else {
- addToast('Failed to get file path', 'error');
- }
- }
- }, []);
-
const handleManualSelect = async () => {
try {
const selected = await open({
@@ -150,12 +131,62 @@ const Import: React.FC = ({
}
try {
- setStage('transcribing');
- const transText = await invoke('transcribe_audio', {
- filePath: selectedFile,
- apiKey,
- productId
- });
+ // Check file extension
+ const isWav = selectedFile.toLowerCase().endsWith('.wav');
+ let processFile = selectedFile;
+
+ // Convert WAV to MP3 if needed
+ if (isWav) {
+ setStage('validating');
+ addToast('Converting WAV to MP3...', 'info', 2000);
+ processFile = await invoke('convert_to_mp3', { wavPath: selectedFile });
+ }
+
+ // Get file size to check if chunking needed
+ const metadata = await invoke('get_audio_metadata', { filePath: processFile });
+ const sizeMB = metadata.size / (1024 * 1024);
+
+ let transText = '';
+
+ // Check if chunking needed for large files
+ if (sizeMB >= 18) {
+ // CHUNKING PATH for large files
+ setStage('validating');
+ addToast(`Large file (${sizeMB.toFixed(1)}MB). Splitting into chunks...`, 'info', 4000);
+
+ const chunks = await invoke('chunk_audio', {
+ filePath: processFile,
+ chunkMinutes: 10
+ });
+
+ addToast(`Processing ${chunks.length} chunks...`, 'info', 4000);
+
+ let allTranscriptions: string[] = [];
+
+ for (let i = 0; i < chunks.length; i++) {
+ setStage('transcribing');
+ addToast(`Transcribing chunk ${i + 1}/${chunks.length}...`, 'info', 2000);
+ const chunkText = await invoke('transcribe_audio', {
+ filePath: chunks[i],
+ apiKey,
+ productId
+ });
+ allTranscriptions.push(chunkText);
+ }
+
+ // Merge transcriptions
+ transText = allTranscriptions.join('\n\n--- Next Segment ---\n\n');
+ addToast('All chunks transcribed successfully!', 'success', 3000);
+ } else {
+ // NORMAL PATH for small files
+ setStage('transcribing');
+ transText = await invoke('transcribe_audio', {
+ filePath: processFile,
+ apiKey,
+ productId
+ });
+ }
+
setTranscription(transText);
if (!transText || transText.trim().length === 0) {
@@ -246,26 +277,21 @@ const Import: React.FC = ({
{/* Header */}
-

+
{/* Main Content */}
Import Audio File
- Upload a recording for transcription and summarization
+ Select an audio file for transcription and summarization
- {/* Drag & Drop Zone */}
+ {/* File Selection Zone */}
{ e.preventDefault(); setIsDragging(true); }}
- onDragLeave={() => setIsDragging(false)}
- onDrop={handleDrop}
- className={`w-full max-w-md border-2 border-dashed rounded-lg p-8 mb-6 transition-all ${isDragging
- ? 'border-primary bg-primary/5 scale-105'
- : selectedFile
- ? 'border-green-500 bg-green-500/5'
- : 'border-border bg-secondary/30 hover:border-primary/50'
+ className={`w-full max-w-md border-2 border-dashed rounded-lg p-8 mb-6 transition-all ${selectedFile
+ ? 'border-green-500 bg-green-500/5'
+ : 'border-border bg-secondary/30'
}`}
>
@@ -291,17 +317,17 @@ const Import: React.FC
= ({
<>
-
Drag & Drop audio file
+
Select Audio File
- or click below to browse
+ Click below to browse your files
Supported: MP3, MP4, WAV, M4A, FLAC, OGG, AAC, WMA
diff --git a/src/components/Recorder.tsx b/src/components/Recorder.tsx
index b622b44..c669f45 100644
--- a/src/components/Recorder.tsx
+++ b/src/components/Recorder.tsx
@@ -255,16 +255,20 @@ const Recorder: React.FC = ({
const diff = (now - lastSpeechTimeRef.current) / 1000;
setSilenceDuration(diff);
- // Auto-stop after 30 seconds of silence
- if (diff > 30 && !isStoppingRef.current) {
- console.log("Auto-stopping due to silence");
- addToast("Auto-stopping (Silence detected)", "info", 3000);
+ // Different timeouts based on mode:
+ // Voice Memo: 20 seconds of silence
+ // Meeting: Disabled (no auto-stop to avoid cutting off long meetings)
+ const timeoutSeconds = recordingMode === 'voice' ? 20 : 9999; // 9999 = effectively disabled
+
+ if (diff > timeoutSeconds && !isStoppingRef.current) {
+ console.log(`Auto-stopping (${recordingMode} mode) due to ${timeoutSeconds}s silence`);
+ addToast(`Auto-stopping (${Math.floor(diff)}s silence detected)`, "info", 3000);
stopRecording();
}
}, 1000);
return () => clearInterval(interval);
- }, [isRecording, isPaused, isWaiting, addToast]); // Dependencies for interval lifecycle
+ }, [isRecording, isPaused, isWaiting, recordingMode, addToast]); // Added recordingMode dependency
// Handle Auto Start Prop
useEffect(() => {
@@ -332,18 +336,65 @@ const Recorder: React.FC = ({
setIsRecording(false);
setIsPaused(false);
setIsWaiting(false); // Reset waiting state
- setStatus('Processing...');
+ setStatus('Saving recording...');
const filePath = await invoke('stop_recording');
// Wait a moment for file flush (safety)
await new Promise(r => setTimeout(r, 500));
- setStatus('Transcribing (Infomaniak Whisper)...');
- const transText = await invoke('transcribe_audio', {
- filePath,
- apiKey,
- productId
- });
+ // Confirm recording saved
+ addToast(`Recording saved locally: ${filePath.split('/').pop()}`, 'success', 3000);
+ setStatus('Converting to MP3...');
+
+ // Small delay to show the "saved" message
+ await new Promise(r => setTimeout(r, 500));
+
+ // Convert WAV to MP3 for smaller size
+ const mp3Path = await invoke('convert_to_mp3', { wavPath: filePath });
+
+ // Get file size to check if chunking needed
+ interface AudioMetadata { duration: number; size: number; format: string; }
+ const metadata = await invoke('get_audio_metadata', { filePath: mp3Path });
+ const sizeMB = metadata.size / (1024 * 1024);
+
+ let transText = '';
+
+ // Check if chunking needed (only for Meeting mode and large files)
+ if (recordingMode === 'meeting' && sizeMB >= 18) {
+ // CHUNKING PATH for large meetings
+ setStatus(`Large file (${sizeMB.toFixed(1)}MB). Splitting into chunks...`);
+ const chunks = await invoke('chunk_audio', {
+ filePath: mp3Path,
+ chunkMinutes: 10
+ });
+
+ addToast(`Processing ${chunks.length} chunks...`, 'info', 4000);
+
+ let allTranscriptions: string[] = [];
+
+ for (let i = 0; i < chunks.length; i++) {
+ setStatus(`Transcribing chunk ${i + 1}/${chunks.length}...`);
+ const chunkText = await invoke('transcribe_audio', {
+ filePath: chunks[i],
+ apiKey,
+ productId
+ });
+ allTranscriptions.push(chunkText);
+ }
+
+ // Merge transcriptions
+ transText = allTranscriptions.join('\n\n--- Next Segment ---\n\n');
+ addToast('All chunks transcribed successfully!', 'success', 3000);
+ } else {
+ // NORMAL PATH for small files
+ setStatus('Transcribing (Infomaniak Whisper)...');
+ transText = await invoke('transcribe_audio', {
+ filePath: mp3Path,
+ apiKey,
+ productId
+ });
+ }
+
setTranscription(transText);
// Check if transcription is empty or just whitespace
@@ -422,7 +473,7 @@ const Recorder: React.FC = ({
{/* Fixed Header - Reduced padding */}
-

+
{/* Scrollable Content - Reduced spacing */}
diff --git a/src/components/Settings.tsx b/src/components/Settings.tsx
index 792d58c..c714aba 100644
--- a/src/components/Settings.tsx
+++ b/src/components/Settings.tsx
@@ -5,6 +5,7 @@ import { save, open } from '@tauri-apps/plugin-dialog';
import { invoke } from '@tauri-apps/api/core';
import { encryptData, decryptData } from '../utils/backup';
import EmailTemplateEditor from './EmailTemplateEditor';
+import logo from '../assets/logo.png';
import { PromptTemplate, EmailTemplate } from '../App';
@@ -382,6 +383,61 @@ const Settings: React.FC
= ({ apiKey, productId, prompts, savePat
+
+
📸 Branding
+
+
+
Custom Logo
+
Upload your company logo to replace the default Livtec branding throughout the app.
+
+
+ {/* Logo Preview */}
+
+
+
)
+
+
+
+
+
+
+
Supported: PNG, JPG, SVG. Recommended: Square format, transparent background.
+
+
+