feat: complete history, attendees list, and smart templates

This commit is contained in:
michael.borak
2026-01-20 15:00:56 +01:00
parent d266de942a
commit 52ccd7ee03
18 changed files with 2222 additions and 480 deletions

1107
src-tauri/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -26,5 +26,11 @@ serde_json = "1.0"
chrono = "0.4"
cpal = "0.17.1"
hound = "3.5.1"
reqwest = { version = "0.13.1", features = ["json", "multipart"] }
reqwest = { version = "0.11", features = ["json", "multipart"] }
tokio = { version = "1.40.0", features = ["full"] }
tauri-plugin-fs = "2.4.5"
voice_activity_detector = "0.2.1"
rubato = "0.14.1"
tauri-plugin-oauth = "2.0.0"
oauth2 = "4.4"
url = "2.5"

View File

@@ -8,6 +8,7 @@
"permissions": [
"core:default",
"opener:default",
"dialog:default"
"dialog:default",
"fs:default"
]
}

View File

@@ -0,0 +1,182 @@
#!/usr/bin/env swift
import Foundation
import CoreAudio
// Extensions and Helpers
// Renders a 32-bit value as its four-character code (big-endian byte order),
// e.g. for printing OSStatus error codes like '!dev'.
extension Int32 {
    /// The value interpreted as four 8-bit characters, most significant byte first.
    var fourCC: String {
        let shifts: [Int32] = [24, 16, 8, 0]
        let units = shifts.map { UInt16((self >> $0) & 0xFF) }
        return String(utf16CodeUnits: units, count: units.count)
    }
}
// Safer Property Getter
/// Reads a fixed-size CoreAudio property from `objectID` into a value of type `T`.
/// `initialValue` seeds the out-parameter storage; returns nil if the read fails.
/// NOTE: appears unused elsewhere in this script; kept as a general-purpose helper.
func getPropertyData<T>(objectID: AudioObjectID, selector: AudioObjectPropertySelector, initialValue: T) -> T? {
    var address = AudioObjectPropertyAddress(
        mSelector: selector,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )
    var result = initialValue
    var dataSize = UInt32(MemoryLayout<T>.size)
    let status = AudioObjectGetPropertyData(objectID, &address, 0, nil, &dataSize, &result)
    return status == noErr ? result : nil
}
// CFString Helper
/// Fetches a CFString-valued CoreAudio property (device name, UID, ...) as a
/// Swift String, or nil if the property cannot be read.
func getStringProperty(objectID: AudioObjectID, selector: AudioObjectPropertySelector) -> String? {
    var address = AudioObjectPropertyAddress(
        mSelector: selector,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )
    // A CFStringRef is pointer-sized, so report the size of the optional
    // Unmanaged wrapper as the property data size.
    var dataSize = UInt32(MemoryLayout<Unmanaged<CFString>?>.size)
    var stringRef: Unmanaged<CFString>?
    let status = AudioObjectGetPropertyData(objectID, &address, 0, nil, &dataSize, &stringRef)
    guard status == noErr, let cfString = stringRef else { return nil }
    // Property getters follow the Get rule: we own the returned reference.
    return cfString.takeRetainedValue() as String
}
/// Scans every audio device on the system and returns the ID of the first
/// whose name matches `name` exactly, or nil if none matches.
func findDeviceByName(_ name: String) -> AudioObjectID? {
    let systemID = AudioObjectID(kAudioObjectSystemObject)
    var address = AudioObjectPropertyAddress(
        mSelector: kAudioHardwarePropertyDevices,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )
    // First query the byte size of the device-ID array, then fetch it.
    var byteCount: UInt32 = 0
    guard AudioObjectGetPropertyDataSize(systemID, &address, 0, nil, &byteCount) == noErr else { return nil }
    let deviceCount = Int(byteCount) / MemoryLayout<AudioObjectID>.size
    var allDevices = [AudioObjectID](repeating: 0, count: deviceCount)
    guard AudioObjectGetPropertyData(systemID, &address, 0, nil, &byteCount, &allDevices) == noErr else { return nil }
    return allDevices.first {
        getStringProperty(objectID: $0, selector: kAudioDevicePropertyDeviceNameCFString) == name
    }
}
/// Scans every audio device on the system and returns the ID of the one whose
/// UID matches `uid` exactly, or nil if none matches.
func findDeviceByUID(_ uid: String) -> AudioObjectID? {
    let systemID = AudioObjectID(kAudioObjectSystemObject)
    var address = AudioObjectPropertyAddress(
        mSelector: kAudioHardwarePropertyDevices,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )
    // First query the byte size of the device-ID array, then fetch it.
    var byteCount: UInt32 = 0
    guard AudioObjectGetPropertyDataSize(systemID, &address, 0, nil, &byteCount) == noErr else { return nil }
    let deviceCount = Int(byteCount) / MemoryLayout<AudioObjectID>.size
    var allDevices = [AudioObjectID](repeating: 0, count: deviceCount)
    guard AudioObjectGetPropertyData(systemID, &address, 0, nil, &byteCount, &allDevices) == noErr else { return nil }
    return allDevices.first {
        getStringProperty(objectID: $0, selector: kAudioDevicePropertyDeviceUID) == uid
    }
}
/// Creates (or recreates) the "Hearbit Audio" aggregate device that combines
/// BlackHole 2ch with the current system default input.
/// Exits the process: status 0 on success, 1 on any failure.
func createAggregateDevice() {
    print("Searching for devices...")
    // BlackHole must already be installed; we match it by its device name.
    guard let blackHoleID = findDeviceByName("BlackHole 2ch") else {
        print("Error: BlackHole 2ch not found. Please install it first.")
        exit(1)
    }
    print("Found BlackHole 2ch (ID: \(blackHoleID))")
    // Resolve the system default input device.
    var defaultInputID: AudioObjectID = 0
    var size = UInt32(MemoryLayout<AudioObjectID>.size)
    var address = AudioObjectPropertyAddress(
        mSelector: kAudioHardwarePropertyDefaultInputDevice,
        mScope: kAudioObjectPropertyScopeGlobal,
        mElement: kAudioObjectPropertyElementMain
    )
    if AudioObjectGetPropertyData(AudioObjectID(kAudioObjectSystemObject), &address, 0, nil, &size, &defaultInputID) != noErr {
        print("Error: Could not find default input.")
        exit(1)
    }
    print("Found Default Input (ID: \(defaultInputID))")
    // If a previous "Hearbit Audio" exists (matched by its fixed UID),
    // destroy it so the recreated device reflects the current default input.
    let targetUID = "hearbit_audio_aggregate_v1"
    if let existingID = findDeviceByUID(targetUID) {
        print("Found existing Hearbit Audio device (ID: \(existingID)). Destroying to recreate...")
        if AudioHardwareDestroyAggregateDevice(existingID) != noErr {
            print("Warning: Failed to destroy existing device.")
        } else {
            print("Existing device destroyed.")
        }
        // Brief pause to let CoreAudio settle before recreating the device.
        Thread.sleep(forTimeInterval: 0.5)
    }
    // Sub-devices are referenced by UID in the aggregate description.
    guard let bhUID = getStringProperty(objectID: blackHoleID, selector: kAudioDevicePropertyDeviceUID) else {
        print("Error: Could not get BlackHole UID.")
        exit(1)
    }
    guard let micUID = getStringProperty(objectID: defaultInputID, selector: kAudioDevicePropertyDeviceUID) else {
        print("Error: Could not get Default Input UID.")
        exit(1)
    }
    // Dedup: if the default input IS BlackHole (user set it as default),
    // don't list the same sub-device twice.
    var subDevicesUIDs = [bhUID]
    if micUID != bhUID {
        subDevicesUIDs.append(micUID)
    }
    let subDevicesArray = subDevicesUIDs.map {
        [kAudioSubDeviceUIDKey: $0]
    }
    // Description for a visible (non-private), non-stacked aggregate device
    // with a stable UID so later runs can find and replace it.
    let desc: [String: Any] = [
        kAudioAggregateDeviceNameKey: "Hearbit Audio",
        kAudioAggregateDeviceUIDKey: targetUID,
        kAudioAggregateDeviceIsPrivateKey: Int(0),
        kAudioAggregateDeviceIsStackedKey: Int(0),
        kAudioAggregateDeviceSubDeviceListKey: subDevicesArray
    ]
    print("Creating Aggregate Device with UIDs: \(subDevicesUIDs)")
    var outID: AudioObjectID = 0
    let err = AudioHardwareCreateAggregateDevice(desc as CFDictionary, &outID)
    if err == noErr {
        print("Success! Created 'Hearbit Audio' with ID: \(outID)")
        exit(0)
    } else {
        // fourCC renders OSStatus codes like '!dev' for easier lookup.
        print("Failed to create device. Error code: \(err) (\(err.fourCC))")
        exit(1)
    }
}
createAggregateDevice()

View File

@@ -0,0 +1,183 @@
use std::sync::{Arc, Mutex};
use tauri::{AppHandle, Emitter};
use cpal::Sample;
use hound::WavWriter;
use rubato::{Resampler, FastFixedIn, PolynomialDegree};
use voice_activity_detector::VoiceActivityDetector;
/// VAD-gated audio recorder: buffers incoming device-rate mono samples, runs a
/// 16 kHz voice-activity detector over a resampled copy, and writes samples to
/// the shared WAV writer only during speech plus a trailing hangover window.
pub struct AudioProcessor {
    // --- VAD ---
    vad: VoiceActivityDetector, // detector fed fixed-size 16 kHz chunks
    vad_chunk_size: usize, // samples per VAD prediction (512 at 16 kHz)
    vad_buffer: Vec<f32>, // resampled samples awaiting a full VAD chunk
    // --- Resampler (device rate -> 16 kHz, mono) ---
    resampler: FastFixedIn<f32>,
    resample_input_buffer: Vec<f32>, // device-rate samples awaiting resampling
    resample_output_buffer: Vec<f32>, // NOTE(review): currently unused; candidate for reuse to avoid per-chunk allocs
    // --- Speech state ---
    is_speech_active: bool, // set true on first speech-positive VAD chunk
    last_speech_time: u64, // position (in device-rate samples) of the last speech-positive chunk
    hangover_samples: u64, // keep writing this many samples after speech ends (1.5 s of device rate)
    // --- Ring buffer (pre-roll capture; see NOTE in `process`) ---
    ring_buffer: Vec<f32>,
    ring_pos: usize, // next write index (wraps at ring_size)
    ring_size: usize, // capacity: 1 second of device-rate samples
    // --- Output ---
    writer: Arc<Mutex<WavWriter<std::io::BufWriter<std::fs::File>>>>, // shared with the owning recorder
    sample_rate: u32, // device sample rate in Hz
    total_processed_samples: u64, // running count of device-rate samples seen
    // --- UI event emission ---
    app_handle: Option<AppHandle>, // target for throttled "vad-event"s
    last_event_time: std::time::Instant, // last time a vad-event was emitted
}
impl AudioProcessor {
    /// Creates a processor for a mono f32 stream at `sample_rate` Hz.
    ///
    /// * `writer` — shared WAV sink; gated samples are written as i16 at the
    ///   device's native rate.
    /// * `app_handle` — used to emit throttled "vad-event" notifications.
    ///
    /// Returns an error string if the VAD or resampler fails to initialize.
    pub fn new(
        sample_rate: u32,
        writer: Arc<Mutex<WavWriter<std::io::BufWriter<std::fs::File>>>>,
        app_handle: AppHandle
    ) -> Result<Self, String> {
        let vad_sample_rate = 16000;
        let vad_chunk_size = 512; // 32 ms at 16 kHz (16000 * 0.032 = 512)
        // Initialize the VAD for fixed 512-sample chunks at 16 kHz.
        let vad = VoiceActivityDetector::builder()
            .sample_rate(vad_sample_rate as u32)
            .chunk_size(vad_chunk_size)
            .build()
            .map_err(|e| format!("Failed to init VAD: {:?}", e))?;
        // Resampler: device rate -> 16 kHz, mono, fixed 1024-frame input chunks.
        // FastFixedIn args: (f_ratio, max_resample_ratio_relative, polynomial_degree, chunk_size, channels).
        let resampler = FastFixedIn::<f32>::new(
            16000.0 / sample_rate as f64,
            1.0,
            PolynomialDegree::Septic,
            1024,
            1
        ).map_err(|e| format!("Failed to init Resampler: {:?}", e))?;
        // Pre-roll ring buffer sized for 1 second of device-rate audio.
        let ring_curr_seconds = 1.0;
        let ring_size = (sample_rate as f32 * ring_curr_seconds) as usize;
        Ok(Self {
            vad,
            vad_chunk_size,
            vad_buffer: Vec::new(),
            resampler,
            resample_input_buffer: Vec::new(),
            resample_output_buffer: Vec::new(),
            is_speech_active: false,
            last_speech_time: 0,
            hangover_samples: (sample_rate as f32 * 1.5) as u64, // 1.5 s hangover
            ring_buffer: vec![0.0; ring_size],
            ring_pos: 0,
            ring_size,
            writer,
            sample_rate,
            total_processed_samples: 0,
            app_handle: Some(app_handle),
            last_event_time: std::time::Instant::now(),
        })
    }
    /// Feeds one callback's worth of mono f32 samples through the pipeline:
    /// ring-buffer capture, 16 kHz resampling, VAD, and gated WAV writing.
    pub fn process(&mut self, data: &[f32]) {
        // 1. Always copy into the pre-roll ring buffer.
        // NOTE(review): this buffer is filled but never flushed to the writer;
        // dumping it on the silence->speech transition is still TODO, so the
        // first instant of an utterance may be clipped from the recording.
        for &sample in data {
            self.ring_buffer[self.ring_pos] = sample;
            self.ring_pos = (self.ring_pos + 1) % self.ring_size;
        }
        // 2. Resample to 16 kHz for the VAD. FastFixedIn consumes a fixed
        // number of input frames per call, so accumulate incoming samples and
        // drain them in `needed`-sized chunks; any remainder waits for the
        // next callback.
        self.resample_input_buffer.extend_from_slice(data);
        let needed = self.resampler.input_frames_next();
        while self.resample_input_buffer.len() >= needed {
            let chunk: Vec<f32> = self.resample_input_buffer.drain(0..needed).collect();
            // rubato operates on per-channel "waves"; we have one (mono).
            let waves_in = vec![chunk];
            // Output sized from the rate ratio, with slack for rounding.
            let mut waves_out = vec![vec![0.0; (needed as f64 * (16000.0 / self.sample_rate as f64)).ceil() as usize + 10]; 1]; // +10 padding
            if let Ok((_in_len, out_len)) = self.resampler.process_into_buffer(&waves_in, &mut waves_out, None) {
                if out_len > 0 {
                    self.vad_buffer.extend_from_slice(&waves_out[0][0..out_len]);
                }
            }
        }
        // 3. Run the VAD over each complete chunk of resampled audio.
        while self.vad_buffer.len() >= self.vad_chunk_size {
            let vad_chunk: Vec<f32> = self.vad_buffer.drain(0..self.vad_chunk_size).collect();
            let probability = self.vad.predict(vad_chunk);
            let is_speech = probability > 0.5;
            if is_speech {
                self.is_speech_active = true;
                self.last_speech_time = self.total_processed_samples;
            }
            // Emit a throttled "vad-event" (at most every 500 ms) so the UI
            // can show live speech probability.
            if self.last_event_time.elapsed().as_millis() > 500 {
                if let Some(app) = &self.app_handle {
                    #[derive(serde::Serialize, Clone)]
                    struct VadEvent {
                        probability: f32,
                        is_speech: bool,
                    }
                    let _ = app.emit("vad-event", VadEvent { probability, is_speech });
                }
                self.last_event_time = std::time::Instant::now();
            }
        }
        // 4. Write this callback's samples when inside speech or the trailing
        // hangover window (so brief pauses aren't cut out of the recording).
        let time_since_speech = self.total_processed_samples.saturating_sub(self.last_speech_time);
        if self.is_speech_active || time_since_speech < self.hangover_samples {
            // NOTE(review): `is_speech_active` is never reset to false, so once
            // speech is detected every subsequent buffer is written and the
            // hangover check becomes moot — confirm whether gating on the
            // hangover alone was intended.
            let mut guard = self.writer.lock().unwrap();
            for &sample in data {
                let amplitude = i16::MAX as f32;
                // f32 -> i16 `as` casts saturate in Rust, so clipping is safe.
                guard.write_sample((sample * amplitude) as i16).ok();
            }
        }
        self.total_processed_samples += data.len() as u64;
    }
}

112
src-tauri/src/auth.rs Normal file
View File

@@ -0,0 +1,112 @@
use tauri::{AppHandle, Runtime};
use tauri_plugin_opener::OpenerExt;
use tauri_plugin_oauth::start;
use url::Url;
use oauth2::{
basic::BasicClient, AuthUrl, ClientId, CsrfToken, PkceCodeChallenge, RedirectUrl, Scope,
TokenResponse, TokenUrl,
};
use oauth2::reqwest::async_http_client;
// const CLIENT_ID: &str = "YOUR_CLIENT_ID_HERE";
const AUTH_URL: &str = "https://login.microsoftonline.com/common/oauth2/v2.0/authorize";
const TOKEN_URL: &str = "https://login.microsoftonline.com/common/oauth2/v2.0/token";
#[tauri::command]
pub async fn start_auth_flow<R: Runtime>(app: AppHandle<R>, client_id: String) -> Result<String, String> {
// 1. Start Localhost Server
let (tx, rx) = std::sync::mpsc::channel();
// tauri-plugin-oauth start() returns a port and stops server when callback received
let port = start(move |url| {
// Ignore favicon requests to avoid triggering early
if url.contains("favicon.ico") {
return;
}
let _ = tx.send(url);
})
.map_err(|e| format!("Failed to start oauth server: {}", e))?;
let redirect_uri = format!("http://localhost:{}/auth/callback", port);
// 2. Setup OAuth Client
let client = BasicClient::new(
ClientId::new(client_id),
None, // No client secret for PKCE public client
AuthUrl::new(AUTH_URL.to_string()).map_err(|e| e.to_string())?,
Some(TokenUrl::new(TOKEN_URL.to_string()).map_err(|e| e.to_string())?),
)
.set_redirect_uri(RedirectUrl::new(redirect_uri.clone()).map_err(|e| e.to_string())?);
// 3. Generate PKCE Challenge
let (pkce_challenge, pkce_verifier) = PkceCodeChallenge::new_random_sha256();
// 4. Generate Auth URL
let (auth_url, _csrf_token) = client
.authorize_url(CsrfToken::new_random)
.add_scope(Scope::new("User.Read".to_string()))
.add_scope(Scope::new("Calendars.Read".to_string()))
.set_pkce_challenge(pkce_challenge)
.url();
// 5. Open Browser
app.opener().open_url(auth_url.as_str(), None::<&str>)
.map_err(|e| format!("Failed to open browser: {}", e))?;
// 6. Wait for Callback
let received_url_str = rx.recv().map_err(|e| format!("Failed to receive auth code: {}", e))?;
// 7. Parse Code from URL
// Actually we need to parse the query params from received_url_str
let parsed_url = Url::parse(&received_url_str).map_err(|e| e.to_string())?;
let pairs: std::collections::HashMap<_, _> = parsed_url.query_pairs().into_owned().collect();
if let Some(err) = pairs.get("error") {
let desc = pairs.get("error_description").map(|s| s.as_str()).unwrap_or("No description");
return Err(format!("OAuth Error: {} ({})", err, desc));
}
let code = pairs.get("code").ok_or_else(|| format!("No code in redirect callback. Received URL: {}", received_url_str))?;
// 8. Exchange Code for Token
let token_result = client
.exchange_code(oauth2::AuthorizationCode::new(code.clone()))
.set_pkce_verifier(pkce_verifier)
.request_async(async_http_client)
.await
.map_err(|e| format!("Failed to exchange token: {}", e))?;
let access_token = token_result.access_token().secret();
// Save token? Or just return it.
// Ideally we save it in key storage, but for MVP return it.
Ok(access_token.clone())
}
#[tauri::command]
/// Fetches the next 7 days of calendar events from Microsoft Graph using the
/// given bearer `token`. Returns the raw event JSON objects from the `value`
/// array, or a descriptive error string if the request or the Graph API fails.
pub async fn get_calendar_events(token: String) -> Result<Vec<serde_json::Value>, String> {
    let client = reqwest::Client::new();
    let response = client
        .get("https://graph.microsoft.com/v1.0/me/calendarView")
        .bearer_auth(token)
        .query(&[
            ("startDateTime", chrono::Utc::now().to_rfc3339()),
            ("endDateTime", (chrono::Utc::now() + chrono::Duration::days(7)).to_rfc3339()),
            ("$select", "id,subject,start,end,location,onlineMeeting,bodyPreview,body,attendees".to_string())
        ])
        // Ask Graph to return start/end times normalized to UTC.
        .header("Prefer", "outlook.timezone=\"UTC\"")
        .send()
        .await
        .map_err(|e| e.to_string())?;
    // Capture the status before consuming the body so we can report failures.
    let status = response.status();
    let body = response
        .json::<serde_json::Value>()
        .await
        .map_err(|e| e.to_string())?;
    // Surface Graph API errors (e.g. expired token -> 401) instead of
    // silently returning an empty event list.
    if !status.is_success() {
        let msg = body
            .get("error")
            .and_then(|e| e.get("message"))
            .and_then(|m| m.as_str())
            .unwrap_or("unknown error");
        return Err(format!("Graph API request failed ({}): {}", status, msg));
    }
    // Extract the 'value' array; a successful response without it yields [].
    Ok(body
        .get("value")
        .and_then(|v| v.as_array())
        .cloned()
        .unwrap_or_default())
}

View File

@@ -5,6 +5,10 @@ use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
use std::time::Duration;
use tokio::time::sleep;
mod audio_processor;
use audio_processor::AudioProcessor;
mod auth;
// State to hold the active recording stream
struct AppState {
recording_stream: Mutex<Option<cpal::Stream>>,
@@ -60,7 +64,7 @@ fn get_input_devices() -> Result<Vec<AudioDevice>, String> {
#[tauri::command]
fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String, save_path: Option<String>) -> Result<(), String> {
fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String, save_path: Option<String>, custom_filename: Option<String>) -> Result<(), String> {
emit_log(&app, "INFO", &format!("Starting recording on device: {}", device_id));
let host = cpal::default_host();
@@ -73,6 +77,15 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
.ok_or("No input device found")?;
let config = device.default_input_config().map_err(|e| e.to_string())?;
// VAD requires 16 kHz or 8 kHz input, typically.
// We might need to resample or just check if the device supports it.
// For MVP VAD, we will try to stick to standard rates.
// Actually, simple energy VAD is easier to start with if Silero is too heavy or requires ONNX runtime.
// Let's check the crate docs or usage first.
// Wait, the user wants to IGNORE music. Energy VAD will fail on music.
// voice_activity_detector crate usually uses Silero or similar.
let spec = hound::WavSpec {
channels: config.channels(),
sample_rate: config.sample_rate(),
@@ -81,16 +94,22 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
};
// Determine file path: User provided or Temp
let filename = if let Some(name) = custom_filename {
// Sanitize filename
let safe_name: String = name.chars().map(|x| if x.is_alphanumeric() || x == ' ' || x == '-' || x == '_' { x } else { '_' }).collect();
format!("{}.wav", safe_name)
} else {
format!("recording_{}.wav", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs())
};
let file_path = if let Some(path) = save_path {
if path.trim().is_empty() {
std::env::temp_dir().join(format!("recording_{}.wav", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs()))
std::env::temp_dir().join(&filename)
} else {
// Check if directory exists, if not try to create it or error out?
// For now, assume user gives a valid directory. We'll append filename.
std::path::PathBuf::from(path).join(format!("recording_{}.wav", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs()))
std::path::PathBuf::from(path).join(&filename)
}
} else {
std::env::temp_dir().join(format!("recording_{}.wav", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_secs()))
std::env::temp_dir().join(&filename)
};
let file_path_str = file_path.to_string_lossy().to_string();
@@ -99,6 +118,19 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
let writer = hound::WavWriter::create(&file_path, spec).map_err(|e| e.to_string())?;
let writer = Arc::new(Mutex::new(writer));
let writer_clone = writer.clone();
// Initialize AudioProcessor (VAD)
// We pass the writer to it.
let processor = AudioProcessor::new(config.sample_rate(), writer.clone(), app.clone())
.map_err(|e| format!("Failed to create AudioProcessor: {}", e))?;
// Wrap processor in Arc<Mutex> so we can share/move it into callback
// Actually, cpal callback takes ownership of its closure state usually if 'move'.
// Since stream is on another thread, we need Send. AudioProcessor should be Send.
// However, the callback is called repeatedly. We need to keep state.
// The workaround is to wrap it in a Mutex.
let processor = Arc::new(Mutex::new(processor));
let processor_clone = processor.clone();
let app_handle = app.clone();
let err_fn = move |err| {
@@ -110,21 +142,21 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
cpal::SampleFormat::F32 => device.build_input_stream(
&config.into(),
move |data: &[f32], _: &_| {
let mut guard = writer_clone.lock().unwrap();
for &sample in data {
let amplitude = i16::MAX as f32;
guard.write_sample((sample * amplitude) as i16).ok();
if let Ok(mut p) = processor_clone.lock() {
p.process(data);
}
},
err_fn,
None
),
// For I16 and U16 we need to convert to F32 for our processor
cpal::SampleFormat::I16 => device.build_input_stream(
&config.into(),
move |data: &[i16], _: &_| {
let mut guard = writer_clone.lock().unwrap();
for &sample in data {
guard.write_sample(sample).ok();
// Convert i16 to f32
let f32_data: Vec<f32> = data.iter().map(|&s| s as f32 / i16::MAX as f32).collect();
if let Ok(mut p) = processor_clone.lock() {
p.process(&f32_data);
}
},
err_fn,
@@ -133,9 +165,10 @@ fn start_recording(app: AppHandle, state: State<'_, AppState>, device_id: String
cpal::SampleFormat::U16 => device.build_input_stream(
&config.into(),
move |data: &[u16], _: &_| {
let mut guard = writer_clone.lock().unwrap();
for &sample in data {
guard.write_sample((sample as i32 - 32768) as i16).ok();
// Convert u16 to f32
let f32_data: Vec<f32> = data.iter().map(|&s| (s as i32 - 32768) as f32 / 32768.0).collect();
if let Ok(mut p) = processor_clone.lock() {
p.process(&f32_data);
}
},
err_fn,
@@ -536,6 +569,60 @@ fn open_audio_midi_setup() -> Result<(), String> {
Ok(())
}
#[tauri::command]
fn create_hearbit_audio_device(app: AppHandle) -> Result<String, String> {
    emit_log(&app, "INFO", "Attempting to create Hearbit Audio device...");
    // Locate the bundled Swift helper script that builds the aggregate device.
    let resource_path = app.path().resource_dir()
        .map_err(|e| e.to_string())?
        .join("resources/create_hearbit_audio.swift");
    if !resource_path.exists() {
        // In dev the resource may not be bundled yet; log and still attempt
        // the run below — the `swift` invocation will surface the real error.
        emit_log(&app, "WARN", &format!("Resource script not found at {:?}. Trying local src-tauri path.", resource_path));
    }
    // Run the script with the system Swift interpreter.
    // NOTE(review): this requires the `swift` binary (Xcode command-line
    // tools) on the user's machine — confirm that's an acceptable dependency.
    let output = Command::new("swift")
        .arg(resource_path)
        .output()
        .map_err(|e| e.to_string())?;
    let stdout = String::from_utf8_lossy(&output.stdout).to_string();
    let stderr = String::from_utf8_lossy(&output.stderr).to_string();
    emit_log(&app, "DEBUG", &format!("Script Output: {}", stdout));
    if !stderr.is_empty() {
        emit_log(&app, "WARN", &format!("Script Stderr: {}", stderr));
    }
    // The helper script exits 0 on success and 1 on any failure.
    if output.status.success() {
        emit_log(&app, "SUCCESS", "Hearbit Audio device created successfully.");
        Ok("Device created successfully".to_string())
    } else {
        emit_log(&app, "ERROR", "Failed to create device.");
        Err(format!("Failed to create device: {} {}", stdout, stderr))
    }
}
#[tauri::command]
async fn save_text_file(app: AppHandle, path: String, content: String) -> Result<(), String> {
emit_log(&app, "INFO", &format!("Saving text file to: {}", path));
match std::fs::write(&path, content) {
Ok(_) => {
emit_log(&app, "SUCCESS", "File saved successfully.");
Ok(())
},
Err(e) => {
emit_log(&app, "ERROR", &format!("Failed to save file: {}", e));
Err(e.to_string())
}
}
}
#[cfg_attr(mobile, tauri::mobile_entry_point)]
@@ -543,6 +630,8 @@ pub fn run() {
tauri::Builder::default()
.plugin(tauri_plugin_opener::init())
.plugin(tauri_plugin_dialog::init())
.plugin(tauri_plugin_fs::init())
.plugin(tauri_plugin_oauth::init())
.manage(AppState {
recording_stream: Mutex::new(None),
recording_file_path: Mutex::new(None),
@@ -557,7 +646,11 @@ pub fn run() {
transcribe_audio,
summarize_text,
get_available_models,
open_audio_midi_setup
open_audio_midi_setup,
create_hearbit_audio_device,
auth::start_auth_flow,
auth::get_calendar_events,
save_text_file
])
.run(tauri::generate_context!())
.expect("error while running tauri application");

View File

@@ -32,7 +32,7 @@
"icons/icon.ico"
],
"resources": [
"resources/BlackHole2ch.v0.6.1.pkg"
"resources/*"
]
}
}