Looks better now

This commit is contained in:
2026-01-26 01:02:18 +01:00
parent 87fd59549d
commit 4ae8e28b2f
4 changed files with 66 additions and 45 deletions

View File

@@ -129,8 +129,7 @@ impl SequencerSnapshot {
pub struct SequencerHandle { pub struct SequencerHandle {
pub cmd_tx: Sender<SeqCommand>, pub cmd_tx: Sender<SeqCommand>,
pub audio_tx: Sender<AudioCommand>, pub audio_tx: Arc<ArcSwap<Sender<AudioCommand>>>,
pub audio_rx: Receiver<AudioCommand>,
shared_state: Arc<ArcSwap<SharedSequencerState>>, shared_state: Arc<ArcSwap<SharedSequencerState>>,
thread: JoinHandle<()>, thread: JoinHandle<()>,
} }
@@ -146,6 +145,12 @@ impl SequencerHandle {
} }
} }
pub fn swap_audio_channel(&self) -> Receiver<AudioCommand> {
let (new_tx, new_rx) = bounded::<AudioCommand>(256);
self.audio_tx.store(Arc::new(new_tx));
new_rx
}
pub fn shutdown(self) { pub fn shutdown(self) {
let _ = self.cmd_tx.send(SeqCommand::Shutdown); let _ = self.cmd_tx.send(SeqCommand::Shutdown);
let _ = self.thread.join(); let _ = self.thread.join();
@@ -186,20 +191,21 @@ pub fn spawn_sequencer(
rng: Rng, rng: Rng,
quantum: f64, quantum: f64,
live_keys: Arc<LiveKeyState>, live_keys: Arc<LiveKeyState>,
) -> SequencerHandle { ) -> (SequencerHandle, Receiver<AudioCommand>) {
let (cmd_tx, cmd_rx) = bounded::<SeqCommand>(64); let (cmd_tx, cmd_rx) = bounded::<SeqCommand>(64);
let (audio_tx, audio_rx) = bounded::<AudioCommand>(256); let (audio_tx, audio_rx) = bounded::<AudioCommand>(256);
let audio_tx = Arc::new(ArcSwap::from_pointee(audio_tx));
let shared_state = Arc::new(ArcSwap::from_pointee(SharedSequencerState::default())); let shared_state = Arc::new(ArcSwap::from_pointee(SharedSequencerState::default()));
let shared_state_clone = Arc::clone(&shared_state); let shared_state_clone = Arc::clone(&shared_state);
let audio_tx_clone = audio_tx.clone(); let audio_tx_for_thread = Arc::clone(&audio_tx);
let thread = thread::Builder::new() let thread = thread::Builder::new()
.name("sequencer".into()) .name("sequencer".into())
.spawn(move || { .spawn(move || {
sequencer_loop( sequencer_loop(
cmd_rx, cmd_rx,
audio_tx_clone, audio_tx_for_thread,
link, link,
playing, playing,
variables, variables,
@@ -212,13 +218,13 @@ pub fn spawn_sequencer(
}) })
.expect("Failed to spawn sequencer thread"); .expect("Failed to spawn sequencer thread");
SequencerHandle { let handle = SequencerHandle {
cmd_tx, cmd_tx,
audio_tx, audio_tx,
audio_rx,
shared_state, shared_state,
thread, thread,
} };
(handle, audio_rx)
} }
struct PatternCache { struct PatternCache {
@@ -294,7 +300,7 @@ impl RunsCounter {
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
fn sequencer_loop( fn sequencer_loop(
cmd_rx: Receiver<SeqCommand>, cmd_rx: Receiver<SeqCommand>,
audio_tx: Sender<AudioCommand>, audio_tx: Arc<ArcSwap<Sender<AudioCommand>>>,
link: Arc<LinkState>, link: Arc<LinkState>,
playing: Arc<std::sync::atomic::AtomicBool>, playing: Arc<std::sync::atomic::AtomicBool>,
variables: Variables, variables: Variables,
@@ -430,7 +436,7 @@ fn sequencer_loop(
std::mem::take(&mut trace), std::mem::take(&mut trace),
); );
for cmd in cmds { for cmd in cmds {
match audio_tx.try_send(AudioCommand::Evaluate(cmd)) { match audio_tx.load().try_send(AudioCommand::Evaluate(cmd)) {
Ok(()) => { Ok(()) => {
event_count += 1; event_count += 1;
} }
@@ -438,7 +444,9 @@ fn sequencer_loop(
dropped_events += 1; dropped_events += 1;
} }
Err(TrySendError::Disconnected(_)) => { Err(TrySendError::Disconnected(_)) => {
return; // Channel disconnected means old stream is gone, but
// a new one will be swapped in. Don't exit - just skip.
dropped_events += 1;
} }
} }
} }

View File

@@ -1,3 +1,4 @@
use arc_swap::ArcSwap;
use crossbeam_channel::Sender; use crossbeam_channel::Sender;
use crossterm::event::{Event, KeyCode, KeyEvent, KeyEventKind, KeyModifiers}; use crossterm::event::{Event, KeyCode, KeyEvent, KeyEventKind, KeyModifiers};
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
@@ -21,7 +22,7 @@ pub struct InputContext<'a> {
pub link: &'a LinkState, pub link: &'a LinkState,
pub snapshot: &'a SequencerSnapshot, pub snapshot: &'a SequencerSnapshot,
pub playing: &'a Arc<AtomicBool>, pub playing: &'a Arc<AtomicBool>,
pub audio_tx: &'a Sender<AudioCommand>, pub audio_tx: &'a ArcSwap<Sender<AudioCommand>>,
} }
impl<'a> InputContext<'a> { impl<'a> InputContext<'a> {
@@ -362,7 +363,7 @@ fn handle_modal_input(ctx: &mut InputContext, key: KeyEvent) -> InputResult {
if let Some(path) = sample_path { if let Some(path) = sample_path {
let index = doux::loader::scan_samples_dir(&path); let index = doux::loader::scan_samples_dir(&path);
let count = index.len(); let count = index.len();
let _ = ctx.audio_tx.send(AudioCommand::LoadSamples(index)); let _ = ctx.audio_tx.load().send(AudioCommand::LoadSamples(index));
ctx.app.audio.config.sample_count += count; ctx.app.audio.config.sample_count += count;
ctx.app.audio.add_sample_path(path); ctx.app.audio.add_sample_path(path);
ctx.dispatch(AppCommand::SetStatus(format!("Added {count} samples"))); ctx.dispatch(AppCommand::SetStatus(format!("Added {count} samples")));
@@ -515,7 +516,7 @@ fn handle_panel_input(ctx: &mut InputContext, key: KeyEvent) -> InputResult {
let idx = entry.index; let idx = entry.index;
let cmd = let cmd =
format!("/sound/{folder}/n/{idx}/gain/0.5/dur/1"); format!("/sound/{folder}/n/{idx}/gain/0.5/dur/1");
let _ = ctx.audio_tx.send(AudioCommand::Evaluate(cmd)); let _ = ctx.audio_tx.load().send(AudioCommand::Evaluate(cmd));
} }
_ => state.toggle_expand(), _ => state.toggle_expand(),
} }
@@ -846,15 +847,16 @@ fn handle_engine_page(ctx: &mut InputContext, key: KeyEvent) -> InputResult {
} }
} }
KeyCode::Char('h') => { KeyCode::Char('h') => {
let _ = ctx.audio_tx.send(AudioCommand::Hush); let _ = ctx.audio_tx.load().send(AudioCommand::Hush);
} }
KeyCode::Char('p') => { KeyCode::Char('p') => {
let _ = ctx.audio_tx.send(AudioCommand::Panic); let _ = ctx.audio_tx.load().send(AudioCommand::Panic);
} }
KeyCode::Char('r') => ctx.app.metrics.peak_voices = 0, KeyCode::Char('r') => ctx.app.metrics.peak_voices = 0,
KeyCode::Char('t') => { KeyCode::Char('t') => {
let _ = ctx let _ = ctx
.audio_tx .audio_tx
.load()
.send(AudioCommand::Evaluate("/sound/sine/dur/0.5/decay/0.2".into())); .send(AudioCommand::Evaluate("/sound/sine/dur/0.5/decay/0.2".into()));
} }
_ => {} _ => {}
@@ -986,7 +988,7 @@ fn load_project_samples(ctx: &mut InputContext) {
let index = doux::loader::scan_samples_dir(path); let index = doux::loader::scan_samples_dir(path);
let count = index.len(); let count = index.len();
total_count += count; total_count += count;
let _ = ctx.audio_tx.send(AudioCommand::LoadSamples(index)); let _ = ctx.audio_tx.load().send(AudioCommand::LoadSamples(index));
} }
} }

View File

@@ -99,7 +99,7 @@ fn main() -> io::Result<()> {
initial_samples.extend(index); initial_samples.extend(index);
} }
let sequencer = spawn_sequencer( let (sequencer, initial_audio_rx) = spawn_sequencer(
Arc::clone(&link), Arc::clone(&link),
Arc::clone(&playing), Arc::clone(&playing),
Arc::clone(&app.variables), Arc::clone(&app.variables),
@@ -118,7 +118,7 @@ fn main() -> io::Result<()> {
let (mut _stream, mut _analysis_handle) = match build_stream( let (mut _stream, mut _analysis_handle) = match build_stream(
&stream_config, &stream_config,
sequencer.audio_rx.clone(), initial_audio_rx,
Arc::clone(&scope_buffer), Arc::clone(&scope_buffer),
Arc::clone(&spectrum_buffer), Arc::clone(&spectrum_buffer),
Arc::clone(&metrics), Arc::clone(&metrics),
@@ -149,6 +149,8 @@ fn main() -> io::Result<()> {
_stream = None; _stream = None;
_analysis_handle = None; _analysis_handle = None;
let new_audio_rx = sequencer.swap_audio_channel();
let new_config = AudioStreamConfig { let new_config = AudioStreamConfig {
output_device: app.audio.config.output_device.clone(), output_device: app.audio.config.output_device.clone(),
channels: app.audio.config.channels, channels: app.audio.config.channels,
@@ -165,7 +167,7 @@ fn main() -> io::Result<()> {
match build_stream( match build_stream(
&new_config, &new_config,
sequencer.audio_rx.clone(), new_audio_rx,
Arc::clone(&scope_buffer), Arc::clone(&scope_buffer),
Arc::clone(&spectrum_buffer), Arc::clone(&spectrum_buffer),
Arc::clone(&metrics), Arc::clone(&metrics),

View File

@@ -9,6 +9,9 @@ use crate::app::App;
use crate::state::{DeviceKind, EngineSection, SettingKind}; use crate::state::{DeviceKind, EngineSection, SettingKind};
use crate::widgets::{Orientation, Scope, Spectrum}; use crate::widgets::{Orientation, Scope, Spectrum};
const HEADER_COLOR: Color = Color::Rgb(100, 160, 180);
const DIVIDER_COLOR: Color = Color::Rgb(60, 65, 70);
pub fn render(frame: &mut Frame, app: &App, area: Rect) { pub fn render(frame: &mut Frame, app: &App, area: Rect) {
let [left_col, _, right_col] = Layout::horizontal([ let [left_col, _, right_col] = Layout::horizontal([
Constraint::Percentage(55), Constraint::Percentage(55),
@@ -42,9 +45,9 @@ fn render_settings_section(frame: &mut Frame, app: &App, area: Rect) {
let [devices_area, _, settings_area, _, samples_area] = Layout::vertical([ let [devices_area, _, settings_area, _, samples_area] = Layout::vertical([
Constraint::Length(devices_height), Constraint::Length(devices_height),
Constraint::Length(1), Constraint::Length(1),
Constraint::Length(6), Constraint::Length(7),
Constraint::Length(1), Constraint::Length(1),
Constraint::Min(5), Constraint::Min(6),
]) ])
.areas(padded); .areas(padded);
@@ -109,23 +112,39 @@ fn list_height(item_count: usize) -> u16 {
fn devices_section_height(app: &App) -> u16 { fn devices_section_height(app: &App) -> u16 {
let output_h = list_height(app.audio.output_devices.len()); let output_h = list_height(app.audio.output_devices.len());
let input_h = list_height(app.audio.input_devices.len()); let input_h = list_height(app.audio.input_devices.len());
2 + output_h.max(input_h) 3 + output_h.max(input_h)
}
fn render_section_header(frame: &mut Frame, title: &str, focused: bool, area: Rect) {
let [header_area, divider_area] = Layout::vertical([
Constraint::Length(1),
Constraint::Length(1),
]).areas(area);
let header_style = if focused {
Style::new().fg(Color::Yellow).add_modifier(Modifier::BOLD)
} else {
Style::new().fg(HEADER_COLOR).add_modifier(Modifier::BOLD)
};
frame.render_widget(Paragraph::new(title).style(header_style), header_area);
let divider = "─".repeat(area.width as usize);
frame.render_widget(
Paragraph::new(divider).style(Style::new().fg(DIVIDER_COLOR)),
divider_area,
);
} }
fn render_devices(frame: &mut Frame, app: &App, area: Rect) { fn render_devices(frame: &mut Frame, app: &App, area: Rect) {
let section_focused = app.audio.section == EngineSection::Devices; let section_focused = app.audio.section == EngineSection::Devices;
let header_style = if section_focused {
Style::new().fg(Color::Yellow).add_modifier(Modifier::BOLD)
} else {
Style::new().fg(Color::Rgb(100, 160, 180)).add_modifier(Modifier::BOLD)
};
let [header_area, content_area] = Layout::vertical([ let [header_area, content_area] = Layout::vertical([
Constraint::Length(1), Constraint::Length(2),
Constraint::Min(1), Constraint::Min(1),
]).areas(area); ]).areas(area);
frame.render_widget(Paragraph::new("Devices").style(header_style), header_area); render_section_header(frame, "DEVICES", section_focused, header_area);
let [output_col, separator, input_col] = Layout::horizontal([ let [output_col, separator, input_col] = Layout::horizontal([
Constraint::Percentage(48), Constraint::Percentage(48),
@@ -206,16 +225,11 @@ fn render_device_column(
fn render_settings(frame: &mut Frame, app: &App, area: Rect) { fn render_settings(frame: &mut Frame, app: &App, area: Rect) {
let section_focused = app.audio.section == EngineSection::Settings; let section_focused = app.audio.section == EngineSection::Settings;
let header_style = if section_focused {
Style::new().fg(Color::Yellow).add_modifier(Modifier::BOLD)
} else {
Style::new().fg(Color::Rgb(100, 160, 180)).add_modifier(Modifier::BOLD)
};
let [header_area, content_area] = let [header_area, content_area] =
Layout::vertical([Constraint::Length(1), Constraint::Min(1)]).areas(area); Layout::vertical([Constraint::Length(2), Constraint::Min(1)]).areas(area);
frame.render_widget(Paragraph::new("Settings").style(header_style), header_area); render_section_header(frame, "SETTINGS", section_focused, header_area);
let highlight = Style::new().fg(Color::Yellow).add_modifier(Modifier::BOLD); let highlight = Style::new().fg(Color::Yellow).add_modifier(Modifier::BOLD);
let normal = Style::new().fg(Color::White); let normal = Style::new().fg(Color::White);
@@ -269,14 +283,9 @@ fn render_settings(frame: &mut Frame, app: &App, area: Rect) {
fn render_samples(frame: &mut Frame, app: &App, area: Rect) { fn render_samples(frame: &mut Frame, app: &App, area: Rect) {
let section_focused = app.audio.section == EngineSection::Samples; let section_focused = app.audio.section == EngineSection::Samples;
let header_style = if section_focused {
Style::new().fg(Color::Yellow).add_modifier(Modifier::BOLD)
} else {
Style::new().fg(Color::Rgb(100, 160, 180)).add_modifier(Modifier::BOLD)
};
let [header_area, content_area, _, hint_area] = Layout::vertical([ let [header_area, content_area, _, hint_area] = Layout::vertical([
Constraint::Length(1), Constraint::Length(2),
Constraint::Min(1), Constraint::Min(1),
Constraint::Length(1), Constraint::Length(1),
Constraint::Length(1), Constraint::Length(1),
@@ -285,8 +294,8 @@ fn render_samples(frame: &mut Frame, app: &App, area: Rect) {
let path_count = app.audio.config.sample_paths.len(); let path_count = app.audio.config.sample_paths.len();
let sample_count = app.audio.config.sample_count; let sample_count = app.audio.config.sample_count;
let header_text = format!("Samples {path_count} paths · {sample_count} indexed"); let header_text = format!("SAMPLES {path_count} paths · {sample_count} indexed");
frame.render_widget(Paragraph::new(header_text).style(header_style), header_area); render_section_header(frame, &header_text, section_focused, header_area);
let dim = Style::new().fg(Color::Rgb(80, 85, 95)); let dim = Style::new().fg(Color::Rgb(80, 85, 95));
let path_style = Style::new().fg(Color::Rgb(120, 125, 135)); let path_style = Style::new().fg(Color::Rgb(120, 125, 135));