// https://github.com/WeirdConstructor/HexoDSP/blob/master/examples/cpal_demo_node_api.rs
use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
use crossbeam_channel::Receiver;
use hexodsp::{matrix_repr::MatrixRepr, *};
use std::io::Read;
use ticktock::Clock;

pub enum AudioMsg {
    Color([f32; 3]),
    Jump,
}

pub fn setup(event_channel: Receiver<AudioMsg>) {
    // Load the patch saved from the HexoSynth UI and build the matrix from it.
    let mut buf = String::new();
    std::fs::File::open("assets/init.hxy")
        .unwrap()
        .read_to_string(&mut buf)
        .unwrap();
    let matrix_repr: MatrixRepr = MatrixRepr::deserialize(&buf).unwrap();

    let (node_conf, node_exec) = new_node_engine();
    let mut matrix = Matrix::new(node_conf, 64, 64);
    matrix.from_repr(&matrix_repr).unwrap();

    start_backend(node_exec, move || {
        // Look up the parameters that will be driven from the event channel.
        let color_mix = NodeId::Mix3(0);
        let color_mix_r_gain = color_mix.inp_param("gain1").unwrap();
        let color_mix_g_gain = color_mix.inp_param("gain2").unwrap();
        let color_mix_b_gain = color_mix.inp_param("gain3").unwrap();
        let jump_ad = NodeId::Ad(0);
        let jump_ad_trig = jump_ad.inp_param("trig").unwrap();

        // Control loop at 10 Hz: poll frontend events and forward them as
        // parameter changes into the DSP matrix.
        for (_tick, _now) in Clock::framerate(10.0).iter() {
            // Reset the trigger each frame so a Jump only fires once.
            matrix.set_param(jump_ad_trig, (0.0).into());

            if let Ok(msg) = event_channel.try_recv() {
                match msg {
                    AudioMsg::Color([r, g, b]) => {
                        matrix.set_param(color_mix_r_gain, r.into());
                        matrix.set_param(color_mix_g_gain, g.into());
                        matrix.set_param(color_mix_b_gain, b.into());
                    }
                    AudioMsg::Jump => matrix.set_param(jump_ad_trig, (1.0).into()),
                }
            }
        }
    });
}

pub fn run<T, F: FnMut()>(
    device: &cpal::Device,
    config: &cpal::StreamConfig,
    mut node_exec: NodeExecutor,
    mut frontend_loop: F,
) where
    T: cpal::Sample,
{
    let sample_rate = config.sample_rate.0 as f32;
    let channels = config.channels as usize;

    node_exec.set_sample_rate(sample_rate);

    let input_bufs = [[0.0; hexodsp::dsp::MAX_BLOCK_SIZE]; 2];
    let mut outputbufs = [[0.0; hexodsp::dsp::MAX_BLOCK_SIZE]; 2];

    let err_fn = |err| eprintln!("an error occurred on stream: {}", err);

    let stream = device
        .build_output_stream(
            config,
            move |data: &mut [T], _: &cpal::OutputCallbackInfo| {
                let mut frames_left = data.len() / channels;
                let mut out_iter = data.chunks_mut(channels);

                // Apply any pending graph/parameter changes from the frontend.
                node_exec.process_graph_updates();

                // Process the cpal buffer in chunks of at most MAX_BLOCK_SIZE frames.
                while frames_left > 0 {
                    let cur_nframes = if frames_left >= hexodsp::dsp::MAX_BLOCK_SIZE {
                        hexodsp::dsp::MAX_BLOCK_SIZE
                    } else {
                        frames_left
                    };

                    let input = &[
                        &input_bufs[0][0..cur_nframes],
                        &input_bufs[1][0..cur_nframes],
                    ];

                    let split = outputbufs.split_at_mut(1);

                    let mut output = [
                        &mut ((split.0[0])[0..cur_nframes]),
                        &mut ((split.1[0])[0..cur_nframes]),
                    ];

                    let mut context = Context {
                        nframes: cur_nframes,
                        output: &mut output[..],
                        input,
                    };

                    context.output[0].fill(0.0);
                    context.output[1].fill(0.0);

                    node_exec.process(&mut context);

                    // Interleave the two DSP output channels into the cpal frames.
                    for i in 0..cur_nframes {
                        if let Some(frame) = out_iter.next() {
                            let mut ctx_chan = 0;
                            for sample in frame.iter_mut() {
                                let value: T =
                                    cpal::Sample::from::<f32>(&context.output[ctx_chan][i]);
                                *sample = value;
                                ctx_chan += 1;
                                // Clamp so any extra hardware channels repeat the last
                                // DSP channel instead of indexing past the output buffers.
                                if ctx_chan >= context.output.len() {
                                    ctx_chan = context.output.len() - 1;
                                }
                            }
                        }
                    }

                    frames_left -= cur_nframes;
                }
            },
            err_fn,
        )
        .unwrap();
    stream.play().unwrap();

    frontend_loop();
}

fn start_backend<F: FnMut()>(node_exec: NodeExecutor, frontend_loop: F) {
    let host = cpal::default_host();
    let device = host
        .default_output_device()
        .expect("Finding useable audio device");
    let config = device
        .default_output_config()
        .expect("A workable output config");

    match config.sample_format() {
        cpal::SampleFormat::F32 => run::<f32, F>(&device, &config.into(), node_exec, frontend_loop),
        cpal::SampleFormat::I16 => run::<i16, F>(&device, &config.into(), node_exec, frontend_loop),
        cpal::SampleFormat::U16 => run::<u16, F>(&device, &config.into(), node_exec, frontend_loop),
    };
}
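
// Illustrative usage sketch (not part of the upstream example): one way `setup()`
// might be driven. The function name `example_usage` and the event-producer
// thread are assumptions for illustration only; a real frontend would be a UI or
// game loop sending `AudioMsg` values. Note that `setup()` never returns, since
// it runs the 10 Hz control loop on the calling thread.
#[allow(dead_code)]
fn example_usage() {
    let (tx, rx) = crossbeam_channel::unbounded();

    // Stand-in event producer: periodically recolors the mix and fires a jump.
    std::thread::spawn(move || loop {
        let _ = tx.send(AudioMsg::Color([1.0, 0.5, 0.0]));
        let _ = tx.send(AudioMsg::Jump);
        std::thread::sleep(std::time::Duration::from_secs(1));
    });

    // Starts the cpal backend and blocks, polling `rx` for events.
    setup(rx);
}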