// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::VecDeque;

use audio_streams::BoxError;
use base::error;
use base::info;
use base::warn;
use winapi::shared::mmreg::SPEAKER_FRONT_LEFT;
use winapi::shared::mmreg::SPEAKER_FRONT_RIGHT;

use crate::r8b_create;
use crate::r8b_delete;
use crate::r8b_process;
use crate::win_audio_impl;
use crate::CR8BResampler;
use crate::ER8BResamplerRes_r8brr24;

// Increasing this constant won't do much now. In the future, we may want to read from the shm
// buffer multiple times in a row to prevent the chance of us running out of audio frames to write
// to the Windows audio engine buffer.
const PERIOD_COUNT: usize = 4;
pub const STEREO_CHANNEL_COUNT: usize = win_audio_impl::STEREO_CHANNEL_COUNT as usize;
const MONO_CHANNEL_COUNT: usize = win_audio_impl::MONO_CHANNEL_COUNT as usize;
pub const BYTES_PER_32FLOAT: usize = 4;
/// Android audio capture accepts 16bit int, 2 channels.
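/// That is 2 bytes per 16 bit sample * 2 channels = 4 bytes per frame.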
pub const ANDROID_CAPTURE_FRAME_SIZE_BYTES: usize = 4;

trait BitDepth {
    fn extend_le_bytes_to_vec(&self, resampled_output_buffer: &mut Vec<u8>);
}

impl BitDepth for f32 {
    fn extend_le_bytes_to_vec(&self, resampled_output_buffer: &mut Vec<u8>) {
        resampled_output_buffer.extend_from_slice(&self.to_le_bytes());
    }
}

impl BitDepth for i16 {
    fn extend_le_bytes_to_vec(&self, resampled_output_buffer: &mut Vec<u8>) {
        resampled_output_buffer.extend_from_slice(&self.to_le_bytes());
    }
}

struct ResamplerContainer<T: BitDepth> {
    left_resampler: CR8BResampler,
    right_resampler: CR8BResampler,
    ring_buf: VecDeque<T>,
    resampled_output_buffer: Vec<u8>,
}

impl<T: BitDepth> ResamplerContainer<T> {
    fn new(
        from_sample_rate: usize,
        to_sample_rate: usize,
        guest_period_in_frames: usize,
        ring_buf_size: usize,
        resample_output_buffer_size: usize,
    ) -> Self {
        ResamplerContainer {
            // If the from and to sample rates are the same, resampling will be a no-op.
            //
            // SAFETY: `r8b_create` returns a pointer that will be freed when this struct is
            // dropped.
            left_resampler: unsafe {
                r8b_create(
                    from_sample_rate as f64,
                    to_sample_rate as f64,
                    guest_period_in_frames as i32,
                    /* ReqTransBand= */ 2.0,
                    ER8BResamplerRes_r8brr24,
                )
            },
            // SAFETY: see above
            right_resampler: unsafe {
                r8b_create(
                    from_sample_rate as f64,
                    to_sample_rate as f64,
                    guest_period_in_frames as i32,
                    /* ReqTransBand= */ 2.0,
                    ER8BResamplerRes_r8brr24,
                )
            },
            ring_buf: VecDeque::with_capacity(ring_buf_size),
            resampled_output_buffer: Vec::<u8>::with_capacity(resample_output_buffer_size),
        }
    }

    /// Returns true if the next period is available.
    fn get_next_period_internal(&mut self, sample_threshold: usize) -> bool {
        self.resampled_output_buffer.clear();

        if self.ring_buf.len() >= sample_threshold {
            for current_sample in self.ring_buf.drain(..sample_threshold) {
                current_sample.extend_le_bytes_to_vec(&mut self.resampled_output_buffer);
            }
            true
        } else {
            false
        }
    }

    pub fn get_next_period_mut(&mut self, sample_threshold: usize) -> Option<&mut Vec<u8>> {
        if self.get_next_period_internal(sample_threshold) {
            Some(&mut self.resampled_output_buffer)
        } else {
            None
        }
    }

    pub fn get_next_period(&mut self, sample_threshold: usize) -> Option<&Vec<u8>> {
        self.get_next_period_mut(sample_threshold).map(|r| &*r)
    }

    fn sample_rate_convert_2_channels<'a>(
        &mut self,
        left_channel: &'a mut Vec<f64>,
        right_channel: &'a mut Vec<f64>,
    ) -> Option<(&'a [f64], &'a [f64])> {
        let left_channel_converted = self.sample_rate_convert_left_channel(left_channel);
        let right_channel_converted = self.sample_rate_convert_right_channel(right_channel);

        let converted = left_channel_converted.zip(right_channel_converted);
        if let Some((left_channel_converted, right_channel_converted)) = converted {
            if left_channel_converted.len() != right_channel_converted.len() {
                warn!(
                    "left_samples_available: {} does not match right_samples_available: {}",
                    left_channel_converted.len(),
                    right_channel_converted.len(),
                );
            }
        } else {
            info!("Skipping adding samples to ring buffer because of SRC priming.");
        }
        converted
    }

    fn sample_rate_convert_left_channel<'a>(
        &mut self,
        channel: &'a mut Vec<f64>,
    ) -> Option<&'a [f64]> {
        // SAFETY: `left_resampler` is a valid `CR8BResampler` pointer.
        unsafe { Self::sample_rate_convert_one_channel(channel, self.left_resampler) }
    }

    fn sample_rate_convert_right_channel<'a>(
        &mut self,
        channel: &'a mut Vec<f64>,
    ) -> Option<&'a [f64]> {
        // SAFETY: `right_resampler` is a valid `CR8BResampler` pointer.
        unsafe { Self::sample_rate_convert_one_channel(channel, self.right_resampler) }
    }

    /// # Safety
    ///
    /// This is safe if:
    ///   1. `resampler` is a valid pointer to the resampler object.
    ///   2. `r8b_process` sets `converted_buffer_raw` to point to a valid buffer and
    ///      `samples_available` is accurate.
    ///   3. `channel` remains alive when `converted_buffer_raw` is being processed.
    ///      `converted_buffer_raw` could point to the input `channel.as_mut_ptr()`. This is why the
    ///      param `channel` is passed as a reference instead of the vector being moved in here.
    unsafe fn sample_rate_convert_one_channel(
        channel: &mut Vec<f64>,
        resampler: CR8BResampler,
    ) -> Option<&[f64]> {
        let mut converted_buffer_raw: *mut f64 = std::ptr::null_mut();

        let samples_available = r8b_process(
            resampler,
            channel.as_mut_ptr(),
            channel.len() as i32,
            &mut converted_buffer_raw,
        );
        if samples_available != 0 {
            let channel_converted =
                std::slice::from_raw_parts(converted_buffer_raw, samples_available as usize);
            Some(channel_converted)
        } else {
            None
        }
    }
}

impl<T: BitDepth> Drop for ResamplerContainer<T> {
    fn drop(&mut self) {
        // SAFETY: This calls into an FFI binding that was generated properly. Also
        // `left_resampler` and `right_resampler` are instantiated in the constructor.
        unsafe {
            if !self.left_resampler.is_null() {
                r8b_delete(self.left_resampler);
            }
            if !self.right_resampler.is_null() {
                r8b_delete(self.right_resampler);
            }
        }
    }
}

/// Provides a ring buffer to hold audio samples coming from the guest. Also responsible for sample
/// rate conversion (SRC) if needed. We are assuming the guest's sample format is ALWAYS 16 bit
/// ints, 48kHz, and 2 channels because this is defined in Kiwi's Android Audio HAL, which
/// we control. We are also assuming that the audio engine will always take 32 bit
/// floats if we ask for the shared format through `GetMixFormat`, since it will convert
/// to 32 bit floats anyway if the format isn't already.
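///
/// A minimal usage sketch (the construction values mirror the unit tests at the bottom of this
/// file, and `guest_period_bytes` stands in for one guest period of interleaved 16 bit samples):
///
/// ```ignore
/// let mut resampler = PlaybackResamplerBuffer::new(
///     /* from_sample_rate= */ 48000,
///     /* to_sample_rate= */ 44100,
///     /* guest_period_in_frames= */ 480,
///     /* shared_audio_engine_period_in_frames= */ 448,
///     /* num_channels= */ 2,
///     /* channel_mask= */ None,
/// )
/// .unwrap();
/// // Feed one guest period of interleaved 16 bit stereo samples...
/// resampler.convert_and_add(&guest_period_bytes);
/// // ...then drain full engine periods (32 bit float bytes) once enough samples accumulated.
/// while let Some(period) = resampler.get_next_period() {
///     // write `period` to the Windows audio engine buffer
/// }
/// ```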
pub struct PlaybackResamplerBuffer {
    resampler_container: ResamplerContainer<f32>,
    pub shared_audio_engine_period_in_frames: usize,
    // The guest period in frames when converted to the audio engine's sample rate.
    pub guest_period_in_target_sample_rate_frames: usize,
    num_channels: usize,
    // Set to true if the resampler is priming. Priming means that the resampler needs to read in
    // multiple periods of audio samples in order to determine how to best sample rate convert.
    pub is_priming: bool,
}

impl PlaybackResamplerBuffer {
    pub fn new(
        from_sample_rate: usize,
        to_sample_rate: usize,
        guest_period_in_frames: usize,
        shared_audio_engine_period_in_frames: usize,
        num_channels: usize,
        channel_mask: Option<u32>,
    ) -> Result<Self, BoxError> {
        // Convert the engine period to milliseconds. Even though rounding happens, it shouldn't
        // distort the result.
        // The units work out to (frames * 1000 (ms / second)) / (frames / second), so the end
        // result is in milliseconds.
        if (shared_audio_engine_period_in_frames * 1000) / to_sample_rate < 10 {
            warn!("Windows Audio Engine period is less than 10ms");
        }
        // Divide by 100 to get the number of frames in 10ms, since that is the guest's period.
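        // For example, with to_sample_rate = 44100 (as in the unit tests below), the guest's 10ms
        // period converts to 44100 / 100 = 441 frames, and a 448 frame engine period works out to
        // (448 * 1000) / 44100 ~= 10ms, so the warning above does not fire.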
        let guest_period_in_target_sample_rate_frames = to_sample_rate / 100;

        soft_check_channel_mask(channel_mask);

        // Size chosen since it's a power of 2 minus 1. Anecdotally, this is the max capacity
        // the VecDeque has reached during runtime.
        let ring_buf_size = shared_audio_engine_period_in_frames * PERIOD_COUNT;
        // Each frame will have 64 bits, or 8 bytes.
        let resampled_output_buffer_size = shared_audio_engine_period_in_frames * 8;
        Ok(PlaybackResamplerBuffer {
            resampler_container: ResamplerContainer::<f32>::new(
                from_sample_rate,
                to_sample_rate,
                guest_period_in_frames,
                ring_buf_size,
                resampled_output_buffer_size,
            ),
            shared_audio_engine_period_in_frames,
            guest_period_in_target_sample_rate_frames,
            num_channels,
            is_priming: false,
        })
    }

    /// Converts the 16 bit int samples to the target sample rate and also adds them to the
    /// intermediate `ring_buf` if needed.
    ///
    /// Sets `is_priming` to `true` while the resampler is priming and back to `false` once it
    /// returns samples.
    pub fn convert_and_add(&mut self, input_buffer: &[u8]) {
        if input_buffer.len() % 4 != 0 {
            warn!("input buffer len {} not divisible by 4", input_buffer.len());
        }
        let mut left_channel = vec![0.0; input_buffer.len() / 4];
        let mut right_channel = vec![0.0; input_buffer.len() / 4];
        self.copy_every_other_and_convert_to_float(input_buffer, &mut left_channel, 0);
        self.copy_every_other_and_convert_to_float(input_buffer, &mut right_channel, 2);

        let (left_channel_converted, right_channel_converted) = match self
            .resampler_container
            .sample_rate_convert_2_channels(&mut left_channel, &mut right_channel)
        {
            Some((left_channel_converted, right_channel_converted)) => {
                (left_channel_converted, right_channel_converted)
            }
            // If no audio samples are returned, then the resampler is priming.
            None => {
                self.is_priming = true;
                return;
            }
        };

        // As mentioned above, we are assuming that the guest's format is 16 bit int. A 16 bit int
        // format gives a range from −32,768 to 32,767. To convert audio samples from int to float,
        // we need to map them to the range from -1.0 to 1.0, hence dividing by 32767 (2^15 - 1).
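        // For example, an i16 sample of 16384 maps to 16384.0 / 32767.0 ~= 0.5, and -32767 maps
        // to -1.0.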
        for (left_sample, right_sample) in left_channel_converted
            .iter()
            .zip(right_channel_converted.iter())
        {
            let left_normalized_sample = *left_sample as f32 / i16::MAX as f32;
            let right_normalized_sample = *right_sample as f32 / i16::MAX as f32;

            self.perform_channel_conversion(left_normalized_sample, right_normalized_sample);
        }

        // The resampler is not priming, since audio samples were returned.
        self.is_priming = false;
    }

    fn perform_channel_conversion(
        &mut self,
        left_normalized_sample: f32,
        right_normalized_sample: f32,
    ) {
        match self.num_channels {
            STEREO_CHANNEL_COUNT => {
                self.resampler_container
                    .ring_buf
                    .push_back(left_normalized_sample);
                self.resampler_container
                    .ring_buf
                    .push_back(right_normalized_sample);
            }
            MONO_CHANNEL_COUNT => {
                self.resampler_container
                    .ring_buf
                    .push_back((left_normalized_sample + right_normalized_sample) / 2.0);
            }
            _ => {
                // This will put the `left_normalized_sample` in SPEAKER_FRONT_LEFT and the
                // `right_normalized_sample` in SPEAKER_FRONT_RIGHT and then zero out the rest.
                self.resampler_container
                    .ring_buf
                    .push_back(left_normalized_sample);
                self.resampler_container
                    .ring_buf
                    .push_back(right_normalized_sample);
                for _ in 0..self.num_channels - 2 {
                    self.resampler_container.ring_buf.push_back(0.0);
                }
            }
        }
    }

    pub fn get_next_period(&mut self) -> Option<&Vec<u8>> {
        // This value is equal to one full audio engine period's worth of samples.
        let sample_threshold = self.shared_audio_engine_period_in_frames * self.num_channels;
        self.resampler_container.get_next_period(sample_threshold)
    }

    /// Separates the audio samples by channels.
    ///
    /// Audio samples coming from the guest are formatted similarly to how WAV files are formatted:
    /// http://soundfile.sapp.org/doc/WaveFormat/
    ///
    /// Audio samples from the guest are coming in as little endian format. Example:
    /// Channel: [  L  ] [  R  ] [ L   ] [   R   ]
    /// [u8]:    [14, 51, 45, 0, 23, 234, 123, 15]
    /// [i16]:   [13070] [ 45  ] [-5609] [ 3963  ]
    ///
    /// The sample rate converter takes the samples as floats.
    fn copy_every_other_and_convert_to_float(&self, source: &[u8], dest: &mut [f64], start: usize) {
        for (dest_index, x) in (start..source.len()).step_by(4).enumerate() {
            let sample_value = source[x] as i16 + ((source[x + 1] as i16) << 8);
            dest[dest_index] = sample_value.into();
        }
    }

    pub fn ring_buf_len(&self) -> usize {
        self.resampler_container.ring_buf.len()
    }
}

/// Similar to `PlaybackResamplerBuffer` except for audio capture. This structure assumes:
///
/// 1. That the format coming from the Windows audio engine will be a 32 bit float, any sample
///    rate, and any number of channels.
/// 2. The format Android requires is always 16 bit int, 48kHz, and 2 channels.
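///
/// A minimal usage sketch (the construction values mirror the capture unit tests at the bottom of
/// this file, and `engine_bytes` stands in for bytes drained from the Windows audio engine):
///
/// ```ignore
/// let mut capture = CaptureResamplerBuffer::new_input_resampler(
///     /* from_sample_rate= */ 48000,
///     /* to_sample_rate= */ 48000,
///     /* guest_period_in_frames= */ 480,
///     /* shared_audio_engine_channels= */ 2,
///     /* channel_mask= */ Some(SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT),
/// )
/// .unwrap();
/// // Push 32 bit float samples drained from the audio engine...
/// capture.convert_and_add(&engine_bytes);
/// // ...and read back 16 bit int, 2 channel periods for the guest.
/// if capture.is_next_period_available() {
///     let period = capture.get_next_period().unwrap();
/// }
/// ```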
pub struct CaptureResamplerBuffer {
    resampler_container: ResamplerContainer<i16>,
    // Minimum required size of samples in `ResamplerContainer` for it to be drained.
    pub sample_threshold: usize,
    pub shared_audio_engine_channels: usize,
}

impl CaptureResamplerBuffer {
    pub fn new_input_resampler(
        from_sample_rate: usize,
        to_sample_rate: usize,
        guest_period_in_frames: usize,
        shared_audio_engine_channels: usize,
        channel_mask: Option<u32>,
    ) -> Result<Self, BoxError> {
        // Arbitrarily chosen ring_buf size. For audio capture, we will be draining the buffer
        // from the Windows audio engine, so there can be many periods of audio samples in the
        // `ring_buf`.
        let ring_buf_size = guest_period_in_frames * 10;
        // The `resampled_output_buffer` will hold the format that Android wants
        // (16bit, 48kHz, 2 channels), so this will equal one guest period in bytes.
        let resampled_output_buffer_size =
            guest_period_in_frames * ANDROID_CAPTURE_FRAME_SIZE_BYTES;

        soft_check_channel_mask(channel_mask);

        Ok(CaptureResamplerBuffer {
            resampler_container: ResamplerContainer::<i16>::new(
                from_sample_rate,
                to_sample_rate,
                guest_period_in_frames,
                ring_buf_size,
                resampled_output_buffer_size,
            ),
            // This is one guest period's worth of samples (2 channels per frame).
            sample_threshold: guest_period_in_frames * 2,
            shared_audio_engine_channels,
        })
    }

    /// Assumes `input_buffer` is in a 32 bit float format and that the final bytes pushed into the
    /// `ring_buf` will be in a 16 bit int, 2 channel format.
    pub fn convert_and_add(&mut self, input_buffer: &[u8]) {
        match self.shared_audio_engine_channels {
            0 => {
                error!("`shared_audio_engine_channels` is 0, and that should never happen");
            }
            1 => {
                let mut converted_to_float = Self::convert_to_float(input_buffer);

                // For the mono channel case, since there are two sample rate converters in
                // `resampler_container`, the left one was arbitrarily chosen.
                let channel_converted = self
                    .resampler_container
                    .sample_rate_convert_left_channel(&mut converted_to_float);

                if let Some(channel_converted) = channel_converted {
                    for sample in channel_converted {
                        // SAFETY: `int_val` won't be infinity or NAN.
                        // Also its value can be represented by an int once its fractional
                        // part is removed.
                        let int_val = unsafe { (*sample).to_int_unchecked() };

                        // Copy bytes to create 2 channel frames.
                        self.resampler_container.ring_buf.push_back(int_val);
                        self.resampler_container.ring_buf.push_back(int_val);
                    }
                } else {
                    info!("Skipping adding samples to ring buffer because of SRC priming.");
                }
            }
            // If the format from the audio engine is >= 2 channels, then we only take the first
            // two channels in a frame and throw the rest out. This is because our Android audio
            // policy is hardcoded to only accept 2 channel formats.
            channels => {
                let mut left_channel =
                    vec![0.0; input_buffer.len() / (BYTES_PER_32FLOAT * channels)];
                let mut right_channel =
                    vec![0.0; input_buffer.len() / (BYTES_PER_32FLOAT * channels)];
                let bytes_per_frame = channels * BYTES_PER_32FLOAT;

                Self::copy_every_other(input_buffer, &mut left_channel, 0, bytes_per_frame);
                Self::copy_every_other(
                    input_buffer,
                    &mut right_channel,
                    BYTES_PER_32FLOAT,
                    bytes_per_frame,
                );

                let (left_channel_converted, right_channel_converted) = match self
                    .resampler_container
                    .sample_rate_convert_2_channels(&mut left_channel, &mut right_channel)
                {
                    Some((left_channel_converted, right_channel_converted)) => {
                        (left_channel_converted, right_channel_converted)
                    }
                    None => return,
                };

                for (left_sample, right_sample) in left_channel_converted
                    .iter()
                    .zip(right_channel_converted.iter())
                {
                    // SAFETY: `left_sample` and `right_sample` won't be infinity or NAN.
                    // Also their values can be represented by an int once their fractional
                    // parts are removed.
                    let left_val = unsafe { (*left_sample).to_int_unchecked() };
                    // SAFETY: ditto
                    let right_val = unsafe { (*right_sample).to_int_unchecked() };

                    self.resampler_container.ring_buf.push_back(left_val);
                    self.resampler_container.ring_buf.push_back(right_val);
                }
            }
        }
    }

    /// Since a stream of audio bytes will have its channels interleaved, this will separate
    /// a channel into its own slice.
    fn copy_every_other(source: &[u8], dest: &mut [f64], start: usize, bytes_per_frame: usize) {
        if (source.len() % BYTES_PER_32FLOAT) != 0 || (source.len() % bytes_per_frame != 0) {
            error!(
                "source length: {} isn't divisible by 4 (bytes in a 32 bit float) or by \
                   bytes_per_frame: {}",
                source.len(),
                bytes_per_frame
            );
            return;
        }
        for (dest_index, x) in (start..source.len()).step_by(bytes_per_frame).enumerate() {
            let sample_value =
                f32::from_le_bytes([source[x], source[x + 1], source[x + 2], source[x + 3]]);
            // Scale from the [-1.0, 1.0] float range up to the i16 range expected by the guest.
            dest[dest_index] = sample_value.into();
            dest[dest_index] *= i16::MAX as f64;
        }
    }

    fn convert_to_float(buffer: &[u8]) -> Vec<f64> {
        if buffer.len() % BYTES_PER_32FLOAT != 0 {
            error!("buffer length isn't divisible by 4 (bytes in a 32 bit float)");
            return vec![];
        }

        let mut result = vec![0.0; buffer.len() / BYTES_PER_32FLOAT];
        for (result_idx, i) in (0..buffer.len()).step_by(BYTES_PER_32FLOAT).enumerate() {
            // Scale from the [-1.0, 1.0] float range up to the i16 range expected by the guest.
            result[result_idx] =
                (f32::from_le_bytes([buffer[i], buffer[i + 1], buffer[i + 2], buffer[i + 3]])
                    * i16::MAX as f32)
                    .into();
        }

        result
    }

    pub fn get_next_period(&mut self) -> Option<&mut Vec<u8>> {
        self.resampler_container
            .get_next_period_mut(self.sample_threshold)
    }

    pub fn is_next_period_available(&self) -> bool {
        self.ring_buf_len() >= self.sample_threshold
    }

    pub fn ring_buf_len(&self) -> usize {
        self.resampler_container.ring_buf.len()
    }
}

fn soft_check_channel_mask(channel_mask: Option<u32>) {
    if let Some(channel_mask) = channel_mask {
        if channel_mask & SPEAKER_FRONT_LEFT == 0 || channel_mask & SPEAKER_FRONT_RIGHT == 0 {
            warn!(
                "channel_mask: {} does not have both front left and front right channels set. \
                 Will proceed to populate the first 2 channels anyway.",
                channel_mask
            );
        }
    }
}

#[cfg(test)]
mod test {
    use winapi::shared::mmreg::SPEAKER_BACK_LEFT;
    use winapi::shared::mmreg::SPEAKER_BACK_RIGHT;
    use winapi::shared::mmreg::SPEAKER_FRONT_CENTER;
    use winapi::shared::mmreg::SPEAKER_LOW_FREQUENCY;
    use winapi::shared::mmreg::SPEAKER_SIDE_LEFT;
    use winapi::shared::mmreg::SPEAKER_SIDE_RIGHT;

    use super::*;

    #[test]
    fn test_copy_every_other_and_convert_to_float() {
        let intermediate_src_buffer = PlaybackResamplerBuffer::new(
            48000, 44100, 480, 448, /* num_channel */ 2, /* channel_mask */ None,
        )
        .unwrap();

        let left_channel_bytes: Vec<u8> = [25u16, 256, 1000, 2400]
            .iter()
            .flat_map(|x| x.to_le_bytes())
            .collect();

        let mut result = vec![0.0; 2];
        intermediate_src_buffer.copy_every_other_and_convert_to_float(
            &left_channel_bytes,
            &mut result,
            0,
        );
        assert_vec_float_eq(result, [25.0, 1000.0].to_vec());

        let mut result2 = vec![0.0; 2];
        intermediate_src_buffer.copy_every_other_and_convert_to_float(
            &left_channel_bytes,
            &mut result2,
            2,
        );
        assert_vec_float_eq(result2, [256.0, 2400.0].to_vec());
    }
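
    // Companion sketch of a check that mirrors the little-endian example in the
    // `copy_every_other_and_convert_to_float` doc comment; the expected values follow from
    // reassembling each pair of bytes into an i16.
    #[test]
    fn test_copy_every_other_and_convert_to_float_matches_doc_example() {
        let intermediate_src_buffer = PlaybackResamplerBuffer::new(
            48000, 44100, 480, 448, /* num_channel */ 2, /* channel_mask */ None,
        )
        .unwrap();

        let interleaved_bytes = [14u8, 51, 45, 0, 23, 234, 123, 15];

        let mut left = vec![0.0; 2];
        intermediate_src_buffer.copy_every_other_and_convert_to_float(
            &interleaved_bytes,
            &mut left,
            0,
        );
        assert_vec_float_eq(left, [13070.0, -5609.0].to_vec());

        let mut right = vec![0.0; 2];
        intermediate_src_buffer.copy_every_other_and_convert_to_float(
            &interleaved_bytes,
            &mut right,
            2,
        );
        assert_vec_float_eq(right, [45.0, 3963.0].to_vec());
    }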

    fn assert_vec_float_eq(vec1: Vec<f64>, vec2: Vec<f64>) {
        assert_eq!(vec1.len(), vec2.len());
        for (i, val) in vec1.into_iter().enumerate() {
            assert!((val - vec2[i]).abs() < f64::EPSILON);
        }
    }

    /// Used to account for floating point arithmetic precision loss.
    fn assert_vec_float_almost_eq(vec1: Vec<f64>, vec2: Vec<f64>) {
        assert_eq!(vec1.len(), vec2.len());
        for (i, val) in vec1.into_iter().enumerate() {
            assert!((val - vec2[i]).abs() < 0.00001);
        }
    }

    #[test]
    fn test_get_next_period() {
        // Create an intermediate buffer that won't require resampling.
        let mut intermediate_src_buffer = PlaybackResamplerBuffer::new(
            48000, 48000, 480, 513, /* num_channel */ 2, /* channel_mask */ None,
        )
        .unwrap();

        assert!(intermediate_src_buffer.get_next_period().is_none());

        // 480 frames * 2 samples/frame * 2 bytes/sample = 1920 bytes
        let bytes_in_16bit_48k_hz = 1920;
        let buffer: Vec<u8> = vec![0; bytes_in_16bit_48k_hz];
        intermediate_src_buffer.convert_and_add(&buffer);

        assert!(intermediate_src_buffer.get_next_period().is_none());

        let buffer: Vec<u8> = vec![0; bytes_in_16bit_48k_hz];
        intermediate_src_buffer.convert_and_add(&buffer);

        assert!(intermediate_src_buffer.get_next_period().is_some());
    }

    #[test]
    fn test_perform_channel_conversion_mono() {
        let mut intermediate_src_buffer = PlaybackResamplerBuffer::new(
            /* from_sample_rate */ 48000, /* to_sample_rate */ 48000,
            /* guest_period_in_frames */ 480,
            /* shared_audio_engine_period_in_frames */ 513, /* num_channel */ 1,
            /* channel_mask */ None,
        )
        .unwrap();

        let two_channel_samples = [5.0, 5.0, 2.0, 8.0];

        for x in (0..two_channel_samples.len()).step_by(2) {
            let left = two_channel_samples[x];
            let right = two_channel_samples[x + 1];
            intermediate_src_buffer.perform_channel_conversion(left, right);
        }

        assert_eq!(intermediate_src_buffer.ring_buf_len(), 2);
        assert_eq!(
            intermediate_src_buffer.resampler_container.ring_buf,
            vec![5.0, 5.0]
        );
    }

    #[test]
    fn test_upmix_5_1() {
        let channel_mask = SPEAKER_FRONT_LEFT
            | SPEAKER_FRONT_RIGHT
            | SPEAKER_FRONT_CENTER
            | SPEAKER_LOW_FREQUENCY
            | SPEAKER_BACK_LEFT
            | SPEAKER_BACK_RIGHT;
        let mut intermediate_src_buffer = PlaybackResamplerBuffer::new(
            48000,
            44100,
            480,
            448,
            /* num_channel */ 6,
            /* channel_mask */ Some(channel_mask),
        )
        .unwrap();

        let two_channel_samples = [5.0, 5.0, 2.0, 8.0];
        for x in (0..two_channel_samples.len()).step_by(2) {
            let left = two_channel_samples[x];
            let right = two_channel_samples[x + 1];
            intermediate_src_buffer.perform_channel_conversion(left, right);
        }

        assert_eq!(intermediate_src_buffer.ring_buf_len(), 12);
        // Only populate FL and FR channels and zero out the rest.
        assert_eq!(
            intermediate_src_buffer.resampler_container.ring_buf,
            vec![5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 2.0, 8.0, 0.0, 0.0, 0.0, 0.0]
        );
    }

    #[test]
    fn test_upmix_7_1() {
        let channel_mask = SPEAKER_FRONT_LEFT
            | SPEAKER_FRONT_RIGHT
            | SPEAKER_FRONT_CENTER
            | SPEAKER_LOW_FREQUENCY
            | SPEAKER_BACK_LEFT
            | SPEAKER_BACK_RIGHT
            | SPEAKER_SIDE_LEFT
            | SPEAKER_SIDE_RIGHT;
        let mut intermediate_src_buffer = PlaybackResamplerBuffer::new(
            48000,
            44100,
            480,
            448,
            /* num_channel */ 8,
            /* channel_mask */ Some(channel_mask),
        )
        .unwrap();

        let two_channel_samples = [5.0, 5.0, 2.0, 8.0];
        for x in (0..two_channel_samples.len()).step_by(2) {
            let left = two_channel_samples[x];
            let right = two_channel_samples[x + 1];
            intermediate_src_buffer.perform_channel_conversion(left, right);
        }

        assert_eq!(intermediate_src_buffer.ring_buf_len(), 16);
        // Only populate FL and FR channels and zero out the rest.
        assert_eq!(
            intermediate_src_buffer.resampler_container.ring_buf,
            vec![5.0, 5.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 8.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        );
    }

    #[test]
    fn test_capture_copy_every_other_2_channels_returns_samples_from_one_channel() {
        const CHANNEL_COUNT: usize = 2;

        let incoming_bytes: Vec<u8> = [25.0f32, 256.0, 1000.0, 2400.0]
            .iter()
            .flat_map(|x| {
                // Convert so range is between -1 and 1, which is how audio samples in float are
                // represented.
                let decimal = x / (i16::MAX as f32);
                decimal.to_le_bytes()
            })
            .collect();
        let mut result = vec![0.0; 2];
        CaptureResamplerBuffer::copy_every_other(
            &incoming_bytes,
            &mut result,
            0,
            BYTES_PER_32FLOAT * CHANNEL_COUNT,
        );
        // Verify first channel is retrieved.
        assert_vec_float_almost_eq(result, [25.0, 1000.0].to_vec());

        let mut result2 = vec![0.0; 2];
        CaptureResamplerBuffer::copy_every_other(
            &incoming_bytes,
            &mut result2,
            BYTES_PER_32FLOAT,
            BYTES_PER_32FLOAT * CHANNEL_COUNT,
        );
        // Verify second channel is retrieved.
        assert_vec_float_almost_eq(result2, [256.0, 2400.0].to_vec());
    }
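
    // Companion sketch of a check for `convert_to_float`: 32 bit float little-endian bytes are
    // widened to f64 and scaled by i16::MAX, so 0.5 becomes 16383.5 and -1.0 becomes -32767.0.
    #[test]
    fn test_capture_convert_to_float_scales_to_i16_range() {
        let engine_bytes: Vec<u8> = [0.5f32, -1.0, 0.25]
            .iter()
            .flat_map(|x| x.to_le_bytes())
            .collect();

        let result = CaptureResamplerBuffer::convert_to_float(&engine_bytes);

        assert_vec_float_almost_eq(result, [16383.5, -32767.0, 8191.75].to_vec());
    }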

    #[test]
    fn test_capture_copy_every_other_surround_sound_returns_samples_from_one_channel() {
        const CHANNEL_COUNT: usize = 6;

        // Only the first two channels per frame will be used. Each frame has 6 channels.
        let incoming_bytes: Vec<u8> = [
            25.0f32, 256.0, 92.0, 56.0, 123.0, 93.0, 1000.0, 2400.0, 9.0, 1298.0, 4000.0, 34.0,
        ]
        .iter()
        .flat_map(|x| {
            // Convert so range is between -1 and 1, which is how audio samples in float are
            // represented.
            let decimal = x / (i16::MAX as f32);
            decimal.to_le_bytes()
        })
        .collect();
        let mut result = vec![0.0; 2];
        CaptureResamplerBuffer::copy_every_other(
            &incoming_bytes,
            &mut result,
            0,
            BYTES_PER_32FLOAT * CHANNEL_COUNT,
        );
        // Verify first channel is retrieved.
        assert_vec_float_almost_eq(result, [25.0, 1000.0].to_vec());

        let mut result2 = vec![0.0; 2];
        CaptureResamplerBuffer::copy_every_other(
            &incoming_bytes,
            &mut result2,
            BYTES_PER_32FLOAT,
            BYTES_PER_32FLOAT * CHANNEL_COUNT,
        );
        // Verify second channel is retrieved.
        assert_vec_float_almost_eq(result2, [256.0, 2400.0].to_vec());
    }

    #[test]
    fn test_capture_mono_channel_returns_some_audio_samples() {
        const CHANNEL_COUNT: usize = 1;

        let guest_period_in_frames = 480;
        let mut input_resampler = CaptureResamplerBuffer::new_input_resampler(
            48000,
            48000,
            guest_period_in_frames,
            CHANNEL_COUNT,
            Some(SPEAKER_FRONT_CENTER),
        )
        .unwrap();
        // Make sure no samples are added to the resampler buffer.
        assert!(input_resampler.get_next_period().is_none());

        let bytes_in_32f_48k_hz_1_channel =
            guest_period_in_frames * BYTES_PER_32FLOAT * CHANNEL_COUNT;
        let buffer: Vec<u8> = vec![1; bytes_in_32f_48k_hz_1_channel];
        input_resampler.convert_and_add(&buffer);

        // Verify that `get_next_period` returns something after audio samples are added.
        assert!(input_resampler.get_next_period().is_some());
    }

    #[test]
    fn test_capture_stereo_channel_returns_some_audio_samples() {
        const CHANNEL_COUNT: usize = 2;

        let guest_period_in_frames = 480;
        let mut input_resampler = CaptureResamplerBuffer::new_input_resampler(
            48000,
            48000,
            guest_period_in_frames,
            CHANNEL_COUNT,
            Some(SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT),
        )
        .unwrap();
        // Make sure no samples are added to the resampler buffer.
        assert!(input_resampler.get_next_period().is_none());

        let bytes_in_32f_48k_hz_2_channel =
            guest_period_in_frames * BYTES_PER_32FLOAT * CHANNEL_COUNT;
        let buffer: Vec<u8> = vec![1; bytes_in_32f_48k_hz_2_channel];
        input_resampler.convert_and_add(&buffer);

        // Verify that `get_next_period` returns something after audio samples are added.
        assert!(input_resampler.get_next_period().is_some());
    }

    #[test]
    fn test_capture_surround_sound_returns_some_audio_samples() {
        const CHANNEL_COUNT: usize = 6;

        let guest_period_in_frames = 480;
        let mut input_resampler = CaptureResamplerBuffer::new_input_resampler(
            48000,
            48000,
            guest_period_in_frames,
            CHANNEL_COUNT,
            Some(SPEAKER_FRONT_CENTER),
        )
        .unwrap();
        // Make sure no samples are added to the resampler buffer.
        assert!(input_resampler.get_next_period().is_none());

        let bytes_in_32f_48k_hz_1_channel =
            guest_period_in_frames * BYTES_PER_32FLOAT * CHANNEL_COUNT;
        let buffer: Vec<u8> = vec![1; bytes_in_32f_48k_hz_1_channel];
        input_resampler.convert_and_add(&buffer);
        // Verify that `get_next_period` returns something after audio samples are added.
        assert!(input_resampler.get_next_period().is_some());
    }
}