/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/include/audio_device.h"

#include <algorithm>
#include <cstdio>
#include <cstring>
#include <limits>
#include <list>
#include <memory>
#include <numeric>
#include <string>
#include <vector>

#include "api/scoped_refptr.h"
#include "api/units/time_delta.h"
#include "modules/audio_device/include/mock_audio_transport.h"
#include "rtc_base/arraysize.h"
#include "rtc_base/event.h"
#include "rtc_base/synchronization/mutex.h"
#include "rtc_base/time_utils.h"
#include "sdk/android/generated_native_unittests_jni/BuildInfo_jni.h"
#include "sdk/android/native_api/audio_device_module/audio_device_android.h"
#include "sdk/android/native_unittests/application_context_provider.h"
#include "sdk/android/src/jni/audio_device/audio_common.h"
#include "sdk/android/src/jni/audio_device/audio_device_module.h"
#include "sdk/android/src/jni/audio_device/opensles_common.h"
#include "sdk/android/src/jni/jni_helpers.h"
#include "test/gmock.h"
#include "test/gtest.h"
#include "test/testsupport/file_utils.h"

using ::testing::_;
using ::testing::AtLeast;
using ::testing::Gt;
using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::NotNull;
using ::testing::Return;

// #define ENABLE_DEBUG_PRINTF
#ifdef ENABLE_DEBUG_PRINTF
#define PRINTD(...) fprintf(stderr, __VA_ARGS__);
#else
#define PRINTD(...) ((void)0)
#endif
#define PRINT(...) fprintf(stderr, __VA_ARGS__);
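// PRINT() always logs to stderr; PRINTD() is a no-op unless
// ENABLE_DEBUG_PRINTF is defined above.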

namespace webrtc {

namespace jni {

// Number of callbacks (input or output) the test waits for before we set
// an event indicating that the test was OK.
static const size_t kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static constexpr TimeDelta kTestTimeOut = TimeDelta::Seconds(10);
// Average number of audio callbacks per second assuming 10ms packet size.
static const size_t kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 5;
static const size_t kBitsPerSample = 16;
static const size_t kBytesPerSample = kBitsPerSample / 8;
// Run the full-duplex test during this time (unit is in seconds).
// Note that the first `kNumIgnoreFirstCallbacks` callbacks are ignored.
static constexpr TimeDelta kFullDuplexTime = TimeDelta::Seconds(5);
// Wait for the callback sequence to stabilize by ignoring this number of
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
static const size_t kNumIgnoreFirstCallbacks = 50;
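// At 100 callbacks per second, ignoring 50 callbacks corresponds to about
// half a second of audio.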
// Sets the number of impulses per second in the latency test.
static const int kImpulseFrequencyInHz = 1;
// Length of round-trip latency measurements. Number of transmitted impulses
// is kImpulseFrequencyInHz * kMeasureLatencyTime - 1.
static constexpr TimeDelta kMeasureLatencyTime = TimeDelta::Seconds(11);
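// With the values above, 1 Hz * 11 s - 1 = 10 impulses are measured per run.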
// Utilized in round-trip latency measurements to avoid capturing noise samples.
static const int kImpulseThreshold = 1000;
static const char kTag[] = "[..........] ";

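// Bit flags selecting which directions a test exercises; values can be
// combined, e.g. (kPlayout | kRecording) for full-duplex tests.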
enum TransportType {
  kPlayout = 0x1,
  kRecording = 0x2,
};

// Interface for processing the audio stream. Real implementations can e.g.
// run audio in loopback, read audio from a file or perform latency
// measurements.
class AudioStreamInterface {
 public:
  virtual void Write(const void* source, size_t num_frames) = 0;
  virtual void Read(void* destination, size_t num_frames) = 0;

 protected:
  virtual ~AudioStreamInterface() {}
};

// Reads audio samples from a PCM file where the file is stored in memory at
// construction.
class FileAudioStream : public AudioStreamInterface {
 public:
  FileAudioStream(size_t num_callbacks,
                  const std::string& file_name,
                  int sample_rate)
      : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
    file_size_in_bytes_ = test::GetFileSize(file_name);
    sample_rate_ = sample_rate;
    EXPECT_GE(file_size_in_callbacks(), num_callbacks)
        << "Size of test file is not large enough to last during the test.";
    const size_t num_16bit_samples =
        test::GetFileSize(file_name) / kBytesPerSample;
    file_.reset(new int16_t[num_16bit_samples]);
    FILE* audio_file = fopen(file_name.c_str(), "rb");
    EXPECT_NE(audio_file, nullptr);
    size_t num_samples_read =
        fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
    EXPECT_EQ(num_samples_read, num_16bit_samples);
    fclose(audio_file);
  }

  // AudioStreamInterface::Write() is not implemented.
  void Write(const void* source, size_t num_frames) override {}

  // Read samples from the file stored in memory (at construction) and copy
  // `num_frames` (<=> 10ms) to the `destination` byte buffer.
  void Read(void* destination, size_t num_frames) override {
    memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
           num_frames * sizeof(int16_t));
    file_pos_ += num_frames;
  }

  int file_size_in_seconds() const {
    return static_cast<int>(file_size_in_bytes_ /
                            (kBytesPerSample * sample_rate_));
  }
  size_t file_size_in_callbacks() const {
    return file_size_in_seconds() * kNumCallbacksPerSecond;
  }

 private:
  size_t file_size_in_bytes_;
  int sample_rate_;
  std::unique_ptr<int16_t[]> file_;
  size_t file_pos_;
};

// Simple first in first out (FIFO) class that wraps a list of 16-bit audio
// buffers of fixed size and allows Write and Read operations. The idea is to
// store recorded audio buffers (using Write) and then read (using Read) these
// stored buffers with as short a delay as possible when the audio layer needs
// data to play out. The number of buffers in the FIFO will stabilize under
// normal conditions since there will be a balance between Write and Read calls.
// The container is a std::list and access is protected with a lock since both
// sides (playout and recording) are driven by their own threads.
class FifoAudioStream : public AudioStreamInterface {
 public:
  explicit FifoAudioStream(size_t frames_per_buffer)
      : frames_per_buffer_(frames_per_buffer),
        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
        fifo_(new AudioBufferList),
        largest_size_(0),
        total_written_elements_(0),
        write_count_(0) {
    EXPECT_NE(fifo_.get(), nullptr);
  }

  ~FifoAudioStream() { Flush(); }

  // Allocate new memory, copy `num_frames` samples from `source` into memory
  // and add a pointer to the memory location to the end of the list.
  // Increases the size of the FIFO by one element.
  void Write(const void* source, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    PRINTD("+");
    if (write_count_++ < kNumIgnoreFirstCallbacks) {
      return;
    }
    int16_t* memory = new int16_t[frames_per_buffer_];
    memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
    MutexLock lock(&lock_);
    fifo_->push_back(memory);
    const size_t size = fifo_->size();
    if (size > largest_size_) {
      largest_size_ = size;
      PRINTD("(%zu)", largest_size_);
    }
    total_written_elements_ += size;
  }

  // Read a pointer to a data buffer from the front of the list, copy
  // `num_frames` of stored data into `destination` and delete the utilized
  // memory allocation. Decreases the size of the FIFO by one element.
  void Read(void* destination, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    PRINTD("-");
    MutexLock lock(&lock_);
    if (fifo_->empty()) {
      memset(destination, 0, bytes_per_buffer_);
    } else {
      int16_t* memory = fifo_->front();
      fifo_->pop_front();
      memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
      delete[] memory;
    }
  }

  size_t size() const { return fifo_->size(); }

  size_t largest_size() const { return largest_size_; }

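  // Returns the average FIFO size seen by Write(), rounded to the nearest
  // integer; the 0.5 term turns the integer truncation into rounding
  // (e.g. an average of 2.6 buffers is reported as 3).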
  size_t average_size() const {
    return (total_written_elements_ == 0)
               ? 0.0
               : 0.5 + static_cast<float>(total_written_elements_) /
                           (write_count_ - kNumIgnoreFirstCallbacks);
  }

 private:
  void Flush() {
    for (auto it = fifo_->begin(); it != fifo_->end(); ++it) {
      delete[] *it;
    }
    fifo_->clear();
  }

  using AudioBufferList = std::list<int16_t*>;
  Mutex lock_;
  const size_t frames_per_buffer_;
  const size_t bytes_per_buffer_;
  std::unique_ptr<AudioBufferList> fifo_;
  size_t largest_size_;
  size_t total_written_elements_;
  size_t write_count_;
};

// Inserts periodic impulses and measures the latency between the time of
// transmission and the time of receiving the same impulse.
// Usage requires special hardware called an Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
class LatencyMeasuringAudioStream : public AudioStreamInterface {
 public:
  explicit LatencyMeasuringAudioStream(size_t frames_per_buffer)
      : frames_per_buffer_(frames_per_buffer),
        bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
        play_count_(0),
        rec_count_(0),
        pulse_time_(0) {}

  // Insert periodic impulses in the first two samples of `destination`.
  void Read(void* destination, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    if (play_count_ == 0) {
      PRINT("[");
    }
    play_count_++;
    memset(destination, 0, bytes_per_buffer_);
    if (play_count_ % (kNumCallbacksPerSecond / kImpulseFrequencyInHz) == 0) {
      if (pulse_time_ == 0) {
        pulse_time_ = rtc::TimeMillis();
      }
      PRINT(".");
      const int16_t impulse = std::numeric_limits<int16_t>::max();
      int16_t* ptr16 = static_cast<int16_t*>(destination);
      for (size_t i = 0; i < 2; ++i) {
        ptr16[i] = impulse;
      }
    }
  }

  // Detect received impulses in `source`, derive the time between transmission
  // and detection and add the calculated delay to the list of latencies.
  void Write(const void* source, size_t num_frames) override {
    ASSERT_EQ(num_frames, frames_per_buffer_);
    rec_count_++;
    if (pulse_time_ == 0) {
      // Avoid detection of a new impulse response until a new impulse has
      // been transmitted (sets `pulse_time_` to a value larger than zero).
      return;
    }
    const int16_t* ptr16 = static_cast<const int16_t*>(source);
    std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
    // Find max value in the audio buffer.
    int max = *std::max_element(vec.begin(), vec.end());
    // Find index (element position in vector) of the max element.
    int index_of_max =
        std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
    if (max > kImpulseThreshold) {
      PRINTD("(%d,%d)", max, index_of_max);
      int64_t now_time = rtc::TimeMillis();
      int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
      PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
      PRINTD("[%d]", extra_delay);
      // Total latency is the difference between transmit time and detection
      // time plus the extra delay within the buffer in which we detected the
      // received impulse. It is transmitted at sample 0 but can be received
      // at sample N where N > 0. The term `extra_delay` accounts for N and it
      // is a value between 0 and 10ms.
      latencies_.push_back(now_time - pulse_time_ + extra_delay);
      pulse_time_ = 0;
    } else {
      PRINTD("-");
    }
  }

  size_t num_latency_values() const { return latencies_.size(); }

  int min_latency() const {
    if (latencies_.empty())
      return 0;
    return *std::min_element(latencies_.begin(), latencies_.end());
  }

  int max_latency() const {
    if (latencies_.empty())
      return 0;
    return *std::max_element(latencies_.begin(), latencies_.end());
  }

  int average_latency() const {
    if (latencies_.empty())
      return 0;
    return 0.5 + static_cast<double>(
                     std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
                     latencies_.size();
  }

  void PrintResults() const {
    PRINT("] ");
    for (auto it = latencies_.begin(); it != latencies_.end(); ++it) {
      PRINT("%d ", *it);
    }
    PRINT("\n");
    PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
          max_latency(), average_latency());
  }

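  // Converts a sample index within a 10 ms buffer to milliseconds, rounded to
  // the nearest integer. Example: with 480 frames per buffer, an impulse
  // detected at index 240 maps to 10 * (240 / 480) + 0.5 -> 5 ms extra delay.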
  int IndexToMilliseconds(double index) const {
    return static_cast<int>(10.0 * (index / frames_per_buffer_) + 0.5);
  }

 private:
  const size_t frames_per_buffer_;
  const size_t bytes_per_buffer_;
  size_t play_count_;
  size_t rec_count_;
  int64_t pulse_time_;
  std::vector<int> latencies_;
};

// Mocks the AudioTransport object and proxies actions for the two callbacks
// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
// of AudioStreamInterface.
class MockAudioTransportAndroid : public test::MockAudioTransport {
 public:
  explicit MockAudioTransportAndroid(int type)
      : num_callbacks_(0),
        type_(type),
        play_count_(0),
        rec_count_(0),
        audio_stream_(nullptr) {}

  virtual ~MockAudioTransportAndroid() {}

  // Set default actions of the mock object. We are delegating to fake
  // implementations (of AudioStreamInterface) here.
  void HandleCallbacks(rtc::Event* test_is_done,
                       AudioStreamInterface* audio_stream,
                       int num_callbacks) {
    test_is_done_ = test_is_done;
    audio_stream_ = audio_stream;
    num_callbacks_ = num_callbacks;
    if (play_mode()) {
      ON_CALL(*this, NeedMorePlayData(_, _, _, _, _, _, _, _))
          .WillByDefault(
              Invoke(this, &MockAudioTransportAndroid::RealNeedMorePlayData));
    }
    if (rec_mode()) {
      ON_CALL(*this, RecordedDataIsAvailable(_, _, _, _, _, _, _, _, _, _))
          .WillByDefault(Invoke(
              this, &MockAudioTransportAndroid::RealRecordedDataIsAvailable));
    }
  }

  int32_t RealRecordedDataIsAvailable(const void* audioSamples,
                                      const size_t nSamples,
                                      const size_t nBytesPerSample,
                                      const size_t nChannels,
                                      const uint32_t samplesPerSec,
                                      const uint32_t totalDelayMS,
                                      const int32_t clockDrift,
                                      const uint32_t currentMicLevel,
                                      const bool keyPressed,
                                      const uint32_t& newMicLevel) {
    EXPECT_TRUE(rec_mode()) << "No test is expecting these callbacks.";
    rec_count_++;
    // Process the recorded audio stream if an AudioStreamInterface
    // implementation exists.
    if (audio_stream_) {
      audio_stream_->Write(audioSamples, nSamples);
    }
    if (ReceivedEnoughCallbacks()) {
      test_is_done_->Set();
    }
    return 0;
  }

  int32_t RealNeedMorePlayData(const size_t nSamples,
                               const size_t nBytesPerSample,
                               const size_t nChannels,
                               const uint32_t samplesPerSec,
                               void* audioSamples,
                               size_t& nSamplesOut,  // NOLINT
                               int64_t* elapsed_time_ms,
                               int64_t* ntp_time_ms) {
    EXPECT_TRUE(play_mode()) << "No test is expecting these callbacks.";
    play_count_++;
    nSamplesOut = nSamples;
    // Read (possibly processed) audio stream samples to be played out if an
    // AudioStreamInterface implementation exists.
    if (audio_stream_) {
      audio_stream_->Read(audioSamples, nSamples);
    }
    if (ReceivedEnoughCallbacks()) {
      test_is_done_->Set();
    }
    return 0;
  }

  bool ReceivedEnoughCallbacks() {
    bool recording_done = false;
    if (rec_mode())
      recording_done = rec_count_ >= num_callbacks_;
    else
      recording_done = true;

    bool playout_done = false;
    if (play_mode())
      playout_done = play_count_ >= num_callbacks_;
    else
      playout_done = true;

    return recording_done && playout_done;
  }

  bool play_mode() const { return type_ & kPlayout; }
  bool rec_mode() const { return type_ & kRecording; }

 private:
  rtc::Event* test_is_done_;
  size_t num_callbacks_;
  int type_;
  size_t play_count_;
  size_t rec_count_;
  AudioStreamInterface* audio_stream_;
  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
};

// AudioDeviceTest test fixture.
class AudioDeviceTest : public ::testing::Test {
 protected:
  AudioDeviceTest() {
    // One-time initialization of JVM and application context. Ensures that we
    // can do calls between C++ and Java. Initializes both Java and OpenSL ES
    // implementations.
    // Creates an audio device using a default audio layer.
    jni_ = AttachCurrentThreadIfNeeded();
    context_ = test::GetAppContextForTest(jni_);
    audio_device_ = CreateJavaAudioDeviceModule(jni_, context_.obj());
    EXPECT_NE(audio_device_.get(), nullptr);
    EXPECT_EQ(0, audio_device_->Init());
    audio_manager_ = GetAudioManager(jni_, context_);
    UpdateParameters();
  }
  virtual ~AudioDeviceTest() { EXPECT_EQ(0, audio_device_->Terminate()); }

  int total_delay_ms() const { return 10; }

  void UpdateParameters() {
    int input_sample_rate = GetDefaultSampleRate(jni_, audio_manager_);
    int output_sample_rate = GetDefaultSampleRate(jni_, audio_manager_);
    bool stereo_playout_is_available;
    bool stereo_record_is_available;
    audio_device_->StereoPlayoutIsAvailable(&stereo_playout_is_available);
    audio_device_->StereoRecordingIsAvailable(&stereo_record_is_available);
    GetAudioParameters(jni_, context_, audio_manager_, input_sample_rate,
                       output_sample_rate, stereo_playout_is_available,
                       stereo_record_is_available, &input_parameters_,
                       &output_parameters_);
  }

  void SetActiveAudioLayer(AudioDeviceModule::AudioLayer audio_layer) {
    audio_device_ = CreateAudioDevice(audio_layer);
    EXPECT_NE(audio_device_.get(), nullptr);
    EXPECT_EQ(0, audio_device_->Init());
    UpdateParameters();
  }

  int playout_sample_rate() const { return output_parameters_.sample_rate(); }
  int record_sample_rate() const { return input_parameters_.sample_rate(); }
  size_t playout_channels() const { return output_parameters_.channels(); }
  size_t record_channels() const { return input_parameters_.channels(); }
  size_t playout_frames_per_10ms_buffer() const {
    return output_parameters_.frames_per_10ms_buffer();
  }
  size_t record_frames_per_10ms_buffer() const {
    return input_parameters_.frames_per_10ms_buffer();
  }

  rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
    return audio_device_;
  }

  rtc::scoped_refptr<AudioDeviceModule> CreateAudioDevice(
      AudioDeviceModule::AudioLayer audio_layer) {
#if defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
    if (audio_layer == AudioDeviceModule::kAndroidAAudioAudio) {
      return rtc::scoped_refptr<AudioDeviceModule>(
          CreateAAudioAudioDeviceModule(jni_, context_.obj()));
    }
#endif
    if (audio_layer == AudioDeviceModule::kAndroidJavaAudio) {
      return rtc::scoped_refptr<AudioDeviceModule>(
          CreateJavaAudioDeviceModule(jni_, context_.obj()));
    } else if (audio_layer == AudioDeviceModule::kAndroidOpenSLESAudio) {
      return rtc::scoped_refptr<AudioDeviceModule>(
          CreateOpenSLESAudioDeviceModule(jni_, context_.obj()));
    } else if (audio_layer ==
               AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio) {
      return rtc::scoped_refptr<AudioDeviceModule>(
          CreateJavaInputAndOpenSLESOutputAudioDeviceModule(jni_,
                                                            context_.obj()));
    } else {
      return nullptr;
    }
  }

  // Returns a file name relative to the resource root given a sample rate.
  std::string GetFileName(int sample_rate) {
    EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
    char fname[64];
    snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
             sample_rate / 1000);
    std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
    EXPECT_TRUE(test::FileExists(file_name));
#ifdef ENABLE_PRINTF
    PRINT("file name: %s\n", file_name.c_str());
    const size_t bytes = test::GetFileSize(file_name);
    PRINT("file size: %zu [bytes]\n", bytes);
    PRINT("file size: %zu [samples]\n", bytes / kBytesPerSample);
    const int seconds =
        static_cast<int>(bytes / (sample_rate * kBytesPerSample));
    PRINT("file size: %d [secs]\n", seconds);
    PRINT("file size: %zu [callbacks]\n", seconds * kNumCallbacksPerSecond);
#endif
    return file_name;
  }

  AudioDeviceModule::AudioLayer GetActiveAudioLayer() const {
    AudioDeviceModule::AudioLayer audio_layer;
    EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
    return audio_layer;
  }

  int TestDelayOnAudioLayer(
      const AudioDeviceModule::AudioLayer& layer_to_test) {
    rtc::scoped_refptr<AudioDeviceModule> audio_device;
    audio_device = CreateAudioDevice(layer_to_test);
    EXPECT_NE(audio_device.get(), nullptr);
    uint16_t playout_delay;
    EXPECT_EQ(0, audio_device->PlayoutDelay(&playout_delay));
    return playout_delay;
  }

  AudioDeviceModule::AudioLayer TestActiveAudioLayer(
      const AudioDeviceModule::AudioLayer& layer_to_test) {
    rtc::scoped_refptr<AudioDeviceModule> audio_device;
    audio_device = CreateAudioDevice(layer_to_test);
    EXPECT_NE(audio_device.get(), nullptr);
    AudioDeviceModule::AudioLayer active;
    EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
    return active;
  }

  // One way to ensure that the engine object is valid is to create an
  // SL Engine interface since it exposes creation methods of all the OpenSL ES
  // object types and it is only supported on the engine object. This method
  // also verifies that the engine interface supports at least one interface.
  // Note that the test below is not a full test of the SLEngineItf object
  // but only a simple sanity check that the global engine object is OK.
  void ValidateSLEngine(SLObjectItf engine_object) {
    EXPECT_NE(nullptr, engine_object);
    // Get the SL Engine interface which is exposed by the engine object.
    SLEngineItf engine;
    SLresult result =
        (*engine_object)->GetInterface(engine_object, SL_IID_ENGINE, &engine);
    EXPECT_EQ(result, SL_RESULT_SUCCESS) << "GetInterface() on engine failed";
    // Ensure that the SL Engine interface exposes at least one interface.
    SLuint32 object_id = SL_OBJECTID_ENGINE;
    SLuint32 num_supported_interfaces = 0;
    result = (*engine)->QueryNumSupportedInterfaces(engine, object_id,
                                                    &num_supported_interfaces);
    EXPECT_EQ(result, SL_RESULT_SUCCESS)
        << "QueryNumSupportedInterfaces() failed";
    EXPECT_GE(num_supported_interfaces, 1u);
  }

  // Volume control is currently only supported for the Java output audio
  // layer. For OpenSL ES, the internal stream volume is always at the maximum
  // level and there is no need for this test to set it to max.
  bool AudioLayerSupportsVolumeControl() const {
    return GetActiveAudioLayer() == AudioDeviceModule::kAndroidJavaAudio;
  }

  void SetMaxPlayoutVolume() {
    if (!AudioLayerSupportsVolumeControl())
      return;
    uint32_t max_volume;
    EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
    EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
  }

  void DisableBuiltInAECIfAvailable() {
    if (audio_device()->BuiltInAECIsAvailable()) {
      EXPECT_EQ(0, audio_device()->EnableBuiltInAEC(false));
    }
  }

  void StartPlayout() {
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_EQ(0, audio_device()->InitPlayout());
    EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
    EXPECT_EQ(0, audio_device()->StartPlayout());
    EXPECT_TRUE(audio_device()->Playing());
  }

  void StopPlayout() {
    EXPECT_EQ(0, audio_device()->StopPlayout());
    EXPECT_FALSE(audio_device()->Playing());
    EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
  }

  void StartRecording() {
    EXPECT_FALSE(audio_device()->RecordingIsInitialized());
    EXPECT_FALSE(audio_device()->Recording());
    EXPECT_EQ(0, audio_device()->InitRecording());
    EXPECT_TRUE(audio_device()->RecordingIsInitialized());
    EXPECT_EQ(0, audio_device()->StartRecording());
    EXPECT_TRUE(audio_device()->Recording());
  }

  void StopRecording() {
    EXPECT_EQ(0, audio_device()->StopRecording());
    EXPECT_FALSE(audio_device()->Recording());
  }

  int GetMaxSpeakerVolume() const {
    uint32_t max_volume(0);
    EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
    return max_volume;
  }

  int GetMinSpeakerVolume() const {
    uint32_t min_volume(0);
    EXPECT_EQ(0, audio_device()->MinSpeakerVolume(&min_volume));
    return min_volume;
  }

  int GetSpeakerVolume() const {
    uint32_t volume(0);
    EXPECT_EQ(0, audio_device()->SpeakerVolume(&volume));
    return volume;
  }

  JNIEnv* jni_;
  ScopedJavaLocalRef<jobject> context_;
  rtc::Event test_is_done_;
  rtc::scoped_refptr<AudioDeviceModule> audio_device_;
  ScopedJavaLocalRef<jobject> audio_manager_;
  AudioParameters output_parameters_;
  AudioParameters input_parameters_;
};

TEST_F(AudioDeviceTest, ConstructDestruct) {
  // Using the test fixture to create and destruct the audio device module.
}

// Verify that it is possible to explicitly create the supported types of ADMs.
// These tests override the default selection of native audio layer by ignoring
// whether the device supports low-latency output or not.
TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForCombinedJavaOpenSLCombo) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
  AudioDeviceModule::AudioLayer active_layer =
      TestActiveAudioLayer(expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}

TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForJavaInBothDirections) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidJavaAudio;
  AudioDeviceModule::AudioLayer active_layer =
      TestActiveAudioLayer(expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}

TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForOpenSLInBothDirections) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidOpenSLESAudio;
  AudioDeviceModule::AudioLayer active_layer =
      TestActiveAudioLayer(expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}

// TODO(bugs.webrtc.org/8914)
// TODO(phensman): Add test for AAudio/Java combination when this combination
// is supported.
#if !defined(WEBRTC_AUDIO_DEVICE_INCLUDE_ANDROID_AAUDIO)
#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
  DISABLED_CorrectAudioLayerIsUsedForAAudioInBothDirections
#else
#define MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections \
  CorrectAudioLayerIsUsedForAAudioInBothDirections
#endif
TEST_F(AudioDeviceTest,
       MAYBE_CorrectAudioLayerIsUsedForAAudioInBothDirections) {
  AudioDeviceModule::AudioLayer expected_layer =
      AudioDeviceModule::kAndroidAAudioAudio;
  AudioDeviceModule::AudioLayer active_layer =
      TestActiveAudioLayer(expected_layer);
  EXPECT_EQ(expected_layer, active_layer);
}

// The Android ADM supports two different delay reporting modes. One for the
// low-latency output path (in combination with OpenSL ES), and one for the
// high-latency output path (Java backends in both directions). These two tests
// verify that the audio device reports the correct delay estimate given the
// selected audio layer. Note that this delay estimate is only utilized if the
// HW AEC is disabled.
// Delay should be 75 ms in high latency and 25 ms in low latency.
TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForHighLatencyOutputPath) {
  EXPECT_EQ(75, TestDelayOnAudioLayer(AudioDeviceModule::kAndroidJavaAudio));
}

TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForLowLatencyOutputPath) {
  EXPECT_EQ(25,
            TestDelayOnAudioLayer(
                AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio));
}

TEST_F(AudioDeviceTest, InitTerminate) {
  // Initialization is part of the test fixture.
  EXPECT_TRUE(audio_device()->Initialized());
  EXPECT_EQ(0, audio_device()->Terminate());
  EXPECT_FALSE(audio_device()->Initialized());
}

TEST_F(AudioDeviceTest, Devices) {
  // Device enumeration is not supported. Verify fixed values only.
  EXPECT_EQ(1, audio_device()->PlayoutDevices());
  EXPECT_EQ(1, audio_device()->RecordingDevices());
}

TEST_F(AudioDeviceTest, IsAcousticEchoCancelerSupported) {
  PRINT("%sAcoustic Echo Canceler support: %s\n", kTag,
        audio_device()->BuiltInAECIsAvailable() ? "Yes" : "No");
}

TEST_F(AudioDeviceTest, IsNoiseSuppressorSupported) {
  PRINT("%sNoise Suppressor support: %s\n", kTag,
        audio_device()->BuiltInNSIsAvailable() ? "Yes" : "No");
}

// Verify that the playout side is configured for mono by default.
TEST_F(AudioDeviceTest, UsesMonoPlayoutByDefault) {
  EXPECT_EQ(1u, output_parameters_.channels());
}

// Verify that the recording side is configured for mono by default.
TEST_F(AudioDeviceTest, UsesMonoRecordingByDefault) {
  EXPECT_EQ(1u, input_parameters_.channels());
}

TEST_F(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  bool available;
  EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
  EXPECT_TRUE(available);
}

TEST_F(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  StartPlayout();
  EXPECT_GT(GetMaxSpeakerVolume(), 0);
  StopPlayout();
}

TEST_F(AudioDeviceTest, MinSpeakerVolumeIsZero) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  EXPECT_EQ(GetMinSpeakerVolume(), 0);
}

TEST_F(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  const int default_volume = GetSpeakerVolume();
  EXPECT_GE(default_volume, GetMinSpeakerVolume());
  EXPECT_LE(default_volume, GetMaxSpeakerVolume());
}

TEST_F(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
  // The OpenSL ES output audio path does not support volume control.
  if (!AudioLayerSupportsVolumeControl())
    return;
  const int default_volume = GetSpeakerVolume();
  const int max_volume = GetMaxSpeakerVolume();
  EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
  int new_volume = GetSpeakerVolume();
  EXPECT_EQ(new_volume, max_volume);
  EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
}

// Tests that playout can be initiated, started and stopped. No audio callback
// is registered in this test.
TEST_F(AudioDeviceTest, StartStopPlayout) {
  StartPlayout();
  StopPlayout();
  StartPlayout();
  StopPlayout();
}

// Tests that recording can be initiated, started and stopped. No audio
// callback is registered in this test.
TEST_F(AudioDeviceTest, StartStopRecording) {
  StartRecording();
  StopRecording();
  StartRecording();
  StopRecording();
}

// Verify that calling StopPlayout() will leave us in an uninitialized state
// which will require a new call to InitPlayout(). This test does not call
// StartPlayout() while being uninitialized since doing so will hit a
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitPlayout());
  EXPECT_EQ(0, audio_device()->StartPlayout());
  EXPECT_EQ(0, audio_device()->StopPlayout());
  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
}

// Verify that calling StopRecording() will leave us in an uninitialized state
// which will require a new call to InitRecording(). This test does not call
// StartRecording() while being uninitialized since doing so will hit a
// RTC_DCHECK and death tests are not supported on Android.
TEST_F(AudioDeviceTest, StopRecordingRequiresInitToRestart) {
  EXPECT_EQ(0, audio_device()->InitRecording());
  EXPECT_EQ(0, audio_device()->StartRecording());
  EXPECT_EQ(0, audio_device()->StopRecording());
  EXPECT_FALSE(audio_device()->RecordingIsInitialized());
}

// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout);
  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_.Wait(kTestTimeOut);
  StopPlayout();
}

// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kRecording);
  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _, _))
      .Times(AtLeast(kNumCallbacks));

  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  test_is_done_.Wait(kTestTimeOut);
  StopRecording();
}

// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
  MockAudioTransportAndroid mock(kPlayout | kRecording);
  mock.HandleCallbacks(&test_is_done_, nullptr, kNumCallbacks);
  EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
                                     kBytesPerSample, playout_channels(),
                                     playout_sample_rate(), NotNull(), _, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_CALL(
      mock, RecordedDataIsAvailable(NotNull(), record_frames_per_10ms_buffer(),
                                    kBytesPerSample, record_channels(),
                                    record_sample_rate(), _, 0, 0, false, _, _))
      .Times(AtLeast(kNumCallbacks));
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  StartRecording();
  test_is_done_.Wait(kTestTimeOut);
  StopRecording();
  StopPlayout();
}

// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test but it does
// not contain any explicit verification that the audio quality is perfect.
TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
  // TODO(henrika): extend test when mono output is supported.
  EXPECT_EQ(1u, playout_channels());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout);
  const int num_callbacks = kFilePlayTimeInSec * kNumCallbacksPerSecond;
  std::string file_name = GetFileName(playout_sample_rate());
  std::unique_ptr<FileAudioStream> file_audio_stream(
      new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
  mock.HandleCallbacks(&test_is_done_, file_audio_stream.get(), num_callbacks);
  // SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartPlayout();
  test_is_done_.Wait(kTestTimeOut);
  StopPlayout();
}

// It should be possible to create an OpenSL engine object if OpenSL ES based
// audio is requested in any direction.
TEST_F(AudioDeviceTest, TestCreateOpenSLEngine) {
  // Verify that the global (singleton) OpenSL Engine can be acquired.
  OpenSLEngineManager engine_manager;
  SLObjectItf engine_object = engine_manager.GetOpenSLEngine();
  EXPECT_NE(nullptr, engine_object);
  // Perform a simple sanity check of the created engine object.
  ValidateSLEngine(engine_object);
}

// The audio device module only supports the same sample rate in both
// directions. In addition, in full-duplex low-latency mode (OpenSL ES), both
// input and output must use the same native buffer size to allow for usage of
// the fast audio track in Android.
TEST_F(AudioDeviceTest, VerifyAudioParameters) {
  EXPECT_EQ(output_parameters_.sample_rate(), input_parameters_.sample_rate());
  SetActiveAudioLayer(AudioDeviceModule::kAndroidOpenSLESAudio);
  EXPECT_EQ(output_parameters_.frames_per_buffer(),
            input_parameters_.frames_per_buffer());
}

TEST_F(AudioDeviceTest, ShowAudioParameterInfo) {
  const bool low_latency_out = false;
  const bool low_latency_in = false;
  PRINT("PLAYOUT:\n");
  PRINT("%saudio layer: %s\n", kTag,
        low_latency_out ? "Low latency OpenSL" : "Java/JNI based AudioTrack");
  PRINT("%ssample rate: %d Hz\n", kTag, output_parameters_.sample_rate());
  PRINT("%schannels: %zu\n", kTag, output_parameters_.channels());
  PRINT("%sframes per buffer: %zu <=> %.2f ms\n", kTag,
        output_parameters_.frames_per_buffer(),
        output_parameters_.GetBufferSizeInMilliseconds());
  PRINT("RECORD: \n");
  PRINT("%saudio layer: %s\n", kTag,
        low_latency_in ? "Low latency OpenSL" : "Java/JNI based AudioRecord");
  PRINT("%ssample rate: %d Hz\n", kTag, input_parameters_.sample_rate());
  PRINT("%schannels: %zu\n", kTag, input_parameters_.channels());
  PRINT("%sframes per buffer: %zu <=> %.2f ms\n", kTag,
        input_parameters_.frames_per_buffer(),
        input_parameters_.GetBufferSizeInMilliseconds());
}

// Add device-specific information to the test for logging purposes.
TEST_F(AudioDeviceTest, ShowDeviceInfo) {
  std::string model =
      JavaToNativeString(jni_, Java_BuildInfo_getDeviceModel(jni_));
  std::string brand = JavaToNativeString(jni_, Java_BuildInfo_getBrand(jni_));
  std::string manufacturer =
      JavaToNativeString(jni_, Java_BuildInfo_getDeviceManufacturer(jni_));

  PRINT("%smodel: %s\n", kTag, model.c_str());
  PRINT("%sbrand: %s\n", kTag, brand.c_str());
  PRINT("%smanufacturer: %s\n", kTag, manufacturer.c_str());
}

// Add Android build information to the test for logging purposes.
TEST_F(AudioDeviceTest, ShowBuildInfo) {
  std::string release =
      JavaToNativeString(jni_, Java_BuildInfo_getBuildRelease(jni_));
  std::string build_id =
      JavaToNativeString(jni_, Java_BuildInfo_getAndroidBuildId(jni_));
  std::string build_type =
      JavaToNativeString(jni_, Java_BuildInfo_getBuildType(jni_));
  int sdk = Java_BuildInfo_getSdkVersion(jni_);

  PRINT("%sbuild release: %s\n", kTag, release.c_str());
  PRINT("%sbuild id: %s\n", kTag, build_id.c_str());
  PRINT("%sbuild type: %s\n", kTag, build_type.c_str());
  PRINT("%sSDK version: %d\n", kTag, sdk);
}

// Basic test of the AudioParameters class using default construction where
// all members are set to zero.
TEST_F(AudioDeviceTest, AudioParametersWithDefaultConstruction) {
  AudioParameters params;
  EXPECT_FALSE(params.is_valid());
  EXPECT_EQ(0, params.sample_rate());
  EXPECT_EQ(0U, params.channels());
  EXPECT_EQ(0U, params.frames_per_buffer());
  EXPECT_EQ(0U, params.frames_per_10ms_buffer());
  EXPECT_EQ(0U, params.GetBytesPerFrame());
  EXPECT_EQ(0U, params.GetBytesPerBuffer());
  EXPECT_EQ(0U, params.GetBytesPer10msBuffer());
  EXPECT_EQ(0.0f, params.GetBufferSizeInMilliseconds());
}

// Basic test of the AudioParameters class using non-default construction.
TEST_F(AudioDeviceTest, AudioParametersWithNonDefaultConstruction) {
  const int kSampleRate = 48000;
  const size_t kChannels = 1;
  const size_t kFramesPerBuffer = 480;
  const size_t kFramesPer10msBuffer = 480;
  const size_t kBytesPerFrame = 2;
  const float kBufferSizeInMs = 10.0f;
  AudioParameters params(kSampleRate, kChannels, kFramesPerBuffer);
  EXPECT_TRUE(params.is_valid());
  EXPECT_EQ(kSampleRate, params.sample_rate());
  EXPECT_EQ(kChannels, params.channels());
  EXPECT_EQ(kFramesPerBuffer, params.frames_per_buffer());
  EXPECT_EQ(static_cast<size_t>(kSampleRate / 100),
            params.frames_per_10ms_buffer());
  EXPECT_EQ(kBytesPerFrame, params.GetBytesPerFrame());
  EXPECT_EQ(kBytesPerFrame * kFramesPerBuffer, params.GetBytesPerBuffer());
  EXPECT_EQ(kBytesPerFrame * kFramesPer10msBuffer,
            params.GetBytesPer10msBuffer());
  EXPECT_EQ(kBufferSizeInMs, params.GetBufferSizeInMilliseconds());
}

// Start playout and recording and store recorded data in an intermediate FIFO
// buffer from which the playout side then reads its samples in the same order
// as they were stored. Under ideal circumstances, a callback sequence would
// look like: ...+-+-+-+-+-+-+-..., where '+' means 'packet recorded' and '-'
// means 'packet played'. Under such conditions, the FIFO would only contain
// one packet on average. However, under more realistic conditions, the size
// of the FIFO will vary more due to an imbalance between the two sides.
// This test tries to verify that the device maintains a balanced callback
// sequence by running in loopback for kFullDuplexTime seconds while
// measuring the size (max and average) of the FIFO. The size of the FIFO is
// increased by the recording side and decreased by the playout side.
// TODO(henrika): tune the final test parameters after running tests on several
// different devices.
// Disabling this test on bots since it is difficult to come up with a robust
// test condition that works as intended in all cases. The main issue is that,
// when swarming is used, an initial latency can build up when the two sides
// start at different times. Hence, the test can fail even if audio works as
// intended. Keeping the test so it can be enabled manually.
// http://bugs.webrtc.org/7744
TEST_F(AudioDeviceTest, DISABLED_RunPlayoutAndRecordingInFullDuplex) {
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  std::unique_ptr<FifoAudioStream> fifo_audio_stream(
      new FifoAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(&test_is_done_, fifo_audio_stream.get(),
                       kFullDuplexTime.seconds() * kNumCallbacksPerSecond);
  SetMaxPlayoutVolume();
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  StartRecording();
  StartPlayout();
  test_is_done_.Wait(std::max(kTestTimeOut, kFullDuplexTime));
  StopPlayout();
  StopRecording();

  // These thresholds are set rather high to accommodate differences in
  // hardware across several devices, so this test can be used in swarming.
  // See http://bugs.webrtc.org/6464
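  // With 10 ms audio buffers, an average FIFO size of 60 corresponds to
  // roughly 600 ms of buffered audio.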
  EXPECT_LE(fifo_audio_stream->average_size(), 60u);
  EXPECT_LE(fifo_audio_stream->largest_size(), 70u);
}

// Measures loopback latency and reports the min, max and average values for
// a full duplex audio session.
// The latency is measured like so:
// - Insert impulses periodically on the output side.
// - Detect the impulses on the input side.
// - Measure the time difference between the transmit time and receive time.
// - Store time differences in a vector and calculate min, max and average.
// This test requires special hardware called an Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
  EXPECT_EQ(record_channels(), playout_channels());
  EXPECT_EQ(record_sample_rate(), playout_sample_rate());
  NiceMock<MockAudioTransportAndroid> mock(kPlayout | kRecording);
  std::unique_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
      new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
  mock.HandleCallbacks(&test_is_done_, latency_audio_stream.get(),
                       kMeasureLatencyTime.seconds() * kNumCallbacksPerSecond);
  EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
  SetMaxPlayoutVolume();
  DisableBuiltInAECIfAvailable();
  StartRecording();
  StartPlayout();
  test_is_done_.Wait(std::max(kTestTimeOut, kMeasureLatencyTime));
  StopPlayout();
  StopRecording();
  // Verify that the correct number of transmitted impulses is detected.
  EXPECT_EQ(latency_audio_stream->num_latency_values(),
            static_cast<size_t>(
                kImpulseFrequencyInHz * kMeasureLatencyTime.seconds() - 1));
  latency_audio_stream->PrintResults();
}

TEST(JavaAudioDeviceTest, TestRunningTwoAdmsSimultaneously) {
  JNIEnv* jni = AttachCurrentThreadIfNeeded();
  ScopedJavaLocalRef<jobject> context = test::GetAppContextForTest(jni);

  // Create and start the first ADM.
  rtc::scoped_refptr<AudioDeviceModule> adm_1 =
      CreateJavaAudioDeviceModule(jni, context.obj());
  EXPECT_EQ(0, adm_1->Init());
  EXPECT_EQ(0, adm_1->InitRecording());
  EXPECT_EQ(0, adm_1->StartRecording());

  // Create and start a second ADM. Expect this to fail due to the microphone
  // already being in use.
  rtc::scoped_refptr<AudioDeviceModule> adm_2 =
      CreateJavaAudioDeviceModule(jni, context.obj());
  int32_t err = adm_2->Init();
  err |= adm_2->InitRecording();
  err |= adm_2->StartRecording();
  EXPECT_NE(0, err);

  // Stop and terminate the second ADM.
  adm_2->StopRecording();
  adm_2->Terminate();

  // Stop the first ADM.
  EXPECT_EQ(0, adm_1->StopRecording());
  EXPECT_EQ(0, adm_1->Terminate());
}

}  // namespace jni

}  // namespace webrtc