/*
 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import <AVFoundation/AVFoundation.h>
#import <Foundation/Foundation.h>

#include "audio_device_ios.h"

#include <cmath>

#include "api/array_view.h"
#include "api/task_queue/pending_task_safety_flag.h"
#include "helpers.h"
#include "modules/audio_device/fine_audio_buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/thread.h"
#include "rtc_base/thread_annotations.h"
#include "rtc_base/time_utils.h"
#include "system_wrappers/include/field_trial.h"
#include "system_wrappers/include/metrics.h"

#import "base/RTCLogging.h"
#import "components/audio/RTCAudioSession+Private.h"
#import "components/audio/RTCAudioSession.h"
#import "components/audio/RTCAudioSessionConfiguration.h"
#import "components/audio/RTCNativeAudioSessionDelegateAdapter.h"

namespace webrtc {
namespace ios_adm {

#define LOGI() RTC_LOG(LS_INFO) << "AudioDeviceIOS::"

#define LOG_AND_RETURN_IF_ERROR(error, message)    \
  do {                                             \
    OSStatus err = error;                          \
    if (err) {                                     \
      RTC_LOG(LS_ERROR) << message << ": " << err; \
      return false;                                \
    }                                              \
  } while (0)

#define LOG_IF_ERROR(error, message)               \
  do {                                             \
    OSStatus err = error;                          \
    if (err) {                                     \
      RTC_LOG(LS_ERROR) << message << ": " << err; \
    }                                              \
  } while (0)

// Hardcoded delay estimates based on real measurements.
// TODO(henrika): these values are not used in combination with built-in AEC.
// Can most likely be removed.
const UInt16 kFixedPlayoutDelayEstimate = 30;
const UInt16 kFixedRecordDelayEstimate = 30;

using ios::CheckAndLogError;

#if !defined(NDEBUG)
// Returns true when the code runs on a device simulator.
static bool DeviceIsSimulator() {
  return ios::GetDeviceName() == "x86_64";
}

// Helper method that logs essential device information strings.
static void LogDeviceInfo() {
  RTC_LOG(LS_INFO) << "LogDeviceInfo";
  @autoreleasepool {
    RTC_LOG(LS_INFO) << " system name: " << ios::GetSystemName();
    RTC_LOG(LS_INFO) << " system version: " << ios::GetSystemVersionAsString();
    RTC_LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
    RTC_LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
    RTC_LOG(LS_INFO) << " process name: " << ios::GetProcessName();
    RTC_LOG(LS_INFO) << " process ID: " << ios::GetProcessID();
    RTC_LOG(LS_INFO) << " OS version: " << ios::GetOSVersionString();
    RTC_LOG(LS_INFO) << " processing cores: " << ios::GetProcessorCount();
    RTC_LOG(LS_INFO) << " low power mode: " << ios::GetLowPowerModeEnabled();
#if TARGET_IPHONE_SIMULATOR
    RTC_LOG(LS_INFO) << " TARGET_IPHONE_SIMULATOR is defined";
#endif
    RTC_LOG(LS_INFO) << " DeviceIsSimulator: " << DeviceIsSimulator();
  }
}
#endif  // !defined(NDEBUG)

AudioDeviceIOS::AudioDeviceIOS(bool bypass_voice_processing)
    : bypass_voice_processing_(bypass_voice_processing),
      audio_device_buffer_(nullptr),
      audio_unit_(nullptr),
      recording_(0),
      playing_(0),
      initialized_(false),
      audio_is_initialized_(false),
      is_interrupted_(false),
      has_configured_session_(false),
      num_detected_playout_glitches_(0),
      last_playout_time_(0),
      num_playout_callbacks_(0),
      last_output_volume_change_time_(0) {
  LOGI() << "ctor" << ios::GetCurrentThreadDescription()
         << ",bypass_voice_processing=" << bypass_voice_processing_;
  io_thread_checker_.Detach();
  thread_ = rtc::Thread::Current();

  audio_session_observer_ = [[RTCNativeAudioSessionDelegateAdapter alloc] initWithObserver:this];
}

AudioDeviceIOS::~AudioDeviceIOS() {
  RTC_DCHECK_RUN_ON(thread_);
  LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
  safety_->SetNotAlive();
  Terminate();
  audio_session_observer_ = nil;
}

void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  LOGI() << "AttachAudioBuffer";
  RTC_DCHECK(audioBuffer);
  RTC_DCHECK_RUN_ON(thread_);
  audio_device_buffer_ = audioBuffer;
}

AudioDeviceGeneric::InitStatus AudioDeviceIOS::Init() {
  LOGI() << "Init";
  io_thread_checker_.Detach();

  RTC_DCHECK_RUN_ON(thread_);
  if (initialized_) {
    return InitStatus::OK;
  }
#if !defined(NDEBUG)
  LogDeviceInfo();
#endif
  // Store the preferred sample rate and preferred number of channels already
  // here. They have not been set and confirmed yet since configureForWebRTC
  // is not called until audio is about to start. However, it makes sense to
  // store the parameters now and then verify at a later stage.
  RTC_OBJC_TYPE(RTCAudioSessionConfiguration)* config =
      [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) webRTCConfiguration];
  playout_parameters_.reset(config.sampleRate, config.outputNumberOfChannels);
  record_parameters_.reset(config.sampleRate, config.inputNumberOfChannels);
  // Ensure that the audio device buffer (ADB) knows about the internal audio
  // parameters. Note that, even if we are unable to get a mono audio session,
  // we will always tell the I/O audio unit to do a channel format conversion
  // to guarantee mono on the "input side" of the audio unit.
  UpdateAudioDeviceBuffer();
  initialized_ = true;
  return InitStatus::OK;
}

int32_t AudioDeviceIOS::Terminate() {
  LOGI() << "Terminate";
  RTC_DCHECK_RUN_ON(thread_);
  if (!initialized_) {
    return 0;
  }
  StopPlayout();
  StopRecording();
  initialized_ = false;
  return 0;
}

bool AudioDeviceIOS::Initialized() const {
  RTC_DCHECK_RUN_ON(thread_);
  return initialized_;
}

int32_t AudioDeviceIOS::InitPlayout() {
  LOGI() << "InitPlayout";
  RTC_DCHECK_RUN_ON(thread_);
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!audio_is_initialized_);
  RTC_DCHECK(!playing_.load());
  if (!audio_is_initialized_) {
    if (!InitPlayOrRecord()) {
      RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!";
      return -1;
    }
  }
  audio_is_initialized_ = true;
  return 0;
}

bool AudioDeviceIOS::PlayoutIsInitialized() const {
  RTC_DCHECK_RUN_ON(thread_);
  return audio_is_initialized_;
}

bool AudioDeviceIOS::RecordingIsInitialized() const {
  RTC_DCHECK_RUN_ON(thread_);
  return audio_is_initialized_;
}

int32_t AudioDeviceIOS::InitRecording() {
  LOGI() << "InitRecording";
  RTC_DCHECK_RUN_ON(thread_);
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!audio_is_initialized_);
  RTC_DCHECK(!recording_.load());
  if (!audio_is_initialized_) {
    if (!InitPlayOrRecord()) {
      RTC_LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!";
      return -1;
    }
  }
  audio_is_initialized_ = true;
  return 0;
}

int32_t AudioDeviceIOS::StartPlayout() {
  LOGI() << "StartPlayout";
  RTC_DCHECK_RUN_ON(thread_);
  RTC_DCHECK(audio_is_initialized_);
  RTC_DCHECK(!playing_.load());
  RTC_DCHECK(audio_unit_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetPlayout();
  }
  if (!recording_.load() && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
    OSStatus result = audio_unit_->Start();
    if (result != noErr) {
      RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
      [session notifyAudioUnitStartFailedWithError:result];
      RTCLogError(@"StartPlayout failed to start audio unit, reason %d", result);
      return -1;
    }
    RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  playing_.store(1, std::memory_order_release);
  num_playout_callbacks_ = 0;
  num_detected_playout_glitches_ = 0;
  return 0;
}

int32_t AudioDeviceIOS::StopPlayout() {
  LOGI() << "StopPlayout";
  RTC_DCHECK_RUN_ON(thread_);
  if (!audio_is_initialized_ || !playing_.load()) {
    return 0;
  }
  if (!recording_.load()) {
    ShutdownPlayOrRecord();
    audio_is_initialized_ = false;
  }
  playing_.store(0, std::memory_order_release);

  // Derive average number of calls to OnGetPlayoutData() between detected
  // audio glitches and add the result to a histogram.
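  // 100000 is used as the default so that sessions without any detected
  // glitches report the maximum value of the histogram below.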
  int average_number_of_playout_callbacks_between_glitches = 100000;
  RTC_DCHECK_GE(num_playout_callbacks_, num_detected_playout_glitches_);
  if (num_detected_playout_glitches_ > 0) {
    average_number_of_playout_callbacks_between_glitches =
        num_playout_callbacks_ / num_detected_playout_glitches_;
  }
  RTC_HISTOGRAM_COUNTS_100000("WebRTC.Audio.AveragePlayoutCallbacksBetweenGlitches",
                              average_number_of_playout_callbacks_between_glitches);
  RTCLog(@"Average number of playout callbacks between glitches: %d",
         average_number_of_playout_callbacks_between_glitches);
  return 0;
}

bool AudioDeviceIOS::Playing() const {
  return playing_.load();
}

int32_t AudioDeviceIOS::StartRecording() {
  LOGI() << "StartRecording";
  RTC_DCHECK_RUN_ON(thread_);
  RTC_DCHECK(audio_is_initialized_);
  RTC_DCHECK(!recording_.load());
  RTC_DCHECK(audio_unit_);
  if (fine_audio_buffer_) {
    fine_audio_buffer_->ResetRecord();
  }
  if (!playing_.load() && audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
    OSStatus result = audio_unit_->Start();
    if (result != noErr) {
      RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
      [session notifyAudioUnitStartFailedWithError:result];
      RTCLogError(@"StartRecording failed to start audio unit, reason %d", result);
      return -1;
    }
    RTC_LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  recording_.store(1, std::memory_order_release);
  return 0;
}

int32_t AudioDeviceIOS::StopRecording() {
  LOGI() << "StopRecording";
  RTC_DCHECK_RUN_ON(thread_);
  if (!audio_is_initialized_ || !recording_.load()) {
    return 0;
  }
  if (!playing_.load()) {
    ShutdownPlayOrRecord();
    audio_is_initialized_ = false;
  }
  recording_.store(0, std::memory_order_release);
  return 0;
}

bool AudioDeviceIOS::Recording() const {
  return recording_.load();
}

int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
  delayMS = kFixedPlayoutDelayEstimate;
  return 0;
}

int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
  LOGI() << "GetPlayoutAudioParameters";
  RTC_DCHECK(playout_parameters_.is_valid());
  RTC_DCHECK_RUN_ON(thread_);
  *params = playout_parameters_;
  return 0;
}

int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
  LOGI() << "GetRecordAudioParameters";
  RTC_DCHECK(record_parameters_.is_valid());
  RTC_DCHECK_RUN_ON(thread_);
  *params = record_parameters_;
  return 0;
}

void AudioDeviceIOS::OnInterruptionBegin() {
  RTC_DCHECK(thread_);
  LOGI() << "OnInterruptionBegin";
  thread_->PostTask(SafeTask(safety_, [this] { HandleInterruptionBegin(); }));
}

void AudioDeviceIOS::OnInterruptionEnd() {
  RTC_DCHECK(thread_);
  LOGI() << "OnInterruptionEnd";
  thread_->PostTask(SafeTask(safety_, [this] { HandleInterruptionEnd(); }));
}

void AudioDeviceIOS::OnValidRouteChange() {
  RTC_DCHECK(thread_);
  thread_->PostTask(SafeTask(safety_, [this] { HandleValidRouteChange(); }));
}

void AudioDeviceIOS::OnCanPlayOrRecordChange(bool can_play_or_record) {
  RTC_DCHECK(thread_);
  thread_->PostTask(SafeTask(
      safety_, [this, can_play_or_record] { HandleCanPlayOrRecordChange(can_play_or_record); }));
}

void AudioDeviceIOS::OnChangedOutputVolume() {
  RTC_DCHECK(thread_);
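  // Defer to the owning thread; HandleOutputVolumeChange() checks that it
  // runs on `thread_`.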
  thread_->PostTask(SafeTask(safety_, [this] { HandleOutputVolumeChange(); }));
}

OSStatus AudioDeviceIOS::OnDeliverRecordedData(AudioUnitRenderActionFlags* flags,
                                               const AudioTimeStamp* time_stamp,
                                               UInt32 bus_number,
                                               UInt32 num_frames,
                                               AudioBufferList* /* io_data */) {
  RTC_DCHECK_RUN_ON(&io_thread_checker_);
  OSStatus result = noErr;
  // Simply return if recording is not enabled.
  if (!recording_.load(std::memory_order_acquire)) return result;

  // Set the size of our own audio buffer and clear it first to avoid copying
  // in combination with potential reallocations.
  // On real iOS devices, the size will only be set once (at first callback).
  record_audio_buffer_.Clear();
  record_audio_buffer_.SetSize(num_frames);

  // Allocate AudioBuffers to be used as storage for the received audio.
  // The AudioBufferList structure works as a placeholder for the
  // AudioBuffer structure, which holds a pointer to the actual data buffer
  // in `record_audio_buffer_`. Recorded audio will be rendered into this memory
  // at each input callback when calling AudioUnitRender().
  AudioBufferList audio_buffer_list;
  audio_buffer_list.mNumberBuffers = 1;
  AudioBuffer* audio_buffer = &audio_buffer_list.mBuffers[0];
  audio_buffer->mNumberChannels = record_parameters_.channels();
  audio_buffer->mDataByteSize =
      record_audio_buffer_.size() * VoiceProcessingAudioUnit::kBytesPerSample;
  audio_buffer->mData = reinterpret_cast<int8_t*>(record_audio_buffer_.data());

  // Obtain the recorded audio samples by initiating a rendering cycle.
  // Since it happens on the input bus, the `io_data` parameter is a reference
  // to the preallocated audio buffer list that the audio unit renders into.
  // We can make the audio unit provide a buffer instead in io_data, but we
  // currently just use our own.
  // TODO(henrika): should error handling be improved?
  result = audio_unit_->Render(flags, time_stamp, bus_number, num_frames, &audio_buffer_list);
  if (result != noErr) {
    RTCLogError(@"Failed to render audio.");
    return result;
  }

  // Get a pointer to the recorded audio and send it to the WebRTC ADB.
  // Use the FineAudioBuffer instance to convert between native buffer size
  // and the 10ms buffer size used by WebRTC.
  fine_audio_buffer_->DeliverRecordedData(record_audio_buffer_, kFixedRecordDelayEstimate);
  return noErr;
}

OSStatus AudioDeviceIOS::OnGetPlayoutData(AudioUnitRenderActionFlags* flags,
                                          const AudioTimeStamp* time_stamp,
                                          UInt32 bus_number,
                                          UInt32 num_frames,
                                          AudioBufferList* io_data) {
  RTC_DCHECK_RUN_ON(&io_thread_checker_);
  // Verify 16-bit, noninterleaved mono PCM signal format.
  RTC_DCHECK_EQ(1, io_data->mNumberBuffers);
  AudioBuffer* audio_buffer = &io_data->mBuffers[0];
  RTC_DCHECK_EQ(1, audio_buffer->mNumberChannels);

  // Produce silence and give audio unit a hint about it if playout is not
  // activated.
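  // The acquire load below pairs with the release stores to `playing_` in
  // StartPlayout() and StopPlayout().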
  if (!playing_.load(std::memory_order_acquire)) {
    const size_t size_in_bytes = audio_buffer->mDataByteSize;
    RTC_CHECK_EQ(size_in_bytes / VoiceProcessingAudioUnit::kBytesPerSample, num_frames);
    *flags |= kAudioUnitRenderAction_OutputIsSilence;
    memset(static_cast<int8_t*>(audio_buffer->mData), 0, size_in_bytes);
    return noErr;
  }

  // Measure time since last call to OnGetPlayoutData() and see if it is larger
  // than a well-defined threshold which depends on the current IO buffer size.
  // If so, we have an indication of a glitch in the output audio since the
  // core audio layer will most likely run dry in this state.
  ++num_playout_callbacks_;
  const int64_t now_time = rtc::TimeMillis();
  if (time_stamp->mSampleTime != num_frames) {
    const int64_t delta_time = now_time - last_playout_time_;
    const int glitch_threshold = 1.6 * playout_parameters_.GetBufferSizeInMilliseconds();
    if (delta_time > glitch_threshold) {
      RTCLogWarning(@"Possible playout audio glitch detected.\n"
                     "  Time since last OnGetPlayoutData was %lld ms.\n",
                    delta_time);
      // Exclude extreme delta values since they most likely do not correspond
      // to a real glitch. Instead, the most probable cause is that a headset
      // has been plugged in or out. There are more direct ways to detect
      // audio device changes (see HandleValidRouteChange()) but experiments
      // show that using it leads to more complex implementations.
      // TODO(henrika): more tests might be needed to come up with an even
      // better upper limit.
      if (glitch_threshold < 120 && delta_time > 120) {
        RTCLog(@"Glitch warning is ignored. Probably caused by device switch.");
      } else {
        thread_->PostTask(SafeTask(safety_, [this] { HandlePlayoutGlitchDetected(); }));
      }
    }
  }
  last_playout_time_ = now_time;

  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
  // the native I/O audio unit) and copy the result to the audio buffer in the
  // `io_data` destination.
  fine_audio_buffer_->GetPlayoutData(
      rtc::ArrayView<int16_t>(static_cast<int16_t*>(audio_buffer->mData), num_frames),
      kFixedPlayoutDelayEstimate);
  return noErr;
}

void AudioDeviceIOS::HandleInterruptionBegin() {
  RTC_DCHECK_RUN_ON(thread_);
  RTCLog(@"Interruption begin. IsInterrupted changed from %d to 1.", is_interrupted_);
  if (audio_unit_ && audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
    RTCLog(@"Stopping the audio unit due to interruption begin.");
    if (!audio_unit_->Stop()) {
      RTCLogError(@"Failed to stop the audio unit for interruption begin.");
    }
    PrepareForNewStart();
  }
  is_interrupted_ = true;
}

void AudioDeviceIOS::HandleInterruptionEnd() {
  RTC_DCHECK_RUN_ON(thread_);
  RTCLog(@"Interruption ended. IsInterrupted changed from %d to 0. "
          "Updating audio unit state.",
         is_interrupted_);
  is_interrupted_ = false;
  if (!audio_unit_) return;
  if (webrtc::field_trial::IsEnabled("WebRTC-Audio-iOS-Holding")) {
    // Work around an issue where audio does not restart properly after an
    // interruption by restarting the audio unit when the interruption ends.
    if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
      audio_unit_->Stop();
      PrepareForNewStart();
    }
    if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
      audio_unit_->Uninitialize();
    }
    // Allocate new buffers given the potentially new stream format.
    SetupAudioBuffersForActiveAudioSession();
  }
  UpdateAudioUnit([RTC_OBJC_TYPE(RTCAudioSession) sharedInstance].canPlayOrRecord);
}

void AudioDeviceIOS::HandleValidRouteChange() {
  RTC_DCHECK_RUN_ON(thread_);
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  RTCLog(@"%@", session);
  HandleSampleRateChange();
}

void AudioDeviceIOS::HandleCanPlayOrRecordChange(bool can_play_or_record) {
  RTCLog(@"Handling CanPlayOrRecord change to: %d", can_play_or_record);
  UpdateAudioUnit(can_play_or_record);
}

void AudioDeviceIOS::HandleSampleRateChange() {
  RTC_DCHECK_RUN_ON(thread_);
  RTCLog(@"Handling sample rate change.");

  // Don't do anything if we're interrupted.
  if (is_interrupted_) {
    RTCLog(@"Ignoring sample rate change due to interruption.");
    return;
  }

  // If we don't have an audio unit yet, or the audio unit is uninitialized,
  // there is no work to do.
  if (!audio_unit_ || audio_unit_->GetState() < VoiceProcessingAudioUnit::kInitialized) {
    return;
  }

  // The audio unit is already initialized or started.
  // Check to see if the sample rate or buffer size has changed.
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  const double new_sample_rate = session.sampleRate;
  const NSTimeInterval session_buffer_duration = session.IOBufferDuration;
  const size_t new_frames_per_buffer =
      static_cast<size_t>(new_sample_rate * session_buffer_duration + .5);
  const double current_sample_rate = playout_parameters_.sample_rate();
  const size_t current_frames_per_buffer = playout_parameters_.frames_per_buffer();
  RTCLog(@"Handling playout sample rate change:\n"
          "  Session sample rate: %f frames_per_buffer: %lu\n"
          "  ADM sample rate: %f frames_per_buffer: %lu",
         new_sample_rate,
         (unsigned long)new_frames_per_buffer,
         current_sample_rate,
         (unsigned long)current_frames_per_buffer);

  // Sample rate and buffer size are the same, no work to do.
  if (std::abs(current_sample_rate - new_sample_rate) <= DBL_EPSILON &&
      current_frames_per_buffer == new_frames_per_buffer) {
    RTCLog(@"Ignoring sample rate change since audio parameters are intact.");
    return;
  }

  // Extra sanity check to ensure that the new sample rate is valid.
  if (new_sample_rate <= 0.0) {
    RTCLogError(@"Sample rate is invalid: %f", new_sample_rate);
    return;
  }

  // We need to adjust our format and buffer sizes.
  // The stream format is about to be changed and it requires that we first
  // stop and uninitialize the audio unit to deallocate its resources.
  RTCLog(@"Stopping and uninitializing audio unit to adjust buffers.");
  bool restart_audio_unit = false;
  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kStarted) {
    audio_unit_->Stop();
    restart_audio_unit = true;
    PrepareForNewStart();
  }
  if (audio_unit_->GetState() == VoiceProcessingAudioUnit::kInitialized) {
    audio_unit_->Uninitialize();
  }

  // Allocate new buffers given the new stream format.
  SetupAudioBuffersForActiveAudioSession();

  // Initialize the audio unit again with the new sample rate.
  if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
    RTCLogError(@"Failed to initialize the audio unit with sample rate: %d",
                playout_parameters_.sample_rate());
    return;
  }

  // Restart the audio unit if it was already running.
  if (restart_audio_unit) {
    OSStatus result = audio_unit_->Start();
    if (result != noErr) {
      RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
      [session notifyAudioUnitStartFailedWithError:result];
      RTCLogError(@"Failed to start audio unit with sample rate: %d, reason %d",
                  playout_parameters_.sample_rate(),
                  result);
      return;
    }
  }
  RTCLog(@"Successfully handled sample rate change.");
}

void AudioDeviceIOS::HandlePlayoutGlitchDetected() {
  RTC_DCHECK_RUN_ON(thread_);
  // Don't update metrics if we're interrupted since a "glitch" is expected
  // in this state.
  if (is_interrupted_) {
    RTCLog(@"Ignoring audio glitch due to interruption.");
    return;
  }
  // Avoid doing glitch detection for two seconds after a volume change
  // has been detected to reduce the risk of false alarms.
  if (last_output_volume_change_time_ > 0 &&
      rtc::TimeSince(last_output_volume_change_time_) < 2000) {
    RTCLog(@"Ignoring audio glitch due to recent output volume change.");
    return;
  }
  num_detected_playout_glitches_++;
  RTCLog(@"Number of detected playout glitches: %lld", num_detected_playout_glitches_);

  int64_t glitch_count = num_detected_playout_glitches_;
  dispatch_async(dispatch_get_main_queue(), ^{
    RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
    [session notifyDidDetectPlayoutGlitch:glitch_count];
  });
}

void AudioDeviceIOS::HandleOutputVolumeChange() {
  RTC_DCHECK_RUN_ON(thread_);
  RTCLog(@"Output volume change detected.");
  // Store time of this detection so it can be used to defer detection of
  // glitches too close in time to this event.
  last_output_volume_change_time_ = rtc::TimeMillis();
}

void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
  LOGI() << "UpdateAudioDeviceBuffer";
  // AttachAudioBuffer() is called at construction by the main class but check
  // just in case.
  RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
  RTC_DCHECK_GT(playout_parameters_.sample_rate(), 0);
  RTC_DCHECK_GT(record_parameters_.sample_rate(), 0);
  RTC_DCHECK_EQ(playout_parameters_.channels(), 1);
  RTC_DCHECK_EQ(record_parameters_.channels(), 1);
  // Inform the audio device buffer (ADB) about the new audio format.
  audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
  audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
  audio_device_buffer_->SetRecordingSampleRate(record_parameters_.sample_rate());
  audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
}

void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
  LOGI() << "SetupAudioBuffersForActiveAudioSession";
  // Verify the current values once the audio session has been activated.
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  double sample_rate = session.sampleRate;
  NSTimeInterval io_buffer_duration = session.IOBufferDuration;
  RTCLog(@"%@", session);

  // Log a warning message for the case when we are unable to set the preferred
  // hardware sample rate but continue and use the non-ideal sample rate after
  // reinitializing the audio parameters. Most BT headsets only support 8kHz or
  // 16kHz.
  RTC_OBJC_TYPE(RTCAudioSessionConfiguration)* webRTCConfig =
      [RTC_OBJC_TYPE(RTCAudioSessionConfiguration) webRTCConfiguration];
  if (sample_rate != webRTCConfig.sampleRate) {
    RTC_LOG(LS_WARNING) << "Unable to set the preferred sample rate";
  }

  // Crash reports indicate that, in rare cases, the reported sample rate can
  // be less than or equal to zero. If that happens and a valid sample rate has
  // already been set during initialization, the best guess we can make is to
  // reuse the current sample rate.
  if (sample_rate <= DBL_EPSILON && playout_parameters_.sample_rate() > 0) {
    RTCLogError(@"Reported rate is invalid: %f. "
                 "Using %d as sample rate instead.",
                sample_rate, playout_parameters_.sample_rate());
    sample_rate = playout_parameters_.sample_rate();
  }

  // At this stage, we also know the exact IO buffer duration and can add
  // that info to the existing audio parameters where it is converted into
  // number of audio frames.
  // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
  // Hence, 128 is the size we expect to see in upcoming render callbacks.
  playout_parameters_.reset(sample_rate, playout_parameters_.channels(), io_buffer_duration);
  RTC_DCHECK(playout_parameters_.is_complete());
  record_parameters_.reset(sample_rate, record_parameters_.channels(), io_buffer_duration);
  RTC_DCHECK(record_parameters_.is_complete());
  RTC_LOG(LS_INFO) << " frames per I/O buffer: " << playout_parameters_.frames_per_buffer();
  RTC_LOG(LS_INFO) << " bytes per I/O buffer: " << playout_parameters_.GetBytesPerBuffer();
  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(), record_parameters_.GetBytesPerBuffer());

  // Update the ADB parameters since the sample rate might have changed.
  UpdateAudioDeviceBuffer();

  // Create a modified audio buffer class which allows us to ask for,
  // or deliver, any number of samples (and not only multiples of 10ms) to
  // match the native audio unit buffer size.
  RTC_DCHECK(audio_device_buffer_);
  fine_audio_buffer_.reset(new FineAudioBuffer(audio_device_buffer_));
}

bool AudioDeviceIOS::CreateAudioUnit() {
  RTC_DCHECK(!audio_unit_);

  audio_unit_.reset(new VoiceProcessingAudioUnit(bypass_voice_processing_, this));
  if (!audio_unit_->Init()) {
    audio_unit_.reset();
    return false;
  }

  return true;
}

void AudioDeviceIOS::UpdateAudioUnit(bool can_play_or_record) {
  RTC_DCHECK_RUN_ON(thread_);
  RTCLog(@"Updating audio unit state. CanPlayOrRecord=%d IsInterrupted=%d",
         can_play_or_record,
         is_interrupted_);

  if (is_interrupted_) {
    RTCLog(@"Ignoring audio unit update due to interruption.");
    return;
  }

  // If we're not initialized we don't need to do anything. The audio unit
  // will be set up when audio is initialized.
  if (!audio_is_initialized_) return;

  // If we're initialized, we must have an audio unit.
  RTC_DCHECK(audio_unit_);

  bool should_initialize_audio_unit = false;
  bool should_uninitialize_audio_unit = false;
  bool should_start_audio_unit = false;
  bool should_stop_audio_unit = false;

  switch (audio_unit_->GetState()) {
    case VoiceProcessingAudioUnit::kInitRequired:
      RTCLog(@"VPAU state: InitRequired");
      RTC_DCHECK_NOTREACHED();
      break;
    case VoiceProcessingAudioUnit::kUninitialized:
      RTCLog(@"VPAU state: Uninitialized");
      should_initialize_audio_unit = can_play_or_record;
      should_start_audio_unit =
          should_initialize_audio_unit && (playing_.load() || recording_.load());
      break;
    case VoiceProcessingAudioUnit::kInitialized:
      RTCLog(@"VPAU state: Initialized");
      should_start_audio_unit = can_play_or_record && (playing_.load() || recording_.load());
      should_uninitialize_audio_unit = !can_play_or_record;
      break;
    case VoiceProcessingAudioUnit::kStarted:
      RTCLog(@"VPAU state: Started");
      RTC_DCHECK(playing_.load() || recording_.load());
      should_stop_audio_unit = !can_play_or_record;
      should_uninitialize_audio_unit = should_stop_audio_unit;
      break;
  }

  if (should_initialize_audio_unit) {
    RTCLog(@"Initializing audio unit for UpdateAudioUnit");
    ConfigureAudioSession();
    SetupAudioBuffersForActiveAudioSession();
    if (!audio_unit_->Initialize(playout_parameters_.sample_rate())) {
      RTCLogError(@"Failed to initialize audio unit.");
      return;
    }
  }

  if (should_start_audio_unit) {
    RTCLog(@"Starting audio unit for UpdateAudioUnit");
    // Log session settings before trying to start audio streaming.
    RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
    RTCLog(@"%@", session);
    OSStatus result = audio_unit_->Start();
    if (result != noErr) {
      [session notifyAudioUnitStartFailedWithError:result];
      RTCLogError(@"Failed to start audio unit, reason %d", result);
      return;
    }
  }

  if (should_stop_audio_unit) {
    RTCLog(@"Stopping audio unit for UpdateAudioUnit");
    if (!audio_unit_->Stop()) {
      RTCLogError(@"Failed to stop audio unit.");
      PrepareForNewStart();
      return;
    }
    PrepareForNewStart();
  }

  if (should_uninitialize_audio_unit) {
    RTCLog(@"Uninitializing audio unit for UpdateAudioUnit");
    audio_unit_->Uninitialize();
    UnconfigureAudioSession();
  }
}

bool AudioDeviceIOS::ConfigureAudioSession() {
  RTC_DCHECK_RUN_ON(thread_);
  RTCLog(@"Configuring audio session.");
  if (has_configured_session_) {
    RTCLogWarning(@"Audio session already configured.");
    return false;
  }
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  [session lockForConfiguration];
  bool success = [session configureWebRTCSession:nil];
  [session unlockForConfiguration];
  if (success) {
    has_configured_session_ = true;
    RTCLog(@"Configured audio session.");
  } else {
    RTCLog(@"Failed to configure audio session.");
  }
  return success;
}

bool AudioDeviceIOS::ConfigureAudioSessionLocked() {
  RTC_DCHECK_RUN_ON(thread_);
  RTCLog(@"Configuring audio session.");
  if (has_configured_session_) {
    RTCLogWarning(@"Audio session already configured.");
    return false;
  }
  RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance];
  bool success = [session configureWebRTCSession:nil];
  if (success) {
    has_configured_session_ = true;
834 RTCLog(@"Configured audio session."); 835 } else { 836 RTCLog(@"Failed to configure audio session."); 837 } 838 return success; 839} 840 841void AudioDeviceIOS::UnconfigureAudioSession() { 842 RTC_DCHECK_RUN_ON(thread_); 843 RTCLog(@"Unconfiguring audio session."); 844 if (!has_configured_session_) { 845 RTCLogWarning(@"Audio session already unconfigured."); 846 return; 847 } 848 RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; 849 [session lockForConfiguration]; 850 [session unconfigureWebRTCSession:nil]; 851 [session endWebRTCSession:nil]; 852 [session unlockForConfiguration]; 853 has_configured_session_ = false; 854 RTCLog(@"Unconfigured audio session."); 855} 856 857bool AudioDeviceIOS::InitPlayOrRecord() { 858 LOGI() << "InitPlayOrRecord"; 859 RTC_DCHECK_RUN_ON(thread_); 860 861 // There should be no audio unit at this point. 862 if (!CreateAudioUnit()) { 863 return false; 864 } 865 866 RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; 867 // Subscribe to audio session events. 868 [session pushDelegate:audio_session_observer_]; 869 is_interrupted_ = session.isInterrupted ? true : false; 870 871 // Lock the session to make configuration changes. 872 [session lockForConfiguration]; 873 NSError* error = nil; 874 if (![session beginWebRTCSession:&error]) { 875 [session unlockForConfiguration]; 876 RTCLogError(@"Failed to begin WebRTC session: %@", error.localizedDescription); 877 audio_unit_.reset(); 878 return false; 879 } 880 881 // If we are ready to play or record, and if the audio session can be 882 // configured, then initialize the audio unit. 883 if (session.canPlayOrRecord) { 884 if (!ConfigureAudioSessionLocked()) { 885 // One possible reason for failure is if an attempt was made to use the 886 // audio session during or after a Media Services failure. 887 // See AVAudioSessionErrorCodeMediaServicesFailed for details. 888 [session unlockForConfiguration]; 889 audio_unit_.reset(); 890 return false; 891 } 892 SetupAudioBuffersForActiveAudioSession(); 893 audio_unit_->Initialize(playout_parameters_.sample_rate()); 894 } 895 896 // Release the lock. 897 [session unlockForConfiguration]; 898 return true; 899} 900 901void AudioDeviceIOS::ShutdownPlayOrRecord() { 902 LOGI() << "ShutdownPlayOrRecord"; 903 RTC_DCHECK_RUN_ON(thread_); 904 905 // Stop the audio unit to prevent any additional audio callbacks. 906 audio_unit_->Stop(); 907 908 // Close and delete the voice-processing I/O unit. 909 audio_unit_.reset(); 910 911 // Detach thread checker for the AURemoteIO::IOThread to ensure that the 912 // next session uses a fresh thread id. 913 io_thread_checker_.Detach(); 914 915 // Remove audio session notification observers. 916 RTC_OBJC_TYPE(RTCAudioSession)* session = [RTC_OBJC_TYPE(RTCAudioSession) sharedInstance]; 917 [session removeDelegate:audio_session_observer_]; 918 919 // All I/O should be stopped or paused prior to deactivating the audio 920 // session, hence we deactivate as last action. 921 UnconfigureAudioSession(); 922} 923 924void AudioDeviceIOS::PrepareForNewStart() { 925 LOGI() << "PrepareForNewStart"; 926 // The audio unit has been stopped and preparations are needed for an upcoming 927 // restart. It will result in audio callbacks from a new native I/O thread 928 // which means that we must detach thread checkers here to be prepared for an 929 // upcoming new audio stream. 
  io_thread_checker_.Detach();
}

bool AudioDeviceIOS::IsInterrupted() {
  return is_interrupted_;
}

#pragma mark - Not Implemented

int32_t AudioDeviceIOS::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const {
  audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
  return 0;
}

int16_t AudioDeviceIOS::PlayoutDevices() {
  // TODO(henrika): improve.
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return (int16_t)1;
}

int16_t AudioDeviceIOS::RecordingDevices() {
  // TODO(henrika): improve.
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return (int16_t)1;
}

int32_t AudioDeviceIOS::InitSpeaker() {
  return 0;
}

bool AudioDeviceIOS::SpeakerIsInitialized() const {
  return true;
}

int32_t AudioDeviceIOS::SpeakerVolumeIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetSpeakerVolume(uint32_t volume) {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SpeakerVolume(uint32_t& volume) const {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MaxSpeakerVolume(uint32_t& maxVolume) const {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MinSpeakerVolume(uint32_t& minVolume) const {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SpeakerMuteIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetSpeakerMute(bool enable) {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SpeakerMute(bool& enabled) const {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SetPlayoutDevice(uint16_t index) {
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return 0;
}

int32_t AudioDeviceIOS::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType) {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::InitMicrophone() {
  return 0;
}

bool AudioDeviceIOS::MicrophoneIsInitialized() const {
  return true;
}

int32_t AudioDeviceIOS::MicrophoneMuteIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetMicrophoneMute(bool enable) {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MicrophoneMute(bool& enabled) const {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::StereoRecordingIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetStereoRecording(bool enable) {
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::StereoRecording(bool& enabled) const {
  enabled = false;
  return 0;
}

int32_t AudioDeviceIOS::StereoPlayoutIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetStereoPlayout(bool enable) {
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::StereoPlayout(bool& enabled) const {
  enabled = false;
  return 0;
}

int32_t AudioDeviceIOS::MicrophoneVolumeIsAvailable(bool& available) {
  available = false;
  return 0;
}

int32_t AudioDeviceIOS::SetMicrophoneVolume(uint32_t volume) {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MicrophoneVolume(uint32_t& volume) const {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MaxMicrophoneVolume(uint32_t& maxVolume) const {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::MinMicrophoneVolume(uint32_t& minVolume) const {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::PlayoutDeviceName(uint16_t index,
                                          char name[kAdmMaxDeviceNameSize],
                                          char guid[kAdmMaxGuidSize]) {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::RecordingDeviceName(uint16_t index,
                                            char name[kAdmMaxDeviceNameSize],
                                            char guid[kAdmMaxGuidSize]) {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::SetRecordingDevice(uint16_t index) {
  RTC_LOG_F(LS_WARNING) << "Not implemented";
  return 0;
}

int32_t AudioDeviceIOS::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType) {
  RTC_DCHECK_NOTREACHED() << "Not implemented";
  return -1;
}

int32_t AudioDeviceIOS::PlayoutIsAvailable(bool& available) {
  available = true;
  return 0;
}

int32_t AudioDeviceIOS::RecordingIsAvailable(bool& available) {
  available = true;
  return 0;
}

}  // namespace ios_adm
}  // namespace webrtc