1 /* 2 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. 3 * 4 * Use of this source code is governed by a BSD-style license 5 * that can be found in the LICENSE file in the root of the source 6 * tree. An additional intellectual property rights grant can be found 7 * in the file PATENTS. All contributing project authors may 8 * be found in the AUTHORS file in the root of the source tree. 9 */ 10 11 #ifndef MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_ 12 #define MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_ 13 14 #include <audioclient.h> 15 #include <audiopolicy.h> 16 #include <avrt.h> 17 #include <comdef.h> 18 #include <mmdeviceapi.h> 19 #include <objbase.h> 20 #include <propidl.h> 21 #include <wrl/client.h> 22 23 #include <string> 24 25 #include "absl/strings/string_view.h" 26 #include "api/units/time_delta.h" 27 #include "modules/audio_device/audio_device_name.h" 28 #include "modules/audio_device/include/audio_device_defines.h" 29 #include "rtc_base/logging.h" 30 #include "rtc_base/string_utils.h" 31 32 #pragma comment(lib, "Avrt.lib") 33 34 namespace webrtc { 35 namespace webrtc_win { 36 37 // Utility class which registers a thread with MMCSS in the constructor and 38 // deregisters MMCSS in the destructor. The task name is given by `task_name`. 39 // The Multimedia Class Scheduler service (MMCSS) enables multimedia 40 // applications to ensure that their time-sensitive processing receives 41 // prioritized access to CPU resources without denying CPU resources to 42 // lower-priority applications. 
43 class ScopedMMCSSRegistration { 44 public: PriorityClassToString(DWORD priority_class)45 const char* PriorityClassToString(DWORD priority_class) { 46 switch (priority_class) { 47 case ABOVE_NORMAL_PRIORITY_CLASS: 48 return "ABOVE_NORMAL"; 49 case BELOW_NORMAL_PRIORITY_CLASS: 50 return "BELOW_NORMAL"; 51 case HIGH_PRIORITY_CLASS: 52 return "HIGH"; 53 case IDLE_PRIORITY_CLASS: 54 return "IDLE"; 55 case NORMAL_PRIORITY_CLASS: 56 return "NORMAL"; 57 case REALTIME_PRIORITY_CLASS: 58 return "REALTIME"; 59 default: 60 return "INVALID"; 61 } 62 } 63 PriorityToString(int priority)64 const char* PriorityToString(int priority) { 65 switch (priority) { 66 case THREAD_PRIORITY_ABOVE_NORMAL: 67 return "ABOVE_NORMAL"; 68 case THREAD_PRIORITY_BELOW_NORMAL: 69 return "BELOW_NORMAL"; 70 case THREAD_PRIORITY_HIGHEST: 71 return "HIGHEST"; 72 case THREAD_PRIORITY_IDLE: 73 return "IDLE"; 74 case THREAD_PRIORITY_LOWEST: 75 return "LOWEST"; 76 case THREAD_PRIORITY_NORMAL: 77 return "NORMAL"; 78 case THREAD_PRIORITY_TIME_CRITICAL: 79 return "TIME_CRITICAL"; 80 default: 81 // Can happen in combination with REALTIME_PRIORITY_CLASS. 82 return "INVALID"; 83 } 84 } 85 ScopedMMCSSRegistration(const wchar_t * task_name)86 explicit ScopedMMCSSRegistration(const wchar_t* task_name) { 87 RTC_DLOG(LS_INFO) << "ScopedMMCSSRegistration: " << rtc::ToUtf8(task_name); 88 // Register the calling thread with MMCSS for the supplied `task_name`. 
89 DWORD mmcss_task_index = 0; 90 mmcss_handle_ = AvSetMmThreadCharacteristicsW(task_name, &mmcss_task_index); 91 if (mmcss_handle_ == nullptr) { 92 RTC_LOG(LS_ERROR) << "Failed to enable MMCSS on this thread: " 93 << GetLastError(); 94 } else { 95 const DWORD priority_class = GetPriorityClass(GetCurrentProcess()); 96 const int priority = GetThreadPriority(GetCurrentThread()); 97 RTC_DLOG(LS_INFO) << "priority class: " 98 << PriorityClassToString(priority_class) << "(" 99 << priority_class << ")"; 100 RTC_DLOG(LS_INFO) << "priority: " << PriorityToString(priority) << "(" 101 << priority << ")"; 102 } 103 } 104 ~ScopedMMCSSRegistration()105 ~ScopedMMCSSRegistration() { 106 if (Succeeded()) { 107 // Deregister with MMCSS. 108 RTC_DLOG(LS_INFO) << "~ScopedMMCSSRegistration"; 109 AvRevertMmThreadCharacteristics(mmcss_handle_); 110 } 111 } 112 113 ScopedMMCSSRegistration(const ScopedMMCSSRegistration&) = delete; 114 ScopedMMCSSRegistration& operator=(const ScopedMMCSSRegistration&) = delete; 115 Succeeded()116 bool Succeeded() const { return mmcss_handle_ != nullptr; } 117 118 private: 119 HANDLE mmcss_handle_ = nullptr; 120 }; 121 122 // A PROPVARIANT that is automatically initialized and cleared upon respective 123 // construction and destruction of this class. 124 class ScopedPropVariant { 125 public: ScopedPropVariant()126 ScopedPropVariant() { PropVariantInit(&pv_); } 127 ~ScopedPropVariant()128 ~ScopedPropVariant() { Reset(); } 129 130 ScopedPropVariant(const ScopedPropVariant&) = delete; 131 ScopedPropVariant& operator=(const ScopedPropVariant&) = delete; 132 bool operator==(const ScopedPropVariant&) const = delete; 133 bool operator!=(const ScopedPropVariant&) const = delete; 134 135 // Returns a pointer to the underlying PROPVARIANT for use as an out param in 136 // a function call. Receive()137 PROPVARIANT* Receive() { 138 RTC_DCHECK_EQ(pv_.vt, VT_EMPTY); 139 return &pv_; 140 } 141 142 // Clears the instance to prepare it for re-use (e.g., via Receive). 
Reset()143 void Reset() { 144 if (pv_.vt != VT_EMPTY) { 145 HRESULT result = PropVariantClear(&pv_); 146 RTC_DCHECK_EQ(result, S_OK); 147 } 148 } 149 get()150 const PROPVARIANT& get() const { return pv_; } ptr()151 const PROPVARIANT* ptr() const { return &pv_; } 152 153 private: 154 PROPVARIANT pv_; 155 }; 156 157 // Simple scoped memory releaser class for COM allocated memory. 158 template <typename T> 159 class ScopedCoMem { 160 public: ScopedCoMem()161 ScopedCoMem() : mem_ptr_(nullptr) {} 162 ~ScopedCoMem()163 ~ScopedCoMem() { Reset(nullptr); } 164 165 ScopedCoMem(const ScopedCoMem&) = delete; 166 ScopedCoMem& operator=(const ScopedCoMem&) = delete; 167 168 T** operator&() { // NOLINT 169 RTC_DCHECK(mem_ptr_ == nullptr); // To catch memory leaks. 170 return &mem_ptr_; 171 } 172 173 operator T*() { return mem_ptr_; } 174 175 T* operator->() { 176 RTC_DCHECK(mem_ptr_ != nullptr); 177 return mem_ptr_; 178 } 179 180 const T* operator->() const { 181 RTC_DCHECK(mem_ptr_ != nullptr); 182 return mem_ptr_; 183 } 184 185 explicit operator bool() const { return mem_ptr_; } 186 187 friend bool operator==(const ScopedCoMem& lhs, std::nullptr_t) { 188 return lhs.Get() == nullptr; 189 } 190 191 friend bool operator==(std::nullptr_t, const ScopedCoMem& rhs) { 192 return rhs.Get() == nullptr; 193 } 194 195 friend bool operator!=(const ScopedCoMem& lhs, std::nullptr_t) { 196 return lhs.Get() != nullptr; 197 } 198 199 friend bool operator!=(std::nullptr_t, const ScopedCoMem& rhs) { 200 return rhs.Get() != nullptr; 201 } 202 Reset(T * ptr)203 void Reset(T* ptr) { 204 if (mem_ptr_) 205 CoTaskMemFree(mem_ptr_); 206 mem_ptr_ = ptr; 207 } 208 Get()209 T* Get() const { return mem_ptr_; } 210 211 private: 212 T* mem_ptr_; 213 }; 214 215 // A HANDLE that is automatically initialized and closed upon respective 216 // construction and destruction of this class. 
217 class ScopedHandle { 218 public: ScopedHandle()219 ScopedHandle() : handle_(nullptr) {} ScopedHandle(HANDLE h)220 explicit ScopedHandle(HANDLE h) : handle_(nullptr) { Set(h); } 221 ~ScopedHandle()222 ~ScopedHandle() { Close(); } 223 224 ScopedHandle& operator=(const ScopedHandle&) = delete; 225 bool operator==(const ScopedHandle&) const = delete; 226 bool operator!=(const ScopedHandle&) const = delete; 227 228 // Use this instead of comparing to INVALID_HANDLE_VALUE. IsValid()229 bool IsValid() const { return handle_ != nullptr; } 230 Set(HANDLE new_handle)231 void Set(HANDLE new_handle) { 232 Close(); 233 // Windows is inconsistent about invalid handles. 234 // See https://blogs.msdn.microsoft.com/oldnewthing/20040302-00/?p=40443 235 // for details. 236 if (new_handle != INVALID_HANDLE_VALUE) { 237 handle_ = new_handle; 238 } 239 } 240 Get()241 HANDLE Get() const { return handle_; } 242 HANDLE()243 operator HANDLE() const { return handle_; } 244 Close()245 void Close() { 246 if (handle_) { 247 if (!::CloseHandle(handle_)) { 248 RTC_DCHECK_NOTREACHED(); 249 } 250 handle_ = nullptr; 251 } 252 } 253 254 private: 255 HANDLE handle_; 256 }; 257 258 // Utility methods for the Core Audio API on Windows. 259 // Always ensure that Core Audio is supported before using these methods. 260 // Use webrtc_win::core_audio_utility::IsSupported() for this purpose. 261 // Also, all methods must be called on a valid COM thread. This can be done 262 // by using the ScopedCOMInitializer helper class. 263 // These methods are based on media::CoreAudioUtil in Chrome. 264 namespace core_audio_utility { 265 266 // Helper class which automates casting between WAVEFORMATEX and 267 // WAVEFORMATEXTENSIBLE raw pointers using implicit constructors and 268 // operator overloading. Note that, no memory is allocated by this utility 269 // structure. It only serves as a handle (or a wrapper) of the structure 270 // provided to it at construction. 
271 class WaveFormatWrapper { 272 public: WaveFormatWrapper(WAVEFORMATEXTENSIBLE * p)273 WaveFormatWrapper(WAVEFORMATEXTENSIBLE* p) 274 : ptr_(reinterpret_cast<WAVEFORMATEX*>(p)) {} WaveFormatWrapper(WAVEFORMATEX * p)275 WaveFormatWrapper(WAVEFORMATEX* p) : ptr_(p) {} 276 ~WaveFormatWrapper() = default; 277 278 operator WAVEFORMATEX*() const { return ptr_; } 279 WAVEFORMATEX* operator->() const { return ptr_; } get()280 WAVEFORMATEX* get() const { return ptr_; } 281 WAVEFORMATEXTENSIBLE* GetExtensible() const; 282 283 bool IsExtensible() const; 284 bool IsPcm() const; 285 bool IsFloat() const; 286 size_t size() const; 287 288 private: 289 WAVEFORMATEX* ptr_; 290 }; 291 292 // Returns true if Windows Core Audio is supported. 293 // Always verify that this method returns true before using any of the 294 // other methods in this class. 295 bool IsSupported(); 296 297 // Returns true if Multimedia Class Scheduler service (MMCSS) is supported. 298 // The MMCSS enables multimedia applications to ensure that their time-sensitive 299 // processing receives prioritized access to CPU resources without denying CPU 300 // resources to lower-priority applications. 301 bool IsMMCSSSupported(); 302 303 // The MMDevice API lets clients discover the audio endpoint devices in the 304 // system and determine which devices are suitable for the application to use. 305 // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API. 306 307 // Number of active audio devices in the specified data flow direction. 308 // Set `data_flow` to eAll to retrieve the total number of active audio 309 // devices. 310 int NumberOfActiveDevices(EDataFlow data_flow); 311 312 // Returns 1, 2, or 3 depending on what version of IAudioClient the platform 313 // supports. 314 // Example: IAudioClient2 is supported on Windows 8 and higher => 2 is returned. 
uint32_t GetAudioClientVersion();

// Creates an IMMDeviceEnumerator interface which provides methods for
// enumerating audio endpoint devices.
// TODO(henrika): IMMDeviceEnumerator::RegisterEndpointNotificationCallback.
Microsoft::WRL::ComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator();

// These functions return the unique device id of the default or
// communications input/output device, or an empty string if no such device
// exists or if the device has been disabled.
std::string GetDefaultInputDeviceID();
std::string GetDefaultOutputDeviceID();
std::string GetCommunicationsInputDeviceID();
std::string GetCommunicationsOutputDeviceID();

// Creates an IMMDevice interface corresponding to the unique device id in
// `device_id`, or by data-flow direction and role if `device_id` is set to
// AudioDeviceName::kDefaultDeviceId.
Microsoft::WRL::ComPtr<IMMDevice> CreateDevice(absl::string_view device_id,
                                               EDataFlow data_flow,
                                               ERole role);

// Returns the unique ID and user-friendly name of a given endpoint device.
// Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}", and
// "Microphone (Realtek High Definition Audio)".
webrtc::AudioDeviceName GetDeviceName(IMMDevice* device);

// Gets the user-friendly name of the endpoint device which is represented
// by a unique id in `device_id`, or by data-flow direction and role if
// `device_id` is set to AudioDeviceName::kDefaultDeviceId.
std::string GetFriendlyName(absl::string_view device_id,
                            EDataFlow data_flow,
                            ERole role);

// Query if the audio device is a rendering device or a capture device.
EDataFlow GetDataFlow(IMMDevice* device);

// Enumerates all input devices and adds the names (friendly name and unique
// device id) to the list in `device_names`.
bool GetInputDeviceNames(webrtc::AudioDeviceNames* device_names);

// Enumerates all output devices and adds the names (friendly name and unique
// device id) to the list in `device_names`.
bool GetOutputDeviceNames(webrtc::AudioDeviceNames* device_names);

// The Windows Audio Session API (WASAPI) enables client applications to
// manage the flow of audio data between the application and an audio endpoint
// device. Header files Audioclient.h and Audiopolicy.h define the WASAPI
// interfaces.

// Creates an IAudioSessionManager2 interface for the specified `device`.
// This interface provides access to e.g. the IAudioSessionEnumerator.
Microsoft::WRL::ComPtr<IAudioSessionManager2> CreateSessionManager2(
    IMMDevice* device);

// Creates an IAudioSessionEnumerator interface for the specified `device`.
// The client can use the interface to enumerate audio sessions on the audio
// device.
Microsoft::WRL::ComPtr<IAudioSessionEnumerator> CreateSessionEnumerator(
    IMMDevice* device);

// Number of active audio sessions for the given `device`. Expired or inactive
// sessions are not included.
int NumberOfActiveSessions(IMMDevice* device);

// Creates an IAudioClient instance for a specific device or the default
// device specified by data-flow direction and role.
Microsoft::WRL::ComPtr<IAudioClient> CreateClient(absl::string_view device_id,
                                                  EDataFlow data_flow,
                                                  ERole role);
Microsoft::WRL::ComPtr<IAudioClient2> CreateClient2(absl::string_view device_id,
                                                    EDataFlow data_flow,
                                                    ERole role);
Microsoft::WRL::ComPtr<IAudioClient3> CreateClient3(absl::string_view device_id,
                                                    EDataFlow data_flow,
                                                    ERole role);

// Sets the AudioCategory_Communications category. Should be called before
// GetSharedModeMixFormat() and IsFormatSupported(). The `client` argument must
// be an IAudioClient2 or IAudioClient3 interface pointer, hence only supported
// on Windows 8 and above.
// TODO(henrika): evaluate effect (if any).
HRESULT SetClientProperties(IAudioClient2* client);

// Returns the buffer size limits of the hardware audio engine in
// 100-nanosecond units given a specified `format`. Does not require prior
// audio stream initialization. The `client` argument must be an IAudioClient2
// or IAudioClient3 interface pointer, hence only supported on Windows 8 and
// above.
// TODO(henrika): always fails with AUDCLNT_E_OFFLOAD_MODE_ONLY.
HRESULT GetBufferSizeLimits(IAudioClient2* client,
                            const WAVEFORMATEXTENSIBLE* format,
                            REFERENCE_TIME* min_buffer_duration,
                            REFERENCE_TIME* max_buffer_duration);

// Get the mix format that the audio engine uses internally for processing
// of shared-mode streams. The client can call this method before calling
// IAudioClient::Initialize. When creating a shared-mode stream for an audio
// endpoint device, the Initialize method always accepts the stream format
// obtained by this method.
HRESULT GetSharedModeMixFormat(IAudioClient* client,
                               WAVEFORMATEXTENSIBLE* format);

// Returns true if the specified `client` supports the format in `format`
// for the given `share_mode` (shared or exclusive). The client can call this
// method before calling IAudioClient::Initialize.
bool IsFormatSupported(IAudioClient* client,
                       AUDCLNT_SHAREMODE share_mode,
                       const WAVEFORMATEXTENSIBLE* format);

// For a shared-mode stream, the audio engine periodically processes the
// data in the endpoint buffer at the period obtained in `device_period`.
// For an exclusive mode stream, `device_period` corresponds to the minimum
// time interval between successive processing by the endpoint device.
// This period plus the stream latency between the buffer and endpoint device
// represents the minimum possible latency that an audio application can
// achieve. The time in `device_period` is expressed in 100-nanosecond units.
HRESULT GetDevicePeriod(IAudioClient* client,
                        AUDCLNT_SHAREMODE share_mode,
                        REFERENCE_TIME* device_period);

// Returns the range of periodicities supported by the engine for the specified
// stream `format`. The periodicity of the engine is the rate at which the
// engine wakes an event-driven audio client to transfer audio data to or from
// the engine. Can be used for low-latency support on some devices.
// The `client` argument must be an IAudioClient3 interface pointer, hence only
// supported on Windows 10 and above.
HRESULT GetSharedModeEnginePeriod(IAudioClient3* client3,
                                  const WAVEFORMATEXTENSIBLE* format,
                                  uint32_t* default_period_in_frames,
                                  uint32_t* fundamental_period_in_frames,
                                  uint32_t* min_period_in_frames,
                                  uint32_t* max_period_in_frames);

// Get the preferred audio parameters for the given `client` corresponding to
// the stream format that the audio engine uses for its internal processing of
// shared-mode streams. The acquired values should only be utilized for shared
// mode streams since there are no preferred settings for an exclusive mode
// stream.
HRESULT GetPreferredAudioParameters(IAudioClient* client,
                                    webrtc::AudioParameters* params);
// As above but override the preferred sample rate and use `sample_rate`
// instead. Intended mainly for testing purposes and in combination with rate
// conversion.
HRESULT GetPreferredAudioParameters(IAudioClient* client,
                                    webrtc::AudioParameters* params,
                                    uint32_t sample_rate);

// After activating an IAudioClient interface on an audio endpoint device,
// the client must initialize it once, and only once, to initialize the audio
// stream between the client and the device. In shared mode, the client
// connects indirectly through the audio engine which does the mixing.
// If a valid event is provided in `event_handle`, the client will be
// initialized for event-driven buffer handling. If `event_handle` is set to
// nullptr, event-driven buffer handling is not utilized. To achieve the
// minimum stream latency between the client application and audio endpoint
// device, set `buffer_duration` to 0. A client has the option of requesting a
// buffer size that is larger than what is strictly necessary to make timing
// glitches rare or nonexistent. Increasing the buffer size does not necessarily
// increase the stream latency. Each unit of reference time is 100 nanoseconds.
// The `auto_convert_pcm` parameter can be used for testing purposes to ensure
// that the sample rate of the client side does not have to match the audio
// engine mix format. If `auto_convert_pcm` is set to true, a rate converter
// will be inserted to convert between the sample rate in `format` and the
// preferred rate given by GetPreferredAudioParameters().
// The output parameter `endpoint_buffer_size` contains the size of the
// endpoint buffer and it is expressed as the number of audio frames the
// buffer can hold.
HRESULT SharedModeInitialize(IAudioClient* client,
                             const WAVEFORMATEXTENSIBLE* format,
                             HANDLE event_handle,
                             REFERENCE_TIME buffer_duration,
                             bool auto_convert_pcm,
                             uint32_t* endpoint_buffer_size);

// Works as SharedModeInitialize() but adds support for using smaller engine
// periods than the default period.
// The `client` argument must be an IAudioClient3 interface pointer, hence only
// supported on Windows 10 and above.
// TODO(henrika): can probably be merged into SharedModeInitialize() to avoid
// duplicating code. Keeping as separate method for now until decided if we
// need low-latency support.
HRESULT SharedModeInitializeLowLatency(IAudioClient3* client,
                                       const WAVEFORMATEXTENSIBLE* format,
                                       HANDLE event_handle,
                                       uint32_t period_in_frames,
                                       bool auto_convert_pcm,
                                       uint32_t* endpoint_buffer_size);

// Creates an IAudioRenderClient client for an existing IAudioClient given by
// `client`. The IAudioRenderClient interface enables a client to write
// output data to a rendering endpoint buffer. The methods in this interface
// manage the movement of data packets that contain audio-rendering data.
Microsoft::WRL::ComPtr<IAudioRenderClient> CreateRenderClient(
    IAudioClient* client);

// Creates an IAudioCaptureClient client for an existing IAudioClient given by
// `client`. The IAudioCaptureClient interface enables a client to read
// input data from a capture endpoint buffer. The methods in this interface
// manage the movement of data packets that contain capture data.
Microsoft::WRL::ComPtr<IAudioCaptureClient> CreateCaptureClient(
    IAudioClient* client);

// Creates an IAudioClock interface for an existing IAudioClient given by
// `client`. The IAudioClock interface enables a client to monitor a stream's
// data rate and the current position in the stream.
Microsoft::WRL::ComPtr<IAudioClock> CreateAudioClock(IAudioClient* client);

// Creates an AudioSessionControl interface for an existing IAudioClient given
// by `client`. The IAudioSessionControl interface enables a client to configure
// the control parameters for an audio session and to monitor events in the
// session.
Microsoft::WRL::ComPtr<IAudioSessionControl> CreateAudioSessionControl(
    IAudioClient* client);

// Creates an ISimpleAudioVolume interface for an existing IAudioClient given by
// `client`. This interface enables a client to control the master volume level
// of an active audio session.
Microsoft::WRL::ComPtr<ISimpleAudioVolume> CreateSimpleAudioVolume(
    IAudioClient* client);

// Fills up the endpoint rendering buffer with silence for an existing
// IAudioClient given by `client` and a corresponding IAudioRenderClient
// given by `render_client`.
bool FillRenderEndpointBufferWithSilence(IAudioClient* client,
                                         IAudioRenderClient* render_client);

// Prints/logs all fields of the format structure in `format`.
// Also supports extended versions (WAVEFORMATEXTENSIBLE).
std::string WaveFormatToString(WaveFormatWrapper format);

// Converts Windows internal REFERENCE_TIME (100 nanosecond units) into
// generic webrtc::TimeDelta which then can be converted to any time unit.
webrtc::TimeDelta ReferenceTimeToTimeDelta(REFERENCE_TIME time);

// Converts size expressed in number of audio frames, `num_frames`, into
// milliseconds given a specified `sample_rate`.
double FramesToMilliseconds(uint32_t num_frames, uint16_t sample_rate);

// Converts a COM error into a human-readable string.
std::string ErrorToString(const _com_error& error);

}  // namespace core_audio_utility
}  // namespace webrtc_win
}  // namespace webrtc

#endif  // MODULES_AUDIO_DEVICE_WIN_CORE_AUDIO_UTILITY_WIN_H_