/*
**
** Copyright 2012, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


#define LOG_TAG "AudioFlinger"
// #define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_AUDIO

#include "Threads.h"

#include "Client.h"
#include "IAfEffect.h"
#include "MelReporter.h"
#include "ResamplerBufferProvider.h"

#include <afutils/FallibleLockGuard.h>
#include <afutils/Permission.h>
#include <afutils/TypedLogger.h>
#include <afutils/Vibrator.h>
#include <audio_utils/MelProcessor.h>
#include <audio_utils/Metadata.h>
#include <audio_utils/Trace.h>
#include <com_android_media_audioserver.h>
#ifdef DEBUG_CPU_USAGE
#include <audio_utils/Statistics.h>
#include <cpustats/ThreadCpuUsage.h>
#endif
#include <audio_utils/channels.h>
#include <audio_utils/format.h>
#include <audio_utils/minifloat.h>
#include <audio_utils/mono_blend.h>
#include <audio_utils/primitives.h>
#include <audio_utils/safe_math.h>
#include <audiomanager/AudioManager.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/PersistableBundle.h>
#include <com_android_media_audio.h>
#include <cutils/bitops.h>
#include <cutils/properties.h>
#include <fastpath/AutoPark.h>
#include <media/AudioContainers.h>
#include <media/AudioDeviceTypeAddr.h>
#include <media/AudioParameter.h>
#include <media/AudioResamplerPublic.h>
#ifdef ADD_BATTERY_DATA
#include <media/IMediaPlayerService.h>
#include <media/IMediaDeathNotifier.h>
#endif
#include <media/MmapStreamCallback.h>
#include <media/RecordBufferConverter.h>
#include <media/TypeConverter.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <media/audiohal/StreamHalInterface.h>
#include <media/nbaio/AudioStreamInSource.h>
#include <media/nbaio/AudioStreamOutSink.h>
#include <media/nbaio/MonoPipe.h>
#include <media/nbaio/MonoPipeReader.h>
#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>
#include <media/nbaio/SourceAudioBufferProvider.h>
#include <media/ValidatedAttributionSourceState.h>
#include <mediautils/BatteryNotifier.h>
#include <mediautils/Process.h>
#include <mediautils/SchedulingPolicyService.h>
#include <mediautils/ServiceUtilities.h>
#include <powermanager/PowerManager.h>
#include <private/android_filesystem_config.h>
#include <private/media/AudioTrackShared.h>
#include <psh_utils/AudioPowerManager.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio_effects/effect_downmix.h>
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_spatializer.h>
#include <utils/Log.h>
#include <utils/Trace.h>

#include <fcntl.h>
#include <linux/futex.h>
#include <math.h>
#include <memory>
#include <pthread.h>
#include <sstream>
#include <string>
#include <sys/stat.h>
#include <sys/syscall.h>

// ----------------------------------------------------------------------------

// Note: the following macro is used for extremely verbose logging messages. In
// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
// 0; but one side effect of this is to turn on all LOGV's as well. Some messages
// are so verbose that we want to suppress them even when we have ALOG_ASSERT
// turned on. Do not uncomment the #define below unless you really know what you
// are doing and want to see all of the extremely verbose messages.
//#define VERY_VERY_VERBOSE_LOGGING
#ifdef VERY_VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while(0)
#endif

// TODO: Move these macro/inlines to a header file.
#define max(a, b) ((a) > (b) ? (a) : (b))

template <typename T>
static inline T min(const T& a, const T& b)
{
    return a < b ? a : b;
}

using com::android::media::permission::ValidatedAttributionSourceState;
namespace audioserver_flags = com::android::media::audioserver;

namespace android {

using audioflinger::SyncEvent;
using media::IEffectClient;
using content::AttributionSourceState;

// Keep in sync with java definition in media/java/android/media/AudioRecord.java
static constexpr int32_t kMaxSharedAudioHistoryMs = 5000;

// retry counts for buffer fill timeout
// 50 * ~20msecs = 1 second
static const int8_t kMaxTrackRetries = 50;
static const int8_t kMaxTrackStartupRetries = 50;

// Allow fewer retry attempts on direct output threads.
// Direct outputs can be a scarce resource in audio hardware and should
// be released as quickly as possible.
// Notes:
// 1) The retry duration kMaxTrackRetriesDirectMs may be increased
//    in case the data write is bursty for the AudioTrack. The application
//    should endeavor to write at least once every kMaxTrackRetriesDirectMs
//    to prevent an underrun situation. If the data is bursty, then
//    the application can also throttle the data sent to be even.
// 2) For compressed audio data, any data present in the AudioTrack buffer
//    will be sent and reset the retry count. This delivers data as
//    it arrives, with approximately kDirectMinSleepTimeUs = 10ms checking interval.
// 3) For linear PCM or proportional PCM, we wait one period for a period's worth
//    of data to be available, then any remaining data is delivered.
//    This is required to ensure the last bit of data is delivered before underrun.
//
// Sleep time per cycle is kDirectMinSleepTimeUs for compressed tracks
// or the size of the HAL period for proportional / linear PCM tracks.
static const int32_t kMaxTrackRetriesDirectMs = 200;

// don't warn about blocked writes or record buffer overflows more often than this
static const nsecs_t kWarningThrottleNs = seconds(5);

// RecordThread loop sleep time upon application overrun or audio HAL read error
static const int kRecordThreadSleepUs = 5000;

// maximum time to wait in sendConfigEvent_l() for a status to be received
static const nsecs_t kConfigEventTimeoutNs = seconds(2);
// longer timeout for create audio patch to account for specific scenarios
// with Bluetooth devices
static const nsecs_t kCreatePatchEventTimeoutNs = seconds(4);

// minimum sleep time for the mixer thread loop when tracks are active but in underrun
static const uint32_t kMinThreadSleepTimeUs = 5000;
// maximum divider applied to the active sleep time in the mixer thread loop
static const uint32_t kMaxThreadSleepTimeShift = 2;

// minimum normal sink buffer size, expressed in milliseconds rather than frames
// FIXME This should be based on experimentally observed scheduling jitter
static const uint32_t kMinNormalSinkBufferSizeMs = 20;
// maximum normal sink buffer size
static const uint32_t kMaxNormalSinkBufferSizeMs = 24;

// minimum capture buffer size in milliseconds to _not_ need a fast capture thread
// FIXME This should be based on experimentally observed scheduling jitter
static const uint32_t kMinNormalCaptureBufferSizeMs = 12;

// Offloaded output thread standby delay: allows track transition without going to standby
static const nsecs_t kOffloadStandbyDelayNs = seconds(1);

// Direct output thread minimum sleep time in idle or active(underrun) state
static const nsecs_t kDirectMinSleepTimeUs = 10000;

// Minimum amount of time between checking to see if the timestamp is advancing
// for underrun detection. If we check too frequently, we may not detect a
// timestamp update and will falsely detect underrun.
static constexpr nsecs_t kMinimumTimeBetweenTimestampChecksNs = 150 /* ms */ * 1'000'000;

// The universal constant for the ubiquitous 20ms value. The value of 20ms seems to provide a good
// balance between power consumption and latency, and allows threads to be scheduled reliably
// by the CFS scheduler.
// FIXME Express other hardcoded references to 20ms with references to this constant and move
// it appropriately.
#define FMS_20 20

// Whether to use fast mixer
static const enum {
    FastMixer_Never,    // never initialize or use: for debugging only
    FastMixer_Always,   // always initialize and use, even if not needed: for debugging only
                        // normal mixer multiplier is 1
    FastMixer_Static,   // initialize if needed, then use all the time if initialized,
                        // multiplier is calculated based on min & max normal mixer buffer size
    FastMixer_Dynamic,  // initialize if needed, then use dynamically depending on track load,
                        // multiplier is calculated based on min & max normal mixer buffer size
    // FIXME for FastMixer_Dynamic:
    //  Supporting this option will require fixing HALs that can't handle large writes.
    //  For example, one HAL implementation returns an error from a large write,
    //  and another HAL implementation corrupts memory, possibly in the sample rate converter.
    //  We could either fix the HAL implementations, or provide a wrapper that breaks
    //  up large writes into smaller ones, and the wrapper would need to deal with the scheduler.
} kUseFastMixer = FastMixer_Static;

// Whether to use fast capture
static const enum {
    FastCapture_Never,  // never initialize or use: for debugging only
    FastCapture_Always, // always initialize and use, even if not needed: for debugging only
    FastCapture_Static, // initialize if needed, then use all the time if initialized
} kUseFastCapture = FastCapture_Static;

// Priorities for requestPriority
static const int kPriorityAudioApp = 2;
static const int kPriorityFastMixer = 3;
static const int kPriorityFastCapture = 3;
// Request real-time priority for PlaybackThread in ARC
static const int kPriorityPlaybackThreadArc = 1;

// IAudioFlinger::createTrack() has an in/out parameter 'pFrameCount' for the total size of the
// track buffer in shared memory. Zero on input means to use a default value. For fast tracks,
// AudioFlinger derives the default from HAL buffer size and 'fast track multiplier'.

// This is the default value, if not specified by property.
static const int kFastTrackMultiplier = 2;

// The minimum and maximum allowed values
static const int kFastTrackMultiplierMin = 1;
static const int kFastTrackMultiplierMax = 2;

// The actual value to use, which can be specified per-device via property af.fast_track_multiplier.
static int sFastTrackMultiplier = kFastTrackMultiplier;
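// Illustrative example (hypothetical numbers, not taken from any particular device):
// with a HAL buffer of 192 frames and sFastTrackMultiplier == 2, a fast track created
// with an input 'pFrameCount' of zero would get a default buffer of 2 * 192 = 384 frames.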

// See Thread::readOnlyHeap().
// Initially this heap is used to allocate client buffers for "fast" AudioRecord.
// Eventually it will be the single buffer that FastCapture writes into via HAL read(),
// and that all "fast" AudioRecord clients read from. In either case, the size can be small.
static const size_t kRecordThreadReadOnlyHeapSize = 0xD000;

static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);

static nsecs_t getStandbyTimeInNanos() {
    static nsecs_t standbyTimeInNanos = []() {
        const int ms = property_get_int32("ro.audio.flinger_standbytime_ms",
                kDefaultStandbyTimeInNsecs / NANOS_PER_MILLISECOND);
        ALOGI("%s: Using %d ms as standby time", __func__, ms);
        return milliseconds(ms);
    }();
    return standbyTimeInNanos;
}
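// Example (illustrative value): a device can raise the standby delay from the default
// 3 seconds to 5 seconds by setting ro.audio.flinger_standbytime_ms=5000; the property
// is read only once, the first time this function runs.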

// Set kEnableExtendedChannels to true to enable greater than stereo output
// for the MixerThread and device sink. Number of channels allowed is
// FCC_2 <= channels <= FCC_LIMIT.
constexpr bool kEnableExtendedChannels = true;

// Returns true if channel mask is permitted for the PCM sink in the MixerThread
/* static */
bool IAfThreadBase::isValidPcmSinkChannelMask(audio_channel_mask_t channelMask) {
    switch (audio_channel_mask_get_representation(channelMask)) {
    case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
        // Haptic channel mask is only applicable for channel position mask.
        const uint32_t channelCount = audio_channel_count_from_out_mask(
                static_cast<audio_channel_mask_t>(channelMask & ~AUDIO_CHANNEL_HAPTIC_ALL));
        const uint32_t maxChannelCount = kEnableExtendedChannels
                ? FCC_LIMIT : FCC_2;
        if (channelCount < FCC_2 // mono is not supported at this time
                || channelCount > maxChannelCount) {
            return false;
        }
        // check that channelMask is the "canonical" one we expect for the channelCount.
        return audio_channel_position_mask_is_out_canonical(channelMask);
        }
    case AUDIO_CHANNEL_REPRESENTATION_INDEX:
        if (kEnableExtendedChannels) {
            const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
            if (channelCount >= FCC_2 // mono is not supported at this time
                    && channelCount <= FCC_LIMIT) {
                return true;
            }
        }
        return false;
    default:
        return false;
    }
}
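// Examples, following the rules above:
//   AUDIO_CHANNEL_OUT_STEREO   -> true  (canonical 2-channel position mask)
//   AUDIO_CHANNEL_OUT_MONO     -> false (mono is not supported at this time)
//   AUDIO_CHANNEL_OUT_5POINT1  -> true  (canonical and within FCC_LIMIT)
//   a 2-channel index mask     -> true  only while kEnableExtendedChannels is true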

// Set kEnableExtendedPrecision to true to use extended precision in MixerThread
constexpr bool kEnableExtendedPrecision = true;

// Returns true if format is permitted for the PCM sink in the MixerThread
/* static */
bool IAfThreadBase::isValidPcmSinkFormat(audio_format_t format) {
    switch (format) {
    case AUDIO_FORMAT_PCM_16_BIT:
        return true;
    case AUDIO_FORMAT_PCM_FLOAT:
    case AUDIO_FORMAT_PCM_24_BIT_PACKED:
    case AUDIO_FORMAT_PCM_32_BIT:
    case AUDIO_FORMAT_PCM_8_24_BIT:
        return kEnableExtendedPrecision;
    default:
        return false;
    }
}
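// Examples: AUDIO_FORMAT_PCM_16_BIT is always accepted; AUDIO_FORMAT_PCM_FLOAT and the other
// extended-precision PCM formats are accepted only while kEnableExtendedPrecision is true;
// non-PCM formats such as AUDIO_FORMAT_MP3 are always rejected here.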

// ----------------------------------------------------------------------------

// formatToString() needs to be exact for MediaMetrics purposes.
// Do not use media/TypeConverter.h toString().
/* static */
std::string IAfThreadBase::formatToString(audio_format_t format) {
    std::string result;
    FormatConverter::toString(format, result);
    return result;
}

// TODO: move all toString helpers to audio.h
// under #ifdef __cplusplus #endif
static std::string patchSinksToString(const struct audio_patch *patch)
{
    std::string s;
    for (size_t i = 0; i < patch->num_sinks; ++i) {
        if (i > 0) s.append("|");
        if (patch->sinks[i].ext.device.address[0]) {
            s.append("(").append(toString(patch->sinks[i].ext.device.type))
                    .append(", ").append(patch->sinks[i].ext.device.address).append(")");
        } else {
            s.append(toString(patch->sinks[i].ext.device.type));
        }
    }
    return s;
}

static std::string patchSourcesToString(const struct audio_patch *patch)
{
    std::string s;
    for (size_t i = 0; i < patch->num_sources; ++i) {
        if (i > 0) s.append("|");
        if (patch->sources[i].ext.device.address[0]) {
            s.append("(").append(toString(patch->sources[i].ext.device.type))
                    .append(", ").append(patch->sources[i].ext.device.address).append(")");
        } else {
            s.append(toString(patch->sources[i].ext.device.type));
        }
    }
    return s;
}
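// Example output (hypothetical device address): a patch with a Bluetooth A2DP sink and a
// speaker sink would be rendered roughly as
//   "(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, 00:11:22:33:44:55)|AUDIO_DEVICE_OUT_SPEAKER"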

static std::string toString(audio_latency_mode_t mode) {
    // We convert to the AIDL type to print (eventually the legacy type will be removed).
    const auto result = legacy2aidl_audio_latency_mode_t_AudioLatencyMode(mode);
    return result.has_value() ? media::audio::common::toString(*result) : "UNKNOWN";
}

// Could be made a template, but other toString overloads for std::vector are confused.
static std::string toString(const std::vector<audio_latency_mode_t>& elements) {
    std::string s("{ ");
    for (const auto& e : elements) {
        s.append(toString(e));
        s.append(" ");
    }
    s.append("}");
    return s;
}

static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;

static void sFastTrackMultiplierInit()
{
    char value[PROPERTY_VALUE_MAX];
    if (property_get("af.fast_track_multiplier", value, NULL) > 0) {
        char *endptr;
        unsigned long ul = strtoul(value, &endptr, 0);
        if (*endptr == '\0' && kFastTrackMultiplierMin <= ul && ul <= kFastTrackMultiplierMax) {
            sFastTrackMultiplier = (int) ul;
        }
    }
}

// ----------------------------------------------------------------------------

#ifdef ADD_BATTERY_DATA
// To collect the amplifier usage
static void addBatteryData(uint32_t params) {
    sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService();
    if (service == NULL) {
        // getMediaPlayerService() has already logged the failure
        return;
    }

    service->addBatteryData(params);
}
#endif

// Track the CLOCK_BOOTTIME versus CLOCK_MONOTONIC timebase offset
struct {
    // call when you acquire a partial wakelock
    void acquire(const sp<IBinder> &wakeLockToken) {
        pthread_mutex_lock(&mLock);
        if (wakeLockToken.get() == nullptr) {
            adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
        } else {
            if (mCount == 0) {
                adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
            }
            ++mCount;
        }
        pthread_mutex_unlock(&mLock);
    }

    // call when you release a partial wakelock.
    void release(const sp<IBinder> &wakeLockToken) {
        if (wakeLockToken.get() == nullptr) {
            return;
        }
        pthread_mutex_lock(&mLock);
        if (--mCount < 0) {
            ALOGE("negative wakelock count");
            mCount = 0;
        }
        pthread_mutex_unlock(&mLock);
    }

    // retrieves the boottime timebase offset from monotonic.
    int64_t getBoottimeOffset() {
        pthread_mutex_lock(&mLock);
        int64_t boottimeOffset = mBoottimeOffset;
        pthread_mutex_unlock(&mLock);
        return boottimeOffset;
    }

    // Adjusts the timebase offset between TIMEBASE_MONOTONIC
    // and the selected timebase.
    // Currently only TIMEBASE_BOOTTIME is allowed.
    //
    // This only needs to be called upon acquiring the first partial wakelock
    // after all other partial wakelocks are released.
    //
    // We do an empirical measurement of the offset rather than parsing
    // /proc/timer_list since the latter is not a formal kernel ABI.
    static void adjustTimebaseOffset(int64_t *offset, ExtendedTimestamp::Timebase timebase) {
        int clockbase;
        switch (timebase) {
        case ExtendedTimestamp::TIMEBASE_BOOTTIME:
            clockbase = SYSTEM_TIME_BOOTTIME;
            break;
        default:
            LOG_ALWAYS_FATAL("invalid timebase %d", timebase);
            break;
        }
        // try three times to get the clock offset, choose the one
        // with the minimum gap in measurements.
        const int tries = 3;
        nsecs_t bestGap = 0, measured = 0; // not required, initialized for clang-tidy
        for (int i = 0; i < tries; ++i) {
            const nsecs_t tmono = systemTime(SYSTEM_TIME_MONOTONIC);
            const nsecs_t tbase = systemTime(clockbase);
            const nsecs_t tmono2 = systemTime(SYSTEM_TIME_MONOTONIC);
            const nsecs_t gap = tmono2 - tmono;
            if (i == 0 || gap < bestGap) {
                bestGap = gap;
                measured = tbase - ((tmono + tmono2) >> 1);
            }
        }

        // to avoid micro-adjusting, we don't change the timebase
        // unless it is significantly different.
        //
        // Assumption: It probably takes more than toleranceNs to
        // suspend and resume the device.
        static int64_t toleranceNs = 10000; // 10 us
        if (llabs(*offset - measured) > toleranceNs) {
            ALOGV("Adjusting timebase offset old: %lld new: %lld",
                    (long long)*offset, (long long)measured);
            *offset = measured;
        }
    }

    pthread_mutex_t mLock;
    int32_t mCount;
    int64_t mBoottimeOffset;
} gBoottime = { PTHREAD_MUTEX_INITIALIZER, 0, 0 }; // static, so use POD initialization
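// Sketch of how the offset is consumed (see acquireWakeLock_l() below): since the offset is
// measured as tbase minus the monotonic midpoint, a monotonic timestamp can be converted to
// the boottime timebase with
//     int64_t boottimeNs = monotonicNs + gBoottime.getBoottimeOffset();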

// ----------------------------------------------------------------------------
//      CPU Stats
// ----------------------------------------------------------------------------

class CpuStats {
public:
    CpuStats();
    void sample(const String8 &title);
#ifdef DEBUG_CPU_USAGE
private:
    ThreadCpuUsage mCpuUsage;                  // instantaneous thread CPU usage in wall clock ns
    audio_utils::Statistics<double> mWcStats;  // statistics on thread CPU usage in wall clock ns

    audio_utils::Statistics<double> mHzStats;  // statistics on thread CPU usage in cycles

    int mCpuNum;                        // thread's current CPU number
    int mCpukHz;                        // frequency of thread's current CPU in kHz
#endif
};

CpuStats::CpuStats()
#ifdef DEBUG_CPU_USAGE
    : mCpuNum(-1), mCpukHz(-1)
#endif
{
}

void CpuStats::sample(const String8 &title
#ifndef DEBUG_CPU_USAGE
                __unused
#endif
        ) {
#ifdef DEBUG_CPU_USAGE
    // get current thread's delta CPU time in wall clock ns
    double wcNs;
    bool valid = mCpuUsage.sampleAndEnable(wcNs);

    // record sample for wall clock statistics
    if (valid) {
        mWcStats.add(wcNs);
    }

    // get the current CPU number
    int cpuNum = sched_getcpu();

    // get the current CPU frequency in kHz
    int cpukHz = mCpuUsage.getCpukHz(cpuNum);

    // check if either CPU number or frequency changed
    if (cpuNum != mCpuNum || cpukHz != mCpukHz) {
        mCpuNum = cpuNum;
        mCpukHz = cpukHz;
        // ignore sample for purposes of cycles
        valid = false;
    }

    // if no change in CPU number or frequency, then record sample for cycle statistics
    if (valid && mCpukHz > 0) {
        const double cycles = wcNs * cpukHz * 0.000001;
        mHzStats.add(cycles);
    }

    const unsigned n = mWcStats.getN();
    // mCpuUsage.elapsed() is expensive, so don't call it every loop
    if ((n & 127) == 1) {
        const long long elapsed = mCpuUsage.elapsed();
        if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
            const double perLoop = elapsed / (double) n;
            const double perLoop100 = perLoop * 0.01;
            const double perLoop1k = perLoop * 0.001;
            const double mean = mWcStats.getMean();
            const double stddev = mWcStats.getStdDev();
            const double minimum = mWcStats.getMin();
            const double maximum = mWcStats.getMax();
            const double meanCycles = mHzStats.getMean();
            const double stddevCycles = mHzStats.getStdDev();
            const double minCycles = mHzStats.getMin();
            const double maxCycles = mHzStats.getMax();
            mCpuUsage.resetElapsed();
            mWcStats.reset();
            mHzStats.reset();
            ALOGD("CPU usage for %s over past %.1f secs\n"
                " (%u mixer loops at %.1f mean ms per loop):\n"
                " us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n"
                " %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n"
                " MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f",
                    title.c_str(),
                    elapsed * .000000001, n, perLoop * .000001,
                    mean * .001,
                    stddev * .001,
                    minimum * .001,
                    maximum * .001,
                    mean / perLoop100,
                    stddev / perLoop100,
                    minimum / perLoop100,
                    maximum / perLoop100,
                    meanCycles / perLoop1k,
                    stddevCycles / perLoop1k,
                    minCycles / perLoop1k,
                    maxCycles / perLoop1k);

        }
    }
#endif
};

// ----------------------------------------------------------------------------
//      ThreadBase
// ----------------------------------------------------------------------------

// static
const char* IAfThreadBase::threadTypeToString(ThreadBase::type_t type)
{
    switch (type) {
    case MIXER:
        return "MIXER";
    case DIRECT:
        return "DIRECT";
    case DUPLICATING:
        return "DUPLICATING";
    case RECORD:
        return "RECORD";
    case OFFLOAD:
        return "OFFLOAD";
    case MMAP_PLAYBACK:
        return "MMAP_PLAYBACK";
    case MMAP_CAPTURE:
        return "MMAP_CAPTURE";
    case SPATIALIZER:
        return "SPATIALIZER";
    case BIT_PERFECT:
        return "BIT_PERFECT";
    default:
        return "unknown";
    }
}

ThreadBase::ThreadBase(const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
        type_t type, bool systemReady, bool isOut)
    :   Thread(false /*canCallJava*/),
        mType(type),
        mAfThreadCallback(afThreadCallback),
        mThreadMetrics(std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_THREAD) + std::to_string(id),
                isOut),
        mIsOut(isOut),
        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize
        // are set by PlaybackThread::readOutputParameters_l() or
        // RecordThread::readInputParameters_l()
        //FIXME: mStandby should be true here. Is this some kind of hack?
        mStandby(false),
        mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
        // mName will be set by concrete (non-virtual) subclass
        mDeathRecipient(new PMDeathRecipient(this)),
        mSystemReady(systemReady),
        mSignalPending(false)
{
    mThreadMetrics.logConstructor(getpid(), threadTypeToString(type), id);
    memset(&mPatch, 0, sizeof(struct audio_patch));
}

ThreadBase::~ThreadBase()
{
    // mConfigEvents should be empty, but just in case it isn't, free the memory it owns
    mConfigEvents.clear();

    // do not lock the mutex in destructor
    releaseWakeLock_l();
    if (mPowerManager != 0) {
        sp<IBinder> binder = IInterface::asBinder(mPowerManager);
        binder->unlinkToDeath(mDeathRecipient);
    }

    sendStatistics(true /* force */);
}

status_t ThreadBase::readyToRun()
{
    status_t status = initCheck();
    if (status == NO_ERROR) {
        ALOGI("AudioFlinger's thread %p tid=%d ready to run", this, getTid());
    } else {
        ALOGE("No working audio driver found.");
    }
    return status;
}

void ThreadBase::exit()
{
    ALOGV("ThreadBase::exit");
    // do any cleanup required for exit to succeed
    preExit();
    {
        // This lock prevents the following race in thread (uniprocessor for illustration):
        //  if (!exitPending()) {
        //      // context switch from here to exit()
        //      // exit() calls requestExit(), what exitPending() observes
        //      // exit() calls signal(), which is dropped since no waiters
        //      // context switch back from exit() to here
        //      mWaitWorkCV.wait(...);
        //      // now thread is hung
        //  }
        audio_utils::lock_guard lock(mutex());
        requestExit();
        mWaitWorkCV.notify_all();
    }
    // When Thread::requestExitAndWait is made virtual and this method is renamed to
    //   "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();"

    // For TimeCheck: track waiting on the thread join of getTid().
    audio_utils::mutex::scoped_join_wait_check sjw(getTid());

    requestExitAndWait();
}

status_t ThreadBase::setParameters(const String8& keyValuePairs)
{
    ALOGV("ThreadBase::setParameters() %s", keyValuePairs.c_str());
    audio_utils::lock_guard _l(mutex());

    return sendSetParameterConfigEvent_l(keyValuePairs);
}

// sendConfigEvent_l() must be called with ThreadBase::mLock held
// Can temporarily release the lock if waiting for a reply from processConfigEvents_l().
status_t ThreadBase::sendConfigEvent_l(sp<ConfigEvent>& event)
NO_THREAD_SAFETY_ANALYSIS  // condition variable
{
    status_t status = NO_ERROR;

    if (event->mRequiresSystemReady && !mSystemReady) {
        event->mWaitStatus = false;
        mPendingConfigEvents.add(event);
        return status;
    }
    mConfigEvents.add(event);
    ALOGV("sendConfigEvent_l() num events %zu event %d", mConfigEvents.size(), event->mType);
    mWaitWorkCV.notify_one();
    mutex().unlock();
    {
        audio_utils::unique_lock _l(event->mutex());
        nsecs_t timeoutNs = event->mType == CFG_EVENT_CREATE_AUDIO_PATCH ?
                kCreatePatchEventTimeoutNs : kConfigEventTimeoutNs;
        while (event->mWaitStatus) {
            if (event->mCondition.wait_for(
                    _l, std::chrono::nanoseconds(timeoutNs), getTid())
                            == std::cv_status::timeout) {
                event->mStatus = TIMED_OUT;
                event->mWaitStatus = false;
            }
        }
        status = event->mStatus;
    }
    mutex().lock();
    return status;
}
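// Usage sketch (illustrative, mirrors the helpers below): a caller already holding mutex()
// wraps its request in a ConfigEvent subclass and blocks here until the thread loop services
// it in processConfigEvents_l(), e.g.
//     sp<ConfigEvent> configEvent = new SetParameterConfigEvent(keyValuePair);
//     status_t status = sendConfigEvent_l(configEvent);  // temporarily drops mutex()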

void ThreadBase::sendIoConfigEvent(audio_io_config_event_t event, pid_t pid,
                                   audio_port_handle_t portId)
{
    audio_utils::lock_guard _l(mutex());
    sendIoConfigEvent_l(event, pid, portId);
}

// sendIoConfigEvent_l() must be called with ThreadBase::mutex() held
void ThreadBase::sendIoConfigEvent_l(audio_io_config_event_t event, pid_t pid,
                                     audio_port_handle_t portId)
{
    // The audio statistics history is exponentially weighted to forget events
    // about five or more seconds in the past. In order to have
    // crisper statistics for mediametrics, we reset the statistics on
    // an IoConfigEvent, to reflect different properties for a new device.
    mIoJitterMs.reset();
    mLatencyMs.reset();
    mProcessTimeMs.reset();
    mMonopipePipeDepthStats.reset();
    mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);

    sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid, portId);
    sendConfigEvent_l(configEvent);
}

void ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp)
{
    audio_utils::lock_guard _l(mutex());
    sendPrioConfigEvent_l(pid, tid, prio, forApp);
}

// sendPrioConfigEvent_l() must be called with ThreadBase::mutex() held
void ThreadBase::sendPrioConfigEvent_l(
        pid_t pid, pid_t tid, int32_t prio, bool forApp)
{
    sp<ConfigEvent> configEvent = (ConfigEvent *)new PrioConfigEvent(pid, tid, prio, forApp);
    sendConfigEvent_l(configEvent);
}

// sendSetParameterConfigEvent_l() must be called with ThreadBase::mutex() held
status_t ThreadBase::sendSetParameterConfigEvent_l(const String8& keyValuePair)
{
    sp<ConfigEvent> configEvent;
    AudioParameter param(keyValuePair);
    int value;
    if (param.getInt(String8(AudioParameter::keyMonoOutput), value) == NO_ERROR) {
        setMasterMono_l(value != 0);
        if (param.size() == 1) {
            return NO_ERROR; // should be a solo parameter - we don't pass down
        }
        param.remove(String8(AudioParameter::keyMonoOutput));
        configEvent = new SetParameterConfigEvent(param.toString());
    } else {
        configEvent = new SetParameterConfigEvent(keyValuePair);
    }
    return sendConfigEvent_l(configEvent);
}

status_t ThreadBase::sendCreateAudioPatchConfigEvent(
        const struct audio_patch *patch,
        audio_patch_handle_t *handle)
{
    audio_utils::lock_guard _l(mutex());
    sp<ConfigEvent> configEvent = (ConfigEvent *)new CreateAudioPatchConfigEvent(*patch, *handle);
    status_t status = sendConfigEvent_l(configEvent);
    if (status == NO_ERROR) {
        CreateAudioPatchConfigEventData *data =
                (CreateAudioPatchConfigEventData *)configEvent->mData.get();
        *handle = data->mHandle;
    }
    return status;
}

status_t ThreadBase::sendReleaseAudioPatchConfigEvent(
        const audio_patch_handle_t handle)
{
    audio_utils::lock_guard _l(mutex());
    sp<ConfigEvent> configEvent = (ConfigEvent *)new ReleaseAudioPatchConfigEvent(handle);
    return sendConfigEvent_l(configEvent);
}

status_t ThreadBase::sendUpdateOutDeviceConfigEvent(
        const DeviceDescriptorBaseVector& outDevices)
{
    if (type() != RECORD) {
        // The update out device operation is only for record thread.
        return INVALID_OPERATION;
    }
    audio_utils::lock_guard _l(mutex());
    sp<ConfigEvent> configEvent = (ConfigEvent *)new UpdateOutDevicesConfigEvent(outDevices);
    return sendConfigEvent_l(configEvent);
}

void ThreadBase::sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs)
{
    ALOG_ASSERT(type() == RECORD, "sendResizeBufferConfigEvent_l() called on non record thread");
    sp<ConfigEvent> configEvent =
            (ConfigEvent *)new ResizeBufferConfigEvent(maxSharedAudioHistoryMs);
    sendConfigEvent_l(configEvent);
}

void ThreadBase::sendCheckOutputStageEffectsEvent()
{
    audio_utils::lock_guard _l(mutex());
    sendCheckOutputStageEffectsEvent_l();
}

void ThreadBase::sendCheckOutputStageEffectsEvent_l()
{
    sp<ConfigEvent> configEvent =
            (ConfigEvent *)new CheckOutputStageEffectsEvent();
    sendConfigEvent_l(configEvent);
}

void ThreadBase::sendHalLatencyModesChangedEvent_l()
{
    sp<ConfigEvent> configEvent = sp<HalLatencyModesChangedEvent>::make();
    sendConfigEvent_l(configEvent);
}

// post condition: mConfigEvents.isEmpty()
void ThreadBase::processConfigEvents_l()
{
    bool configChanged = false;

    while (!mConfigEvents.isEmpty()) {
        ALOGV("processConfigEvents_l() remaining events %zu", mConfigEvents.size());
        sp<ConfigEvent> event = mConfigEvents[0];
        mConfigEvents.removeAt(0);
        switch (event->mType) {
        case CFG_EVENT_PRIO: {
            PrioConfigEventData *data = (PrioConfigEventData *)event->mData.get();
            // FIXME Need to understand why this has to be done asynchronously
            int err = requestPriority(data->mPid, data->mTid, data->mPrio, data->mForApp,
                    true /*asynchronous*/);
            if (err != 0) {
                ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
                      data->mPrio, data->mPid, data->mTid, err);
            }
        } break;
        case CFG_EVENT_IO: {
            IoConfigEventData *data = (IoConfigEventData *)event->mData.get();
            ioConfigChanged_l(data->mEvent, data->mPid, data->mPortId);
        } break;
        case CFG_EVENT_SET_PARAMETER: {
            SetParameterConfigEventData *data = (SetParameterConfigEventData *)event->mData.get();
            if (checkForNewParameter_l(data->mKeyValuePairs, event->mStatus)) {
                configChanged = true;
                mLocalLog.log("CFG_EVENT_SET_PARAMETER: (%s) configuration changed",
                        data->mKeyValuePairs.c_str());
            }
        } break;
        case CFG_EVENT_CREATE_AUDIO_PATCH: {
            const DeviceTypeSet oldDevices = getDeviceTypes_l();
            CreateAudioPatchConfigEventData *data =
                    (CreateAudioPatchConfigEventData *)event->mData.get();
            event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
            const DeviceTypeSet newDevices = getDeviceTypes_l();
            configChanged = oldDevices != newDevices;
            mLocalLog.log("CFG_EVENT_CREATE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
                    dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
                    dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
        } break;
        case CFG_EVENT_RELEASE_AUDIO_PATCH: {
            const DeviceTypeSet oldDevices = getDeviceTypes_l();
            ReleaseAudioPatchConfigEventData *data =
                    (ReleaseAudioPatchConfigEventData *)event->mData.get();
            event->mStatus = releaseAudioPatch_l(data->mHandle);
            const DeviceTypeSet newDevices = getDeviceTypes_l();
            configChanged = oldDevices != newDevices;
            mLocalLog.log("CFG_EVENT_RELEASE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
                    dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
                    dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
        } break;
        case CFG_EVENT_UPDATE_OUT_DEVICE: {
            UpdateOutDevicesConfigEventData *data =
                    (UpdateOutDevicesConfigEventData *)event->mData.get();
            updateOutDevices(data->mOutDevices);
        } break;
        case CFG_EVENT_RESIZE_BUFFER: {
            ResizeBufferConfigEventData *data =
                    (ResizeBufferConfigEventData *)event->mData.get();
            resizeInputBuffer_l(data->mMaxSharedAudioHistoryMs);
        } break;

        case CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS: {
            setCheckOutputStageEffects();
        } break;

        case CFG_EVENT_HAL_LATENCY_MODES_CHANGED: {
            onHalLatencyModesChanged_l();
        } break;

        default:
            ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
            break;
        }
        {
            audio_utils::lock_guard _l(event->mutex());
            if (event->mWaitStatus) {
                event->mWaitStatus = false;
                event->mCondition.notify_one();
            }
        }
        ALOGV_IF(mConfigEvents.isEmpty(), "processConfigEvents_l() DONE thread %p", this);
    }

    if (configChanged) {
        cacheParameters_l();
    }
}

String8 channelMaskToString(audio_channel_mask_t mask, bool output) {
    String8 s;
    const audio_channel_representation_t representation =
            audio_channel_mask_get_representation(mask);

    switch (representation) {
    // Traverse each single-bit channel to convert the channel mask to a string.
    case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
        if (output) {
            if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
            if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
            if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
            if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low-frequency, ");
            if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
            if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
            if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
            if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
            if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
            if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
            if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, ");
            if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, ");
            if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT) s.append("bottom-front-left, ");
            if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER) s.append("bottom-front-center, ");
            if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT) s.append("bottom-front-right, ");
            if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) s.append("low-frequency-2, ");
            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, ");
            if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, ");
            if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, ");
        } else {
            if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
            if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
            if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
            if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
            if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
            if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
            if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
            if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
            if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
            if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
            if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
            if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
            if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, ");
            if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, ");
            if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, ");
            if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low-frequency, ");
            if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, ");
            if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, ");
            if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
            if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
            if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, ");
        }
        const int len = s.length();
        if (len > 2) {
            (void) s.lockBuffer(len);      // needed?
            s.unlockBuffer(len - 2);       // remove trailing ", "
        }
        return s;
    }
    case AUDIO_CHANNEL_REPRESENTATION_INDEX:
        s.appendFormat("index mask, bits:%#x", audio_channel_mask_get_bits(mask));
        return s;
    default:
        s.appendFormat("unknown mask, representation:%d bits:%#x",
                representation, audio_channel_mask_get_bits(mask));
        return s;
    }
}
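// Example: channelMaskToString(AUDIO_CHANNEL_OUT_STEREO, true /*output*/) returns
// "front-left, front-right"; an index mask with bits 0x3 is rendered as "index mask, bits:0x3".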

void ThreadBase::dump(int fd, const Vector<String16>& args)
{
    dprintf(fd, "\n%s thread %p, name %s, tid %d, type %d (%s):\n", isOutput() ? "Output" : "Input",
            this, mThreadName, getTid(), type(), threadTypeToString(type()));

    {
        afutils::FallibleLockGuard l{mutex()};
        if (!l) {
            dprintf(fd, " Thread may be deadlocked\n");
        }
        dumpBase_l(fd, args);
        dumpInternals_l(fd, args);
        dumpTracks_l(fd, args);
        dumpEffectChains_l(fd, args);
    }

    dprintf(fd, " Local log:\n");
    const auto logHeader = this->getLocalLogHeader();
    write(fd, logHeader.data(), logHeader.length());
    mLocalLog.dump(fd, " " /* prefix */);

    // --all does the statistics
    bool dumpAll = false;
    for (const auto &arg : args) {
        if (arg == String16("--all")) {
            dumpAll = true;
        }
    }
    if (dumpAll || type() == SPATIALIZER) {
        const std::string sched = mThreadSnapshot.toString();
        if (!sched.empty()) {
            (void)write(fd, sched.c_str(), sched.size());
        }
    }
}

void ThreadBase::dumpBase_l(int fd, const Vector<String16>& /* args */)
{
    dprintf(fd, " I/O handle: %d\n", mId);
    dprintf(fd, " Standby: %s\n", mStandby ? "yes" : "no");
    dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
    dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
    dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat,
            IAfThreadBase::formatToString(mHALFormat).c_str());
    dprintf(fd, " HAL buffer size: %zu bytes\n", mBufferSize);
    dprintf(fd, " Channel count: %u\n", mChannelCount);
    dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
            channelMaskToString(mChannelMask, mType != RECORD).c_str());
    dprintf(fd, " Processing format: 0x%x (%s)\n", mFormat,
            IAfThreadBase::formatToString(mFormat).c_str());
    dprintf(fd, " Processing frame size: %zu bytes\n", mFrameSize);
    dprintf(fd, " Pending config events:");
    size_t numConfig = mConfigEvents.size();
    if (numConfig) {
        const size_t SIZE = 256;
        char buffer[SIZE];
        for (size_t i = 0; i < numConfig; i++) {
            mConfigEvents[i]->dump(buffer, SIZE);
            dprintf(fd, "\n %s", buffer);
        }
        dprintf(fd, "\n");
    } else {
        dprintf(fd, " none\n");
    }
    // Note: output device may be used by capture threads for effects such as AEC.
    dprintf(fd, " Output devices: %s (%s)\n",
            dumpDeviceTypes(outDeviceTypes_l()).c_str(), toString(outDeviceTypes_l()).c_str());
    dprintf(fd, " Input device: %#x (%s)\n",
            inDeviceType_l(), toString(inDeviceType_l()).c_str());
    dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, toString(mAudioSource).c_str());

    // Dump timestamp statistics for the Thread types that support it.
    if (mType == RECORD
            || mType == MIXER
            || mType == DUPLICATING
            || mType == DIRECT
            || mType == OFFLOAD
            || mType == SPATIALIZER) {
        dprintf(fd, " Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
        dprintf(fd, " Timestamp corrected: %s\n",
                isTimestampCorrectionEnabled_l() ? "yes" : "no");
    }

    if (mLastIoBeginNs > 0) { // MMAP may not set this
        dprintf(fd, " Last %s occurred (msecs): %lld\n",
                isOutput() ? "write" : "read",
                (long long) (systemTime() - mLastIoBeginNs) / NANOS_PER_MILLISECOND);
    }

    if (mProcessTimeMs.getN() > 0) {
        dprintf(fd, " Process time ms stats: %s\n", mProcessTimeMs.toString().c_str());
    }

    if (mIoJitterMs.getN() > 0) {
        dprintf(fd, " Hal %s jitter ms stats: %s\n",
                isOutput() ? "write" : "read",
                mIoJitterMs.toString().c_str());
    }

    if (mLatencyMs.getN() > 0) {
        dprintf(fd, " Threadloop %s latency stats: %s\n",
                isOutput() ? "write" : "read",
                mLatencyMs.toString().c_str());
    }

    if (mMonopipePipeDepthStats.getN() > 0) {
        dprintf(fd, " Monopipe %s pipe depth stats: %s\n",
                isOutput() ? "write" : "read",
                mMonopipePipeDepthStats.toString().c_str());
    }
}

void ThreadBase::dumpEffectChains_l(int fd, const Vector<String16>& args)
{
    const size_t SIZE = 256;
    char buffer[SIZE];

    size_t numEffectChains = mEffectChains.size();
    snprintf(buffer, SIZE, " %zu Effect Chains\n", numEffectChains);
    write(fd, buffer, strlen(buffer));

    for (size_t i = 0; i < numEffectChains; ++i) {
        sp<IAfEffectChain> chain = mEffectChains[i];
        if (chain != 0) {
            chain->dump(fd, args);
        }
    }
}

void ThreadBase::acquireWakeLock()
{
    audio_utils::lock_guard _l(mutex());
    acquireWakeLock_l();
}

String16 ThreadBase::getWakeLockTag()
{
    switch (mType) {
    case MIXER:
        return String16("AudioMix");
    case DIRECT:
        return String16("AudioDirectOut");
    case DUPLICATING:
        return String16("AudioDup");
    case RECORD:
        return String16("AudioIn");
    case OFFLOAD:
        return String16("AudioOffload");
    case MMAP_PLAYBACK:
        return String16("MmapPlayback");
    case MMAP_CAPTURE:
        return String16("MmapCapture");
    case SPATIALIZER:
        return String16("AudioSpatial");
    case BIT_PERFECT:
        return String16("AudioBitPerfect");
    default:
        ALOG_ASSERT(false);
        return String16("AudioUnknown");
    }
}

void ThreadBase::acquireWakeLock_l()
{
    getPowerManager_l();
    if (mPowerManager != 0) {
        sp<IBinder> binder = new BBinder();
        // Uses AID_AUDIOSERVER for wakelock. updateWakeLockUids_l() updates with client uids.
        binder::Status status = mPowerManager->acquireWakeLockAsync(binder,
                    POWERMANAGER_PARTIAL_WAKE_LOCK,
                    getWakeLockTag(),
                    String16("audioserver"),
                    {} /* workSource */,
                    {} /* historyTag */);
        if (status.isOk()) {
            mWakeLockToken = binder;
            if (media::psh_utils::AudioPowerManager::enabled()) {
                mThreadToken = media::psh_utils::createAudioThreadToken(
                        getTid(), String8(getWakeLockTag()).c_str());
            }
        }
        ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status.exceptionCode());
    }

    gBoottime.acquire(mWakeLockToken);
    mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
            gBoottime.getBoottimeOffset();
}

void ThreadBase::releaseWakeLock()
{
    audio_utils::lock_guard _l(mutex());
    releaseWakeLock_l();
}

void ThreadBase::releaseWakeLock_l()
{
    gBoottime.release(mWakeLockToken);
    if (mWakeLockToken != 0) {
        ALOGV("releaseWakeLock_l() %s", mThreadName);
        if (mPowerManager != 0) {
            mPowerManager->releaseWakeLockAsync(mWakeLockToken, 0);
        }
        mWakeLockToken.clear();
    }
    mThreadToken.reset();
}

void ThreadBase::getPowerManager_l() {
    if (mSystemReady && mPowerManager == 0) {
        // use checkService() to avoid blocking if power service is not up yet
        sp<IBinder> binder =
            defaultServiceManager()->checkService(String16("power"));
        if (binder == 0) {
            ALOGW("Thread %s cannot connect to the power manager service", mThreadName);
        } else {
            mPowerManager = interface_cast<os::IPowerManager>(binder);
            binder->linkToDeath(mDeathRecipient);
        }
    }
}

void ThreadBase::updateWakeLockUids_l(const SortedVector<uid_t>& uids) {
    getPowerManager_l();

#if !LOG_NDEBUG
    std::stringstream s;
    for (uid_t uid : uids) {
        s << uid << " ";
    }
    ALOGD("updateWakeLockUids_l %s uids:%s", mThreadName, s.str().c_str());
#endif

    if (mWakeLockToken == NULL) { // token may be NULL if AudioFlinger::systemReady() not called.
        if (mSystemReady) {
            ALOGE("no wake lock to update, but system ready!");
        } else {
            ALOGW("no wake lock to update, system not ready yet");
        }
        return;
    }
    if (mPowerManager != 0) {
        std::vector<int> uidsAsInt(uids.begin(), uids.end()); // powermanager expects uids as ints
        binder::Status status = mPowerManager->updateWakeLockUidsAsync(
                mWakeLockToken, uidsAsInt);
        ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status.exceptionCode());
    }
}

void ThreadBase::clearPowerManager()
{
    audio_utils::lock_guard _l(mutex());
    releaseWakeLock_l();
    mPowerManager.clear();
}

void ThreadBase::updateOutDevices(
        const DeviceDescriptorBaseVector& outDevices __unused)
{
    ALOGE("%s should only be called in RecordThread", __func__);
}

void ThreadBase::resizeInputBuffer_l(int32_t /* maxSharedAudioHistoryMs */)
{
    ALOGE("%s should only be called in RecordThread", __func__);
}

void ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& /* who */)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        thread->clearPowerManager();
    }
    ALOGW("power manager service died !!!");
}

void ThreadBase::setEffectSuspended_l(
        const effect_uuid_t *type, bool suspend, audio_session_t sessionId)
{
    sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
    if (chain != 0) {
        if (type != NULL) {
            chain->setEffectSuspended_l(type, suspend);
        } else {
            chain->setEffectSuspendedAll_l(suspend);
        }
    }

    updateSuspendedSessions_l(type, suspend, sessionId);
}

void ThreadBase::checkSuspendOnAddEffectChain_l(const sp<IAfEffectChain>& chain)
{
    ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId());
    if (index < 0) {
        return;
    }

    const KeyedVector <int, sp<SuspendedSessionDesc> >& sessionEffects =
            mSuspendedSessions.valueAt(index);

    for (size_t i = 0; i < sessionEffects.size(); i++) {
        const sp<SuspendedSessionDesc>& desc = sessionEffects.valueAt(i);
        for (int j = 0; j < desc->mRefCount; j++) {
            if (sessionEffects.keyAt(i) == IAfEffectChain::kKeyForSuspendAll) {
                chain->setEffectSuspendedAll_l(true);
            } else {
                ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x",
                        desc->mType.timeLow);
                chain->setEffectSuspended_l(&desc->mType, true);
            }
        }
    }
}

void ThreadBase::updateSuspendedSessions_l(const effect_uuid_t* type,
                                           bool suspend,
                                           audio_session_t sessionId)
{
    ssize_t index = mSuspendedSessions.indexOfKey(sessionId);

    KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;

    if (suspend) {
        if (index >= 0) {
            sessionEffects = mSuspendedSessions.valueAt(index);
        } else {
            mSuspendedSessions.add(sessionId, sessionEffects);
        }
    } else {
        if (index < 0) {
            return;
        }
        sessionEffects = mSuspendedSessions.valueAt(index);
    }


    int key = IAfEffectChain::kKeyForSuspendAll;
    if (type != NULL) {
        key = type->timeLow;
    }
    index = sessionEffects.indexOfKey(key);

    sp<SuspendedSessionDesc> desc;
    if (suspend) {
        if (index >= 0) {
            desc = sessionEffects.valueAt(index);
        } else {
            desc = new SuspendedSessionDesc();
            if (type != NULL) {
                desc->mType = *type;
            }
            sessionEffects.add(key, desc);
            ALOGV("updateSuspendedSessions_l() suspend adding effect %08x", key);
        }
        desc->mRefCount++;
    } else {
        if (index < 0) {
            return;
        }
        desc = sessionEffects.valueAt(index);
        if (--desc->mRefCount == 0) {
            ALOGV("updateSuspendedSessions_l() restore removing effect %08x", key);
            sessionEffects.removeItemsAt(index);
            if (sessionEffects.isEmpty()) {
                ALOGV("updateSuspendedSessions_l() restore removing session %d",
                        sessionId);
                mSuspendedSessions.removeItem(sessionId);
            }
        }
    }
    if (!sessionEffects.isEmpty()) {
        mSuspendedSessions.replaceValueFor(sessionId, sessionEffects);
    }
}

void ThreadBase::checkSuspendOnEffectEnabled(bool enabled,
                                             audio_session_t sessionId,
                                             bool threadLocked)
NO_THREAD_SAFETY_ANALYSIS  // manual locking
{
    if (!threadLocked) {
        mutex().lock();
    }

    if (mType != RECORD) {
        // suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on
        // another session. This gives the priority to well behaved effect control panels
        // and applications not using global effects.
        // Enabling post processing in AUDIO_SESSION_OUTPUT_STAGE session does not affect
        // global effects
        if (!audio_is_global_session(sessionId)) {
            setEffectSuspended_l(NULL, enabled, AUDIO_SESSION_OUTPUT_MIX);
        }
    }

    if (!threadLocked) {
        mutex().unlock();
    }
}
1449
1450 // checkEffectCompatibility_l() must be called with ThreadBase::mutex() held
checkEffectCompatibility_l(const effect_descriptor_t * desc,audio_session_t sessionId)1451 status_t RecordThread::checkEffectCompatibility_l(
1452 const effect_descriptor_t *desc, audio_session_t sessionId)
1453 {
1454 // No global output effect sessions on record threads
1455 if (sessionId == AUDIO_SESSION_OUTPUT_MIX
1456 || sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
1457 ALOGW("checkEffectCompatibility_l(): global effect %s on record thread %s",
1458 desc->name, mThreadName);
1459 return BAD_VALUE;
1460 }
1461 // only pre processing effects on record thread
1462 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) {
1463 ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on record thread %s",
1464 desc->name, mThreadName);
1465 return BAD_VALUE;
1466 }
1467
1468 // always allow effects without processing load or latency
1469 if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) {
1470 return NO_ERROR;
1471 }
1472
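 // Summary of the fast-capture checks below: in RAW mode no effects are accepted at all,
 // and otherwise only HW-tunneled (EFFECT_FLAG_HW_ACC_TUNNEL) effects are allowed so that
 // the fast capture path timing is not disturbed.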
1473 audio_input_flags_t flags = mInput->flags;
1474 if (hasFastCapture() || (flags & AUDIO_INPUT_FLAG_FAST)) {
1475 if (flags & AUDIO_INPUT_FLAG_RAW) {
1476 ALOGW("checkEffectCompatibility_l(): effect %s on record thread %s in raw mode",
1477 desc->name, mThreadName);
1478 return BAD_VALUE;
1479 }
1480 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
1481 ALOGW("checkEffectCompatibility_l(): non HW effect %s on record thread %s in fast mode",
1482 desc->name, mThreadName);
1483 return BAD_VALUE;
1484 }
1485 }
1486
1487 if (IAfEffectModule::isHapticGenerator(&desc->type)) {
1488 ALOGE("%s(): HapticGenerator is not supported in RecordThread", __func__);
1489 return BAD_VALUE;
1490 }
1491 return NO_ERROR;
1492 }
1493
1494 // checkEffectCompatibility_l() must be called with ThreadBase::mutex() held
1495 status_t PlaybackThread::checkEffectCompatibility_l(
1496 const effect_descriptor_t *desc, audio_session_t sessionId)
1497 {
1498 // no preprocessing on playback threads
1499 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
1500 ALOGW("%s: pre processing effect %s created on playback"
1501 " thread %s", __func__, desc->name, mThreadName);
1502 return BAD_VALUE;
1503 }
1504
1505 // always allow effects without processing load or latency
1506 if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) {
1507 return NO_ERROR;
1508 }
1509
1510 if (IAfEffectModule::isHapticGenerator(&desc->type) && mHapticChannelCount == 0) {
1511 ALOGW("%s: thread (%s) doesn't support haptic playback while the effect is HapticGenerator",
1512 __func__, threadTypeToString(mType));
1513 return BAD_VALUE;
1514 }
1515
1516 if (IAfEffectModule::isSpatializer(&desc->type)
1517 && mType != SPATIALIZER) {
1518 ALOGW("%s: attempt to create a spatializer effect on a thread of type %d",
1519 __func__, mType);
1520 return BAD_VALUE;
1521 }
1522
1523 switch (mType) {
1524 case MIXER: {
1525 audio_output_flags_t flags = mOutput->flags;
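 // Summary of the fast-mixer checks below: SW global effects (OUTPUT_MIX) and effects on
 // sessions without any fast track are accepted outright; OUTPUT_STAGE and DEVICE sessions
 // only admit post-processing effects; everything that falls through must then be on a
 // non-RAW output and be HW-tunneled (EFFECT_FLAG_HW_ACC_TUNNEL).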
1526 if (hasFastMixer() || (flags & AUDIO_OUTPUT_FLAG_FAST)) {
1527 if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
1528 // global effects are applied only to non fast tracks if they are SW
1529 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
1530 break;
1531 }
1532 } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
1533 // only post processing on output stage session
1534 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1535 ALOGW("%s: non post processing effect %s not allowed on output stage session",
1536 __func__, desc->name);
1537 return BAD_VALUE;
1538 }
1539 } else if (sessionId == AUDIO_SESSION_DEVICE) {
1540 // only post processing on device session
1541 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1542 ALOGW("%s: non post processing effect %s not allowed on device session",
1543 __func__, desc->name);
1544 return BAD_VALUE;
1545 }
1546 } else {
1547 // no restriction on effects applied on non fast tracks
1548 if ((hasAudioSession_l(sessionId) & ThreadBase::FAST_SESSION) == 0) {
1549 break;
1550 }
1551 }
1552
1553 if (flags & AUDIO_OUTPUT_FLAG_RAW) {
1554 ALOGW("%s: effect %s on playback thread in raw mode", __func__, desc->name);
1555 return BAD_VALUE;
1556 }
1557 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
1558 ALOGW("%s: non HW effect %s on playback thread in fast mode",
1559 __func__, desc->name);
1560 return BAD_VALUE;
1561 }
1562 }
1563 } break;
1564 case OFFLOAD:
1565 // nothing actionable on offload threads, if the effect:
1566 // - is offloadable: the effect can be created
1567 // - is NOT offloadable: the effect should still be created, but EffectHandle::enable()
1568 // will take care of invalidating the tracks of the thread
1569 break;
1570 case DIRECT:
1571 // Reject any effect on Direct output threads for now, since the format of
1572 // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
1573 ALOGW("%s: effect %s on DIRECT output thread %s",
1574 __func__, desc->name, mThreadName);
1575 return BAD_VALUE;
1576 case DUPLICATING:
1577 if (audio_is_global_session(sessionId)) {
1578 ALOGW("%s: global effect %s on DUPLICATING thread %s",
1579 __func__, desc->name, mThreadName);
1580 return BAD_VALUE;
1581 }
1582 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
1583 ALOGW("%s: post processing effect %s on DUPLICATING thread %s",
1584 __func__, desc->name, mThreadName);
1585 return BAD_VALUE;
1586 }
1587 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
1588 ALOGW("%s: HW tunneled effect %s on DUPLICATING thread %s",
1589 __func__, desc->name, mThreadName);
1590 return BAD_VALUE;
1591 }
1592 break;
1593 case SPATIALIZER:
1594 // Global effects (AUDIO_SESSION_OUTPUT_MIX) are supported on spatializer mixer, but only
1595 // the spatialized tracks have global effects applied for now.
1596 // Post processing effects (AUDIO_SESSION_OUTPUT_STAGE or AUDIO_SESSION_DEVICE)
1597 // are supported and added after the spatializer.
1598 if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
1599 ALOGD("%s: global effect %s on spatializer thread %s", __func__, desc->name,
1600 mThreadName);
1601 } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
1602 // only post processing, downmixer or spatializer effects on output stage session
1603 if (IAfEffectModule::isSpatializer(&desc->type)
1604 || memcmp(&desc->type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
1605 break;
1606 }
1607 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1608 ALOGW("%s: non post processing effect %s not allowed on output stage session",
1609 __func__, desc->name);
1610 return BAD_VALUE;
1611 }
1612 } else if (sessionId == AUDIO_SESSION_DEVICE) {
1613 // only post processing on device session
1614 if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
1615 ALOGW("%s: non post processing effect %s not allowed on device session",
1616 __func__, desc->name);
1617 return BAD_VALUE;
1618 }
1619 }
1620 break;
1621 case BIT_PERFECT:
1622 if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
1623 // Allow HW accelerated effects of tunnel type
1624 break;
1625 }
1626 // As bit-perfect tracks are not allowed to apply audio effects that touch the audio
1627 // data, effects are rejected when 1) they are global effects (AUDIO_SESSION_OUTPUT_MIX),
1628 // 2) they are post-processing effects (AUDIO_SESSION_OUTPUT_STAGE or AUDIO_SESSION_DEVICE), or
1629 // 3) there is any bit-perfect track with the given session id.
1630 if (sessionId == AUDIO_SESSION_OUTPUT_MIX || sessionId == AUDIO_SESSION_OUTPUT_STAGE ||
1631 sessionId == AUDIO_SESSION_DEVICE) {
1632 ALOGW("%s: effect %s not supported on bit-perfect thread %s",
1633 __func__, desc->name, mThreadName);
1634 return BAD_VALUE;
1635 } else if ((hasAudioSession_l(sessionId) & ThreadBase::BIT_PERFECT_SESSION) != 0) {
1636 ALOGW("%s: effect %s not supported as there is a bit-perfect track with session as %d",
1637 __func__, desc->name, sessionId);
1638 return BAD_VALUE;
1639 }
1640 break;
1641 default:
1642 LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
1643 }
1644
1645 return NO_ERROR;
1646 }
1647
1648 // ThreadBase::createEffect_l() must be called with AudioFlinger::mutex() held
1649 sp<IAfEffectHandle> ThreadBase::createEffect_l(
1650 const sp<Client>& client,
1651 const sp<IEffectClient>& effectClient,
1652 int32_t priority,
1653 audio_session_t sessionId,
1654 effect_descriptor_t *desc,
1655 int *enabled,
1656 status_t *status,
1657 bool pinned,
1658 bool probe,
1659 bool notifyFramesProcessed)
1660 {
1661 sp<IAfEffectModule> effect;
1662 sp<IAfEffectHandle> handle;
1663 status_t lStatus;
1664 sp<IAfEffectChain> chain;
1665 bool chainCreated = false;
1666 bool effectCreated = false;
1667 audio_unique_id_t effectId = AUDIO_UNIQUE_ID_USE_UNSPECIFIED;
1668
1669 lStatus = initCheck();
1670 if (lStatus != NO_ERROR) {
1671 ALOGW("createEffect_l() Audio driver not initialized.");
1672 goto Exit;
1673 }
1674
1675 ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
1676
1677 { // scope for mutex()
1678 audio_utils::lock_guard _l(mutex());
1679
1680 lStatus = checkEffectCompatibility_l(desc, sessionId);
1681 if (probe || lStatus != NO_ERROR) {
1682 goto Exit;
1683 }
1684
1685 // check for existing effect chain with the requested audio session
1686 chain = getEffectChain_l(sessionId);
1687 if (chain == 0) {
1688 // create a new chain for this session
1689 ALOGV("createEffect_l() new effect chain for session %d", sessionId);
1690 chain = IAfEffectChain::create(this, sessionId, mAfThreadCallback);
1691 addEffectChain_l(chain);
1692 chain->setStrategy(getStrategyForSession_l(sessionId));
1693 chainCreated = true;
1694 } else {
1695 effect = chain->getEffectFromDesc(desc);
1696 }
1697
1698 ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
1699
1700 if (effect == 0) {
1701 effectId = mAfThreadCallback->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
1702 // create a new effect module if none present in the chain
1703 lStatus = chain->createEffect(effect, desc, effectId, sessionId, pinned);
1704 if (lStatus != NO_ERROR) {
1705 goto Exit;
1706 }
1707 effectCreated = true;
1708
1709 // FIXME: use vector of device and address when effect interface is ready.
1710 effect->setDevices(outDeviceTypeAddrs());
1711 effect->setInputDevice(inDeviceTypeAddr());
1712 effect->setMode(mAfThreadCallback->getMode());
1713 effect->setAudioSource(mAudioSource);
1714 }
1715 if (effect->isHapticGenerator()) {
1716 // TODO(b/184194057): Use the vibrator information from the vibrator that will be used
1717 // for the HapticGenerator.
1718 const std::optional<media::AudioVibratorInfo> defaultVibratorInfo =
1719 mAfThreadCallback->getDefaultVibratorInfo_l();
1720 if (defaultVibratorInfo) {
1721 audio_utils::lock_guard _cl(chain->mutex());
1722 // Only set the vibrator info when it is a valid one.
1723 effect->setVibratorInfo_l(*defaultVibratorInfo);
1724 }
1725 }
1726 // create effect handle and connect it to effect module
1727 handle = IAfEffectHandle::create(
1728 effect, client, effectClient, priority, notifyFramesProcessed);
1729 lStatus = handle->initCheck();
1730 if (lStatus == OK) {
1731 lStatus = effect->addHandle(handle.get());
1732 sendCheckOutputStageEffectsEvent_l();
1733 }
1734 if (enabled != NULL) {
1735 *enabled = (int)effect->isEnabled();
1736 }
1737 }
1738
1739 Exit:
1740 if (!probe && lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
1741 audio_utils::lock_guard _l(mutex());
1742 if (effectCreated) {
1743 chain->removeEffect(effect);
1744 }
1745 if (chainCreated) {
1746 removeEffectChain_l(chain);
1747 }
1748 // handle must be cleared by caller to avoid deadlock.
1749 }
1750
1751 *status = lStatus;
1752 return handle;
1753 }
1754
1755 void ThreadBase::disconnectEffectHandle(IAfEffectHandle* handle,
1756 bool unpinIfLast)
1757 {
1758 bool remove = false;
1759 sp<IAfEffectModule> effect;
1760 {
1761 audio_utils::lock_guard _l(mutex());
1762 sp<IAfEffectBase> effectBase = handle->effect().promote();
1763 if (effectBase == nullptr) {
1764 return;
1765 }
1766 effect = effectBase->asEffectModule();
1767 if (effect == nullptr) {
1768 return;
1769 }
1770 // restore suspended effects if the disconnected handle was enabled and the last one.
1771 remove = (effect->removeHandle(handle) == 0) && (!effect->isPinned() || unpinIfLast);
1772 if (remove) {
1773 removeEffect_l(effect, true);
1774 }
1775 sendCheckOutputStageEffectsEvent_l();
1776 }
1777 if (remove) {
1778 mAfThreadCallback->updateOrphanEffectChains(effect);
1779 if (handle->enabled()) {
1780 effect->checkSuspendOnEffectEnabled(false, false /*threadLocked*/);
1781 }
1782 }
1783 }
1784
1785 void ThreadBase::onEffectEnable(const sp<IAfEffectModule>& effect) {
1786 if (isOffloadOrMmap()) {
1787 audio_utils::lock_guard _l(mutex());
1788 broadcast_l();
1789 }
1790 if (!effect->isOffloadable()) {
1791 if (mType == ThreadBase::OFFLOAD) {
1792 PlaybackThread *t = (PlaybackThread *)this;
1793 t->invalidateTracks(AUDIO_STREAM_MUSIC);
1794 }
1795 if (effect->sessionId() == AUDIO_SESSION_OUTPUT_MIX) {
1796 mAfThreadCallback->onNonOffloadableGlobalEffectEnable();
1797 }
1798 }
1799 }
1800
1801 void ThreadBase::onEffectDisable() {
1802 if (isOffloadOrMmap()) {
1803 audio_utils::lock_guard _l(mutex());
1804 broadcast_l();
1805 }
1806 }
1807
1808 sp<IAfEffectModule> ThreadBase::getEffect(audio_session_t sessionId,
1809 int effectId) const
1810 {
1811 audio_utils::lock_guard _l(mutex());
1812 return getEffect_l(sessionId, effectId);
1813 }
1814
1815 sp<IAfEffectModule> ThreadBase::getEffect_l(audio_session_t sessionId,
1816 int effectId) const
1817 {
1818 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
1819 return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
1820 }
1821
1822 std::vector<int> ThreadBase::getEffectIds_l(audio_session_t sessionId) const
1823 {
1824 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
1825 return chain != nullptr ? chain->getEffectIds_l() : std::vector<int>{};
1826 }
1827
1828 // PlaybackThread::addEffect_ll() must be called with AudioFlinger::mutex() and
1829 // ThreadBase::mutex() held
1830 status_t ThreadBase::addEffect_ll(const sp<IAfEffectModule>& effect)
1831 {
1832 // check for existing effect chain with the requested audio session
1833 audio_session_t sessionId = effect->sessionId();
1834 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
1835 bool chainCreated = false;
1836
1837 ALOGD_IF((mType == OFFLOAD) && !effect->isOffloadable(),
1838 "%s: on offloaded thread %p: effect %s does not support offload flags %#x",
1839 __func__, this, effect->desc().name, effect->desc().flags);
1840
1841 if (chain == 0) {
1842 // create a new chain for this session
1843 ALOGV("%s: new effect chain for session %d", __func__, sessionId);
1844 chain = IAfEffectChain::create(this, sessionId, mAfThreadCallback);
1845 addEffectChain_l(chain);
1846 chain->setStrategy(getStrategyForSession_l(sessionId));
1847 chainCreated = true;
1848 }
1849 ALOGV("%s: %p chain %p effect %p", __func__, this, chain.get(), effect.get());
1850
1851 if (chain->getEffectFromId_l(effect->id()) != 0) {
1852 ALOGW("%s: %p effect %s already present in chain %p",
1853 __func__, this, effect->desc().name, chain.get());
1854 return BAD_VALUE;
1855 }
1856
1857 effect->setOffloaded_l(mType == OFFLOAD, mId);
1858
1859 status_t status = chain->addEffect(effect);
1860 if (status != NO_ERROR) {
1861 if (chainCreated) {
1862 removeEffectChain_l(chain);
1863 }
1864 return status;
1865 }
1866
1867 effect->setDevices(outDeviceTypeAddrs());
1868 effect->setInputDevice(inDeviceTypeAddr());
1869 effect->setMode(mAfThreadCallback->getMode());
1870 effect->setAudioSource(mAudioSource);
1871
1872 return NO_ERROR;
1873 }
1874
1875 void ThreadBase::removeEffect_l(const sp<IAfEffectModule>& effect, bool release) {
1876
1877 ALOGV("%s %p effect %p", __FUNCTION__, this, effect.get());
1878 effect_descriptor_t desc = effect->desc();
1879 if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
1880 detachAuxEffect_l(effect->id());
1881 }
1882
1883 sp<IAfEffectChain> chain = effect->getCallback()->chain().promote();
1884 if (chain != 0) {
1885 // remove effect chain if removing last effect
1886 if (chain->removeEffect(effect, release) == 0) {
1887 removeEffectChain_l(chain);
1888 }
1889 } else {
1890 ALOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get());
1891 }
1892 }
1893
1894 void ThreadBase::lockEffectChains_l(Vector<sp<IAfEffectChain>>& effectChains)
1895 NO_THREAD_SAFETY_ANALYSIS // calls EffectChain::lock()
1896 {
1897 effectChains = mEffectChains;
1898 for (const auto& effectChain : effectChains) {
1899 effectChain->mutex().lock();
1900 }
1901 }
1902
1903 void ThreadBase::unlockEffectChains(const Vector<sp<IAfEffectChain>>& effectChains)
1904 NO_THREAD_SAFETY_ANALYSIS // calls EffectChain::unlock()
1905 {
1906 for (const auto& effectChain : effectChains) {
1907 effectChain->mutex().unlock();
1908 }
1909 }
1910
1911 sp<IAfEffectChain> ThreadBase::getEffectChain(audio_session_t sessionId) const
1912 {
1913 audio_utils::lock_guard _l(mutex());
1914 return getEffectChain_l(sessionId);
1915 }
1916
1917 sp<IAfEffectChain> ThreadBase::getEffectChain_l(audio_session_t sessionId)
1918 const
1919 {
1920 size_t size = mEffectChains.size();
1921 for (size_t i = 0; i < size; i++) {
1922 if (mEffectChains[i]->sessionId() == sessionId) {
1923 return mEffectChains[i];
1924 }
1925 }
1926 return 0;
1927 }
1928
1929 void ThreadBase::setMode(audio_mode_t mode)
1930 {
1931 audio_utils::lock_guard _l(mutex());
1932 size_t size = mEffectChains.size();
1933 for (size_t i = 0; i < size; i++) {
1934 mEffectChains[i]->setMode_l(mode);
1935 }
1936 }
1937
1938 void ThreadBase::toAudioPortConfig(struct audio_port_config* config)
1939 {
1940 config->type = AUDIO_PORT_TYPE_MIX;
1941 config->ext.mix.handle = mId;
1942 config->sample_rate = mSampleRate;
1943 config->format = mHALFormat;
1944 config->channel_mask = mChannelMask;
1945 config->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
1946 AUDIO_PORT_CONFIG_FORMAT;
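 // (Descriptive note: config_mask advertises which of the fields filled in above carry
 // valid values for this mix port configuration.)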
1947 }
1948
1949 void ThreadBase::systemReady()
1950 {
1951 audio_utils::lock_guard _l(mutex());
1952 if (mSystemReady) {
1953 return;
1954 }
1955 mSystemReady = true;
1956
1957 for (size_t i = 0; i < mPendingConfigEvents.size(); i++) {
1958 sendConfigEvent_l(mPendingConfigEvents.editItemAt(i));
1959 }
1960 mPendingConfigEvents.clear();
1961 }
1962
1963 template <typename T>
1964 ssize_t ThreadBase::ActiveTracks<T>::add(const sp<T>& track) {
1965 ssize_t index = mActiveTracks.indexOf(track);
1966 if (index >= 0) {
1967 ALOGW("ActiveTracks<T>::add track %p already there", track.get());
1968 return index;
1969 }
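 // The bookkeeping below bumps mActiveTracksGeneration (used by updatePowerState_l() to
 // refresh the wakelock uids) and starts battery attribution for the track's client.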
1970 logTrack("add", track);
1971 mActiveTracksGeneration++;
1972 mLatestActiveTrack = track;
1973 track->beginBatteryAttribution();
1974 mHasChanged = true;
1975 return mActiveTracks.add(track);
1976 }
1977
1978 template <typename T>
1979 ssize_t ThreadBase::ActiveTracks<T>::remove(const sp<T>& track) {
1980 ssize_t index = mActiveTracks.remove(track);
1981 if (index < 0) {
1982 ALOGW("ActiveTracks<T>::remove nonexistent track %p", track.get());
1983 return index;
1984 }
1985 logTrack("remove", track);
1986 mActiveTracksGeneration++;
1987 track->endBatteryAttribution();
1988 // mLatestActiveTrack is not cleared even if it is the same as track.
1989 mHasChanged = true;
1990 #ifdef TEE_SINK
1991 track->dumpTee(-1 /* fd */, "_REMOVE");
1992 #endif
1993 track->logEndInterval(); // log to MediaMetrics
1994 return index;
1995 }
1996
1997 template <typename T>
1998 void ThreadBase::ActiveTracks<T>::clear() {
1999 for (const sp<T> &track : mActiveTracks) {
2000 track->endBatteryAttribution();
2001 logTrack("clear", track);
2002 }
2003 mLastActiveTracksGeneration = mActiveTracksGeneration;
2004 if (!mActiveTracks.empty()) { mHasChanged = true; }
2005 mActiveTracks.clear();
2006 mLatestActiveTrack.clear();
2007 }
2008
2009 template <typename T>
2010 void ThreadBase::ActiveTracks<T>::updatePowerState_l(
2011 const sp<ThreadBase>& thread, bool force) {
2012 // Update the thread wakelock with the ActiveTracks client uids.
2013 if (mActiveTracksGeneration != mLastActiveTracksGeneration || force) {
2014 thread->updateWakeLockUids_l(getWakeLockUids());
2015 mLastActiveTracksGeneration = mActiveTracksGeneration;
2016 }
2017 }
2018
2019 template <typename T>
2020 bool ThreadBase::ActiveTracks<T>::readAndClearHasChanged() {
2021 bool hasChanged = mHasChanged;
2022 mHasChanged = false;
2023
2024 for (const sp<T> &track : mActiveTracks) {
2025 // Do not short-circuit as all hasChanged states must be reset
2026 // as all the metadata are going to be sent
2027 hasChanged |= track->readAndClearHasChanged();
2028 }
2029 return hasChanged;
2030 }
2031
2032 template <typename T>
2033 void ThreadBase::ActiveTracks<T>::logTrack(
2034 const char *funcName, const sp<T> &track) const {
2035 if (mLocalLog != nullptr) {
2036 String8 result;
2037 track->appendDump(result, false /* active */);
2038 mLocalLog->log("AT::%-10s(%p) %s", funcName, track.get(), result.c_str());
2039 }
2040 }
2041
2042 void ThreadBase::broadcast_l()
2043 {
2044 // The thread could be blocked waiting for an async callback,
2045 // so signal it to handle state changes immediately.
2046 // If threadLoop is currently unlocked, a signal on mWaitWorkCV would
2047 // be lost, so we also set a flag to prevent it from blocking on mWaitWorkCV.
2048 mSignalPending = true;
2049 mWaitWorkCV.notify_all();
2050 }
2051
2052 // Call only from threadLoop() or when it is idle.
2053 // Do not call from high performance code as this may do binder rpc to the MediaMetrics service.
2054 void ThreadBase::sendStatistics(bool force)
2055 NO_THREAD_SAFETY_ANALYSIS
2056 {
2057 // Do not log if we have no stats.
2058 // We choose the timestamp verifier because it is the most likely item to be present.
2059 const int64_t nstats = mTimestampVerifier.getN() - mLastRecordedTimestampVerifierN;
2060 if (nstats == 0) {
2061 return;
2062 }
2063
2064 // Don't log more frequently than once per 12 hours.
2065 // We use BOOTTIME to include suspend time.
2066 const int64_t timeNs = systemTime(SYSTEM_TIME_BOOTTIME);
2067 const int64_t sinceNs = timeNs - mLastRecordedTimeNs; // ok if mLastRecordedTimeNs = 0
2068 if (!force && sinceNs <= 12 * NANOS_PER_HOUR) {
2069 return;
2070 }
2071
2072 mLastRecordedTimestampVerifierN = mTimestampVerifier.getN();
2073 mLastRecordedTimeNs = timeNs;
2074
2075 std::unique_ptr<mediametrics::Item> item(mediametrics::Item::create("audiothread"));
2076
2077 #define MM_PREFIX "android.media.audiothread." // avoid cut-n-paste errors.
2078
2079 // thread configuration
2080 item->setInt32(MM_PREFIX "id", (int32_t)mId); // IO handle
2081 // item->setInt32(MM_PREFIX "portId", (int32_t)mPortId);
2082 item->setCString(MM_PREFIX "type", threadTypeToString(mType));
2083 item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
2084 item->setInt64(MM_PREFIX "channelMask", (int64_t)mChannelMask);
2085 item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
2086 item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
2087 item->setCString(MM_PREFIX "outDevice", toString(outDeviceTypes_l()).c_str());
2088 item->setCString(MM_PREFIX "inDevice", toString(inDeviceType_l()).c_str());
2089
2090 // thread statistics
2091 if (mIoJitterMs.getN() > 0) {
2092 item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
2093 item->setDouble(MM_PREFIX "ioJitterMs.std", mIoJitterMs.getStdDev());
2094 }
2095 if (mProcessTimeMs.getN() > 0) {
2096 item->setDouble(MM_PREFIX "processTimeMs.mean", mProcessTimeMs.getMean());
2097 item->setDouble(MM_PREFIX "processTimeMs.std", mProcessTimeMs.getStdDev());
2098 }
2099 const auto tsjitter = mTimestampVerifier.getJitterMs();
2100 if (tsjitter.getN() > 0) {
2101 item->setDouble(MM_PREFIX "timestampJitterMs.mean", tsjitter.getMean());
2102 item->setDouble(MM_PREFIX "timestampJitterMs.std", tsjitter.getStdDev());
2103 }
2104 if (mLatencyMs.getN() > 0) {
2105 item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
2106 item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
2107 }
2108 if (mMonopipePipeDepthStats.getN() > 0) {
2109 item->setDouble(MM_PREFIX "monopipePipeDepthStats.mean",
2110 mMonopipePipeDepthStats.getMean());
2111 item->setDouble(MM_PREFIX "monopipePipeDepthStats.std",
2112 mMonopipePipeDepthStats.getStdDev());
2113 }
2114
2115 item->selfrecord();
2116 }
2117
2118 product_strategy_t ThreadBase::getStrategyForStream(audio_stream_type_t stream) const
2119 {
2120 if (!mAfThreadCallback->isAudioPolicyReady()) {
2121 return PRODUCT_STRATEGY_NONE;
2122 }
2123 return AudioSystem::getStrategyForStream(stream);
2124 }
2125
2126 // startMelComputation_l() must be called with AudioFlinger::mutex() held
2127 void ThreadBase::startMelComputation_l(
2128 const sp<audio_utils::MelProcessor>& /*processor*/)
2129 {
2130 // Do nothing
2131 ALOGW("%s: ThreadBase does not support CSD", __func__);
2132 }
2133
2134 // stopMelComputation_l() must be called with AudioFlinger::mutex() held
2135 void ThreadBase::stopMelComputation_l()
2136 {
2137 // Do nothing
2138 ALOGW("%s: ThreadBase does not support CSD", __func__);
2139 }
2140
2141 // ----------------------------------------------------------------------------
2142 // Playback
2143 // ----------------------------------------------------------------------------
2144
2145 PlaybackThread::PlaybackThread(const sp<IAfThreadCallback>& afThreadCallback,
2146 AudioStreamOut* output,
2147 audio_io_handle_t id,
2148 type_t type,
2149 bool systemReady,
2150 audio_config_base_t *mixerConfig)
2151 : ThreadBase(afThreadCallback, id, type, systemReady, true /* isOut */),
2152 mNormalFrameCount(0), mSinkBuffer(NULL),
2153 mMixerBufferEnabled(kEnableExtendedPrecision || type == SPATIALIZER),
2154 mMixerBuffer(NULL),
2155 mMixerBufferSize(0),
2156 mMixerBufferFormat(AUDIO_FORMAT_INVALID),
2157 mMixerBufferValid(false),
2158 mEffectBufferEnabled(kEnableExtendedPrecision || type == SPATIALIZER),
2159 mEffectBuffer(NULL),
2160 mEffectBufferSize(0),
2161 mEffectBufferFormat(AUDIO_FORMAT_INVALID),
2162 mEffectBufferValid(false),
2163 mSuspended(0), mBytesWritten(0),
2164 mFramesWritten(0),
2165 mSuspendedFrames(0),
2166 mActiveTracks(&this->mLocalLog),
2167 // mStreamTypes[] initialized in constructor body
2168 mTracks(type == MIXER),
2169 mOutput(output),
2170 mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
2171 mMixerStatus(MIXER_IDLE),
2172 mMixerStatusIgnoringFastTracks(MIXER_IDLE),
2173 mStandbyDelayNs(getStandbyTimeInNanos()),
2174 mBytesRemaining(0),
2175 mCurrentWriteLength(0),
2176 mUseAsyncWrite(false),
2177 mWriteAckSequence(0),
2178 mDrainSequence(0),
2179 mScreenState(mAfThreadCallback->getScreenState()),
2180 // index 0 is reserved for normal mixer's submix
2181 mFastTrackAvailMask(((1 << FastMixerState::sMaxFastTracks) - 1) & ~1),
2182 mHwSupportsPause(false), mHwPaused(false), mFlushPending(false),
2183 mLeftVolFloat(-1.0), mRightVolFloat(-1.0),
2184 mDownStreamPatch{},
2185 mIsTimestampAdvancing(kMinimumTimeBetweenTimestampChecksNs)
2186 {
2187 snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
2188 mFlagsAsString = toString(output->flags);
2189 mNBLogWriter = afThreadCallback->newWriter_l(kLogSize, mThreadName);
2190
2191 // Assumes constructor is called by AudioFlinger with its mutex() held, but
2192 // it would be safer to explicitly pass initial masterVolume/masterMute as
2193 // parameters.
2194 //
2195 // If the HAL we are using has support for master volume or master mute,
2196 // then do not attenuate or mute during mixing (just leave the volume at 1.0
2197 // and the mute set to false).
2198 mMasterVolume = afThreadCallback->masterVolume_l();
2199 mMasterMute = afThreadCallback->masterMute_l();
2200 if (mOutput->audioHwDev) {
2201 if (mOutput->audioHwDev->canSetMasterVolume()) {
2202 mMasterVolume = 1.0;
2203 }
2204
2205 if (mOutput->audioHwDev->canSetMasterMute()) {
2206 mMasterMute = false;
2207 }
2208 mIsMsdDevice = strcmp(
2209 mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
2210 }
2211
2212 if (mixerConfig != nullptr && mixerConfig->channel_mask != AUDIO_CHANNEL_NONE) {
2213 mMixerChannelMask = mixerConfig->channel_mask;
2214 }
2215
2216 readOutputParameters_l();
2217
2218 if (mType != SPATIALIZER
2219 && mMixerChannelMask != mChannelMask) {
2220 LOG_ALWAYS_FATAL("HAL channel mask %#x does not match mixer channel mask %#x",
2221 mChannelMask, mMixerChannelMask);
2222 }
2223
2224 // TODO: We may also match on address as well as device type for
2225 // AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
2226 if (type == MIXER || type == DIRECT || type == OFFLOAD) {
2227 // TODO: Ensure that this property contains only a single device type.
2228 mTimestampCorrectedDevice = (audio_devices_t)property_get_int64(
2229 "audio.timestamp.corrected_output_device",
2230 (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_OUT_BUS // turn on by default for MSD
2231 : AUDIO_DEVICE_NONE));
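 // (Descriptive note: the property value is read as an audio_devices_t bitmask; on MSD
 // modules it defaults to AUDIO_DEVICE_OUT_BUS, otherwise to AUDIO_DEVICE_NONE.)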
2232 }
2233 if (!audioserver_flags::portid_volume_management()) {
2234 for (int i = AUDIO_STREAM_MIN; i < AUDIO_STREAM_FOR_POLICY_CNT; ++i) {
2235 const audio_stream_type_t stream{static_cast<audio_stream_type_t>(i)};
2236 mStreamTypes[stream].volume = 0.0f;
2237 mStreamTypes[stream].mute = mAfThreadCallback->streamMute_l(stream);
2238 }
2239 // Audio patch and call assistant volume are always max
2240 mStreamTypes[AUDIO_STREAM_PATCH].volume = 1.0f;
2241 mStreamTypes[AUDIO_STREAM_PATCH].mute = false;
2242 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].volume = 1.0f;
2243 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].mute = false;
2244 }
2245 }
2246
2247 PlaybackThread::~PlaybackThread()
2248 {
2249 mAfThreadCallback->unregisterWriter(mNBLogWriter);
2250 free(mSinkBuffer);
2251 free(mMixerBuffer);
2252 free(mEffectBuffer);
2253 free(mPostSpatializerBuffer);
2254 }
2255
2256 // Thread virtuals
2257
2258 void PlaybackThread::onFirstRef()
2259 {
2260 if (!isStreamInitialized()) {
2261 ALOGE("The stream is not open yet"); // This should not happen.
2262 } else {
2263 // Callbacks take strong or weak pointers as a parameter.
2264 // Since PlaybackThread passes itself as a callback handler, it can only
2265 // be done outside of the constructor. Creating weak and especially strong
2266 // pointers to a refcounted object in its own constructor is strongly
2267 // discouraged, see comments in system/core/libutils/include/utils/RefBase.h.
2268 // Even if a function takes a weak pointer, it is possible that it will
2269 // need to convert it to a strong pointer down the line.
2270 if (mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING &&
2271 mOutput->stream->setCallback(this) == OK) {
2272 mUseAsyncWrite = true;
2273 mCallbackThread = sp<AsyncCallbackThread>::make(this);
2274 }
2275
2276 if (mOutput->stream->setEventCallback(this) != OK) {
2277 ALOGD("Failed to add event callback");
2278 }
2279 }
2280 run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
2281 mThreadSnapshot.setTid(getTid());
2282 }
2283
2284 // ThreadBase virtuals
2285 void PlaybackThread::preExit()
2286 {
2287 ALOGV(" preExit()");
2288 status_t result = mOutput->stream->exit();
2289 ALOGE_IF(result != OK, "Error when calling exit(): %d", result);
2290 }
2291
2292 void PlaybackThread::dumpTracks_l(int fd, const Vector<String16>& /* args */)
2293 {
2294 String8 result;
2295 if (!audioserver_flags::portid_volume_management()) {
2296 result.appendFormat(" Stream volumes in dB: ");
2297 for (int i = 0; i < AUDIO_STREAM_CNT; ++i) {
2298 const stream_type_t *st = &mStreamTypes[i];
2299 if (i > 0) {
2300 result.appendFormat(", ");
2301 }
2302 result.appendFormat("%d:%.2g", i, 20.0 * log10(st->volume));
2303 if (st->mute) {
2304 result.append("M");
2305 }
2306 }
2307 }
2308 result.append("\n");
2309 write(fd, result.c_str(), result.length());
2310 result.clear();
2311
2312 // These values are "raw"; they will wrap around. See prepareTracks_l() for a better way.
2313 FastTrackUnderruns underruns = getFastTrackUnderruns(0);
2314 dprintf(fd, " Normal mixer raw underrun counters: partial=%u empty=%u\n",
2315 underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty);
2316
2317 size_t numtracks = mTracks.size();
2318 size_t numactive = mActiveTracks.size();
2319 dprintf(fd, " %zu Tracks", numtracks);
2320 size_t numactiveseen = 0;
2321 const char *prefix = " ";
2322 if (numtracks) {
2323 dprintf(fd, " of which %zu are active\n", numactive);
2324 result.append(prefix);
2325 mTracks[0]->appendDumpHeader(result);
2326 for (size_t i = 0; i < numtracks; ++i) {
2327 sp<IAfTrack> track = mTracks[i];
2328 if (track != 0) {
2329 bool active = mActiveTracks.indexOf(track) >= 0;
2330 if (active) {
2331 numactiveseen++;
2332 }
2333 result.append(prefix);
2334 track->appendDump(result, active);
2335 }
2336 }
2337 } else {
2338 result.append("\n");
2339 }
2340 if (numactiveseen != numactive) {
2341 // some tracks in the active list were not in the tracks list
2342 result.append(" The following tracks are in the active list but"
2343 " not in the track list\n");
2344 result.append(prefix);
2345 mActiveTracks[0]->appendDumpHeader(result);
2346 for (size_t i = 0; i < numactive; ++i) {
2347 sp<IAfTrack> track = mActiveTracks[i];
2348 if (mTracks.indexOf(track) < 0) {
2349 result.append(prefix);
2350 track->appendDump(result, true /* active */);
2351 }
2352 }
2353 }
2354
2355 write(fd, result.c_str(), result.size());
2356 }
2357
2358 void PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
2359 {
2360 dprintf(fd, " Master volume: %f\n", mMasterVolume);
2361 dprintf(fd, " Master mute: %s\n", mMasterMute ? "on" : "off");
2362 dprintf(fd, " Mixer channel Mask: %#x (%s)\n",
2363 mMixerChannelMask, channelMaskToString(mMixerChannelMask, true /* output */).c_str());
2364 if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
2365 dprintf(fd, " Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
2366 channelMaskToString(mHapticChannelMask, true /* output */).c_str());
2367 }
2368 dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
2369 dprintf(fd, " Total writes: %d\n", mNumWrites);
2370 dprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites);
2371 dprintf(fd, " Blocked in write: %s\n", mInWrite ? "yes" : "no");
2372 dprintf(fd, " Suspend count: %d\n", (int32_t)mSuspended);
2373 dprintf(fd, " Fast track availMask=%#x\n", mFastTrackAvailMask);
2374 dprintf(fd, " Standby delay ns=%lld\n", (long long)mStandbyDelayNs);
2375 AudioStreamOut *output = mOutput;
2376 audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
2377 dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n",
2378 output, flags, toString(flags).c_str());
2379 dprintf(fd, " Frames written: %lld\n", (long long)mFramesWritten);
2380 dprintf(fd, " Suspended frames: %lld\n", (long long)mSuspendedFrames);
2381 if (mPipeSink.get() != nullptr) {
2382 dprintf(fd, " PipeSink frames written: %lld\n", (long long)mPipeSink->framesWritten());
2383 }
2384 if (output != nullptr) {
2385 dprintf(fd, " Hal stream dump:\n");
2386 (void)output->stream->dump(fd, args);
2387 }
2388 }
2389
2390 // PlaybackThread::createTrack_l() must be called with AudioFlinger::mutex() held
2391 sp<IAfTrack> PlaybackThread::createTrack_l(
2392 const sp<Client>& client,
2393 audio_stream_type_t streamType,
2394 const audio_attributes_t& attr,
2395 uint32_t *pSampleRate,
2396 audio_format_t format,
2397 audio_channel_mask_t channelMask,
2398 size_t *pFrameCount,
2399 size_t *pNotificationFrameCount,
2400 uint32_t notificationsPerBuffer,
2401 float speed,
2402 const sp<IMemory>& sharedBuffer,
2403 audio_session_t sessionId,
2404 audio_output_flags_t *flags,
2405 pid_t creatorPid,
2406 const AttributionSourceState& attributionSource,
2407 pid_t tid,
2408 status_t *status,
2409 audio_port_handle_t portId,
2410 const sp<media::IAudioTrackCallback>& callback,
2411 bool isSpatialized,
2412 bool isBitPerfect,
2413 audio_output_flags_t *afTrackFlags,
2414 float volume,
2415 bool muted)
2416 {
2417 size_t frameCount = *pFrameCount;
2418 size_t notificationFrameCount = *pNotificationFrameCount;
2419 sp<IAfTrack> track;
2420 status_t lStatus;
2421 audio_output_flags_t outputFlags = mOutput->flags;
2422 audio_output_flags_t requestedFlags = *flags;
2423 uint32_t sampleRate;
2424
2425 if (sharedBuffer != 0 && checkIMemory(sharedBuffer) != NO_ERROR) {
2426 lStatus = BAD_VALUE;
2427 goto Exit;
2428 }
2429
2430 if (*pSampleRate == 0) {
2431 *pSampleRate = mSampleRate;
2432 }
2433 sampleRate = *pSampleRate;
2434
2435 // special case for FAST flag considered OK if fast mixer is present
2436 if (hasFastMixer()) {
2437 outputFlags = (audio_output_flags_t)(outputFlags | AUDIO_OUTPUT_FLAG_FAST);
2438 }
2439
2440 // Check if requested flags are compatible with output stream flags
2441 if ((*flags & outputFlags) != *flags) {
2442 ALOGW("createTrack_l(): mismatch between requested flags (%08x) and output flags (%08x)",
2443 *flags, outputFlags);
2444 *flags = (audio_output_flags_t)(*flags & outputFlags);
2445 }
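 // (Descriptive note: the intersection above simply drops any requested flag the output
 // stream was not opened with, e.g. a FAST request on a non-fast output is cleared here
 // before the detailed FAST eligibility checks below.)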
2446
2447 if (isBitPerfect) {
2448 audio_utils::lock_guard _l(mutex());
2449 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
2450 if (chain.get() != nullptr) {
2451 // Bit-perfect is required according to the configuration and preferred mixer
2452 // attributes, but it is not in the output flags from the client's request. Explicitly
2453 // add the bit-perfect flag to check compatibility.
2454 audio_output_flags_t flagsToCheck =
2455 (audio_output_flags_t)(*flags & AUDIO_OUTPUT_FLAG_BIT_PERFECT);
2456 chain->checkOutputFlagCompatibility(&flagsToCheck);
2457 if ((flagsToCheck & AUDIO_OUTPUT_FLAG_BIT_PERFECT) == AUDIO_OUTPUT_FLAG_NONE) {
2458 ALOGE("%s cannot create track as there is data-processing effect attached to "
2459 "given session id(%d)", __func__, sessionId);
2460 lStatus = BAD_VALUE;
2461 goto Exit;
2462 }
2463 *flags = flagsToCheck;
2464 }
2465 }
2466
2467 // client expresses a preference for FAST, but we get the final say
2468 if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
2469 if (
2470 // PCM data
2471 audio_is_linear_pcm(format) &&
2472 // TODO: extract as a data library function that checks that a computationally
2473 // expensive downmixer is not required: isFastOutputChannelConversion()
2474 (channelMask == (mChannelMask | mHapticChannelMask) ||
2475 mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
2476 (channelMask == AUDIO_CHANNEL_OUT_MONO
2477 /* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
2478 // hardware sample rate
2479 (sampleRate == mSampleRate) &&
2480 // normal mixer has an associated fast mixer
2481 hasFastMixer() &&
2482 // there are sufficient fast track slots available
2483 (mFastTrackAvailMask != 0)
2484 // FIXME test that MixerThread for this fast track has a capable output HAL
2485 // FIXME add a permission test also?
2486 ) {
2487 // static tracks can have any nonzero framecount, streaming tracks check against minimum.
2488 if (sharedBuffer == 0) {
2489 // read the fast track multiplier property the first time it is needed
2490 int ok = pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit);
2491 if (ok != 0) {
2492 ALOGE("%s pthread_once failed: %d", __func__, ok);
2493 }
2494 frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
2495 }
2496
2497 // check compatibility with audio effects.
2498 { // scope for mutex()
2499 audio_utils::lock_guard _l(mutex());
2500 for (audio_session_t session : {
2501 AUDIO_SESSION_DEVICE,
2502 AUDIO_SESSION_OUTPUT_STAGE,
2503 AUDIO_SESSION_OUTPUT_MIX,
2504 sessionId,
2505 }) {
2506 sp<IAfEffectChain> chain = getEffectChain_l(session);
2507 if (chain.get() != nullptr) {
2508 audio_output_flags_t old = *flags;
2509 chain->checkOutputFlagCompatibility(flags);
2510 if (old != *flags) {
2511 ALOGV("AUDIO_OUTPUT_FLAGS denied by effect, session=%d old=%#x new=%#x",
2512 (int)session, (int)old, (int)*flags);
2513 }
2514 }
2515 }
2516 }
2517 ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_FAST) != 0,
2518 "AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
2519 frameCount, mFrameCount);
2520 } else {
2521 ALOGD("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
2522 "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
2523 "sampleRate=%u mSampleRate=%u "
2524 "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
2525 sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
2526 audio_is_linear_pcm(format), channelMask, sampleRate,
2527 mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
2528 *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
2529 }
2530 }
2531
2532 if (!audio_has_proportional_frames(format)) {
2533 if (sharedBuffer != 0) {
2534 // Same comment as below about ignoring frameCount parameter for set()
2535 frameCount = sharedBuffer->size();
2536 } else if (frameCount == 0) {
2537 frameCount = mNormalFrameCount;
2538 }
2539 if (notificationFrameCount != frameCount) {
2540 notificationFrameCount = frameCount;
2541 }
2542 } else if (sharedBuffer != 0) {
2543 // FIXME: Ensure client side memory buffers need
2544 // not have additional alignment beyond sample
2545 // (e.g. 16 bit stereo accessed as 32 bit frame).
2546 size_t alignment = audio_bytes_per_sample(format);
2547 if (alignment & 1) {
2548 // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
2549 alignment = 1;
2550 }
2551 uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
2552 size_t frameSize = channelCount * audio_bytes_per_sample(format);
2553 if (channelCount > 1) {
2554 // More than 2 channels does not require stronger alignment than stereo
2555 alignment <<= 1;
2556 }
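 // Illustrative example: for 16-bit stereo PCM, alignment = 2 << 1 = 4, so the shared
 // buffer address must be 4-byte aligned and frameSize = 2 * 2 = 4 bytes per frame.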
2557 if (((uintptr_t)sharedBuffer->unsecurePointer() & (alignment - 1)) != 0) {
2558 ALOGE("Invalid buffer alignment: address %p, channel count %u",
2559 sharedBuffer->unsecurePointer(), channelCount);
2560 lStatus = BAD_VALUE;
2561 goto Exit;
2562 }
2563
2564 // When initializing a shared buffer AudioTrack via constructors,
2565 // there's no frameCount parameter.
2566 // But when initializing a shared buffer AudioTrack via set(),
2567 // there _is_ a frameCount parameter. We silently ignore it.
2568 frameCount = sharedBuffer->size() / frameSize;
2569 } else {
2570 size_t minFrameCount = 0;
2571 // For fast tracks we try to respect the application's request for notifications per buffer.
2572 if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
2573 if (notificationsPerBuffer > 0) {
2574 // Avoid possible arithmetic overflow during multiplication.
2575 if (notificationsPerBuffer > SIZE_MAX / mFrameCount) {
2576 ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
2577 notificationsPerBuffer, mFrameCount);
2578 } else {
2579 minFrameCount = mFrameCount * notificationsPerBuffer;
2580 }
2581 }
2582 } else {
2583 // For normal PCM streaming tracks, update minimum frame count.
2584 // Buffer depth is forced to be at least 2 x the normal mixer frame count and
2585 // to cover audio hardware latency.
2586 // This is probably too conservative, but legacy application code may depend on it.
2587 // If you change this calculation, also review the start threshold which is related.
2588 uint32_t latencyMs = latency_l();
2589 if (latencyMs == 0) {
2590 ALOGE("Error when retrieving output stream latency");
2591 lStatus = UNKNOWN_ERROR;
2592 goto Exit;
2593 }
2594
2595 minFrameCount = AudioSystem::calculateMinFrameCount(latencyMs, mNormalFrameCount,
2596 mSampleRate, sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
2597
2598 }
2599 if (frameCount < minFrameCount) {
2600 frameCount = minFrameCount;
2601 }
2602 }
2603
2604 // Make sure that the application is notified with sufficient margin before underrun.
2605 // The client can divide the AudioTrack buffer into sub-buffers,
2606 // and expresses its preference to the server as the notification frame count.
2607 if (sharedBuffer == 0 && audio_is_linear_pcm(format)) {
2608 size_t maxNotificationFrames;
2609 if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
2610 // notify every HAL buffer, regardless of the size of the track buffer
2611 maxNotificationFrames = mFrameCount;
2612 } else {
2613 // Triple buffer the notification period for a triple buffered mixer period;
2614 // otherwise, double buffering for the notification period is fine.
2615 //
2616 // TODO: This should be moved to AudioTrack to modify the notification period
2617 // on AudioTrack::setBufferSizeInFrames() changes.
2618 const int nBuffering =
2619 (uint64_t{frameCount} * mSampleRate)
2620 / (uint64_t{mNormalFrameCount} * sampleRate) == 3 ? 3 : 2;
2621
2622 maxNotificationFrames = frameCount / nBuffering;
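 // Illustrative example: with frameCount = 2880 and a 960-frame normal mixer period at
 // the same sample rate, the ratio is 3, so nBuffering = 3 and maxNotificationFrames =
 // 960; any other ratio falls back to double buffering (nBuffering = 2).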
2623 // If client requested a fast track but this was denied, then use the smaller maximum.
2624 if (requestedFlags & AUDIO_OUTPUT_FLAG_FAST) {
2625 size_t maxNotificationFramesFastDenied = FMS_20 * sampleRate / 1000;
2626 if (maxNotificationFrames > maxNotificationFramesFastDenied) {
2627 maxNotificationFrames = maxNotificationFramesFastDenied;
2628 }
2629 }
2630 }
2631 if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
2632 if (notificationFrameCount == 0) {
2633 ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
2634 maxNotificationFrames, frameCount);
2635 } else {
2636 ALOGW("Client adjusted notificationFrames from %zu to %zu for frameCount %zu",
2637 notificationFrameCount, maxNotificationFrames, frameCount);
2638 }
2639 notificationFrameCount = maxNotificationFrames;
2640 }
2641 }
2642
2643 *pFrameCount = frameCount;
2644 *pNotificationFrameCount = notificationFrameCount;
2645
2646 switch (mType) {
2647 case BIT_PERFECT:
2648 if (isBitPerfect) {
2649 if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
2650 ALOGE("%s, bad parameter when request streaming bit-perfect, sampleRate=%u, "
2651 "format=%#x, channelMask=%#x, mSampleRate=%u, mFormat=%#x, mChannelMask=%#x",
2652 __func__, sampleRate, format, channelMask, mSampleRate, mFormat,
2653 mChannelMask);
2654 lStatus = BAD_VALUE;
2655 goto Exit;
2656 }
2657 }
2658 break;
2659
2660 case DIRECT:
2661 if (audio_is_linear_pcm(format)) { // TODO maybe use audio_has_proportional_frames()?
2662 if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
2663 ALOGE("createTrack_l() Bad parameter: sampleRate %u format %#x, channelMask 0x%08x "
2664 "for output %p with format %#x",
2665 sampleRate, format, channelMask, mOutput, mFormat);
2666 lStatus = BAD_VALUE;
2667 goto Exit;
2668 }
2669 }
2670 break;
2671
2672 case OFFLOAD:
2673 if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
2674 ALOGE("createTrack_l() Bad parameter: sampleRate %d format %#x, channelMask 0x%08x \""
2675 "for output %p with format %#x",
2676 sampleRate, format, channelMask, mOutput, mFormat);
2677 lStatus = BAD_VALUE;
2678 goto Exit;
2679 }
2680 break;
2681
2682 default:
2683 if (!audio_is_linear_pcm(format)) {
2684 ALOGE("createTrack_l() Bad parameter: format %#x \""
2685 "for output %p with format %#x",
2686 format, mOutput, mFormat);
2687 lStatus = BAD_VALUE;
2688 goto Exit;
2689 }
2690 if (sampleRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
2691 ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
2692 lStatus = BAD_VALUE;
2693 goto Exit;
2694 }
2695 break;
2696
2697 }
2698
2699 lStatus = initCheck();
2700 if (lStatus != NO_ERROR) {
2701 ALOGE("createTrack_l() audio driver not initialized");
2702 goto Exit;
2703 }
2704
2705 { // scope for mutex()
2706 audio_utils::lock_guard _l(mutex());
2707
2708 // all tracks in same audio session must share the same routing strategy otherwise
2709 // conflicts will happen when tracks are moved from one output to another by audio policy
2710 // manager
2711 product_strategy_t strategy = getStrategyForStream(streamType);
2712 for (size_t i = 0; i < mTracks.size(); ++i) {
2713 sp<IAfTrack> t = mTracks[i];
2714 if (t != 0 && t->isExternalTrack()) {
2715 product_strategy_t actual = getStrategyForStream(t->streamType());
2716 if (sessionId == t->sessionId() && strategy != actual) {
2717 ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
2718 strategy, actual);
2719 lStatus = BAD_VALUE;
2720 goto Exit;
2721 }
2722 }
2723 }
2724
2725 // Set DIRECT/OFFLOAD flag if current thread is DirectOutputThread/OffloadThread.
2726 // This can happen when the playback is rerouted to direct output/offload thread by
2727 // dynamic audio policy.
2728 // Do NOT report the flag changes back to client, since the client
2729 // doesn't explicitly request a direct/offload flag.
2730 audio_output_flags_t trackFlags = *flags;
2731 if (mType == DIRECT) {
2732 trackFlags = static_cast<audio_output_flags_t>(trackFlags | AUDIO_OUTPUT_FLAG_DIRECT);
2733 } else if (mType == OFFLOAD) {
2734 trackFlags = static_cast<audio_output_flags_t>(trackFlags |
2735 AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT);
2736 }
2737 *afTrackFlags = trackFlags;
2738
2739 track = IAfTrack::create(this, client, streamType, attr, sampleRate, format,
2740 channelMask, frameCount,
2741 nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
2742 sessionId, creatorPid, attributionSource, trackFlags,
2743 IAfTrackBase::TYPE_DEFAULT, portId, SIZE_MAX /*frameCountToBeReady*/,
2744 speed, isSpatialized, isBitPerfect, volume, muted);
2745
2746 lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
2747 if (lStatus != NO_ERROR) {
2748 ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
2749 // track must be cleared from the caller as the caller has the AF lock
2750 goto Exit;
2751 }
2752 mTracks.add(track);
2753 {
2754 audio_utils::lock_guard _atCbL(audioTrackCbMutex());
2755 if (callback.get() != nullptr) {
2756 mAudioTrackCallbacks.emplace(track, callback);
2757 }
2758 }
2759
2760 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
2761 if (chain != 0) {
2762 ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
2763 track->setMainBuffer(chain->inBuffer());
2764 chain->setStrategy(getStrategyForStream(track->streamType()));
2765 chain->incTrackCnt();
2766 }
2767
2768 if ((*flags & AUDIO_OUTPUT_FLAG_FAST) && (tid != -1)) {
2769 pid_t callingPid = IPCThreadState::self()->getCallingPid();
2770 // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
2771 // so ask activity manager to do this on our behalf
2772 sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
2773 }
2774 }
2775
2776 lStatus = NO_ERROR;
2777
2778 Exit:
2779 *status = lStatus;
2780 return track;
2781 }
2782
2783 template<typename T>
2784 ssize_t PlaybackThread::Tracks<T>::remove(const sp<T>& track)
2785 {
2786 const int trackId = track->id();
2787 const ssize_t index = mTracks.remove(track);
2788 if (index >= 0) {
2789 if (mSaveDeletedTrackIds) {
2790 // We can't directly access mAudioMixer since the caller may be outside of threadLoop.
2791 // Instead, we add to mDeletedTrackIds which is solely used for mAudioMixer update,
2792 // to be handled when MixerThread::prepareTracks_l() next changes mAudioMixer.
2793 mDeletedTrackIds.emplace(trackId);
2794 }
2795 }
2796 return index;
2797 }
2798
2799 uint32_t PlaybackThread::correctLatency_l(uint32_t latency) const
2800 {
2801 return latency;
2802 }
2803
2804 uint32_t PlaybackThread::latency() const
2805 {
2806 audio_utils::lock_guard _l(mutex());
2807 return latency_l();
2808 }
2809 uint32_t PlaybackThread::latency_l() const
2810 NO_THREAD_SAFETY_ANALYSIS
2811 // Fix later.
2812 {
2813 uint32_t latency;
2814 if (initCheck() == NO_ERROR && mOutput->stream->getLatency(&latency) == OK) {
2815 return correctLatency_l(latency);
2816 }
2817 return 0;
2818 }
2819
2820 void PlaybackThread::setMasterVolume(float value)
2821 {
2822 audio_utils::lock_guard _l(mutex());
2823 // Don't apply master volume in SW if our HAL can do it for us.
2824 if (mOutput && mOutput->audioHwDev &&
2825 mOutput->audioHwDev->canSetMasterVolume()) {
2826 mMasterVolume = 1.0;
2827 } else {
2828 mMasterVolume = value;
2829 }
2830 }
2831
2832 void PlaybackThread::setMasterBalance(float balance)
2833 {
2834 mMasterBalance.store(balance);
2835 }
2836
2837 void PlaybackThread::setMasterMute(bool muted)
2838 {
2839 if (isDuplicating()) {
2840 return;
2841 }
2842 audio_utils::lock_guard _l(mutex());
2843 // Don't apply master mute in SW if our HAL can do it for us.
2844 if (mOutput && mOutput->audioHwDev &&
2845 mOutput->audioHwDev->canSetMasterMute()) {
2846 mMasterMute = false;
2847 } else {
2848 mMasterMute = muted;
2849 }
2850 }
2851
2852 void PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value, bool muted)
2853 {
2854 ALOGV("%s: stream %d value %f muted %d", __func__, stream, value, muted);
2855 audio_utils::lock_guard _l(mutex());
2856 mStreamTypes[stream].volume = value;
2857 if (com_android_media_audio_ring_my_car()) {
2858 mStreamTypes[stream].mute = muted;
2859 }
2860 broadcast_l();
2861 }
2862
2863 void PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
2864 {
2865 audio_utils::lock_guard _l(mutex());
2866 mStreamTypes[stream].mute = muted;
2867 broadcast_l();
2868 }
2869
2870 float PlaybackThread::streamVolume(audio_stream_type_t stream) const
2871 {
2872 audio_utils::lock_guard _l(mutex());
2873 return mStreamTypes[stream].volume;
2874 }
2875
2876 status_t PlaybackThread::setPortsVolume(
2877 const std::vector<audio_port_handle_t>& portIds, float volume, bool muted) {
2878 audio_utils::lock_guard _l(mutex());
2879 for (const auto& portId : portIds) {
2880 for (size_t i = 0; i < mTracks.size(); i++) {
2881 sp<IAfTrack> track = mTracks[i].get();
2882 if (portId == track->portId()) {
2883 track->setPortVolume(volume);
2884 track->setPortMute(muted);
2885 break;
2886 }
2887 }
2888 }
2889 broadcast_l();
2890 return NO_ERROR;
2891 }
2892
2893 void PlaybackThread::setVolumeForOutput_l(float left, float right) const
2894 {
2895 mOutput->stream->setVolume(left, right);
2896 }
2897
2898 // addTrack_l() must be called with ThreadBase::mutex() held
2899 status_t PlaybackThread::addTrack_l(const sp<IAfTrack>& track)
2900 {
2901 status_t status = ALREADY_EXISTS;
2902
2903 if (mActiveTracks.indexOf(track) < 0) {
2904 // the track is newly added, make sure it fills up all its
2905 // buffers before playing. This is to ensure the client will
2906 // effectively get the latency it requested.
2907 if (track->isExternalTrack()) {
2908 IAfTrackBase::track_state state = track->state();
2909 // Because the track is not yet on mActiveTracks, only the TrackHandle
2910 // can be adding the track at this point.
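// The mutex is dropped around the blocking AudioSystem::startOutput() call. The state
// snapshot taken above is re-checked after re-acquiring the lock: if another caller
// stopped or paused the track in the meantime, a successful start is rolled back with
// stopOutput() and INVALID_OPERATION is returned.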
2911 mutex().unlock();
2912 status = AudioSystem::startOutput(track->portId());
2913 mutex().lock();
2914 // abort if the track was stopped or paused while we released the lock
2915 if (state != track->state()) {
2916 if (status == NO_ERROR) {
2917 mutex().unlock();
2918 AudioSystem::stopOutput(track->portId());
2919 mutex().lock();
2920 }
2921 return INVALID_OPERATION;
2922 }
2923 // abort if start is rejected by audio policy manager
2924 if (status != NO_ERROR) {
2925 // Do not replace the error if it is DEAD_OBJECT. When this happens, it indicates
2926 // current playback thread is reopened, which may happen when clients set preferred
2927 // mixer configuration. Returning DEAD_OBJECT will make the client restore track
2928 // immediately.
2929 return status == DEAD_OBJECT ? status : PERMISSION_DENIED;
2930 }
2931 #ifdef ADD_BATTERY_DATA
2932 // to track the speaker usage
2933 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
2934 #endif
2935 sendIoConfigEvent_l(AUDIO_CLIENT_STARTED, track->creatorPid(), track->portId());
2936 }
2937
2938 // set retry count for buffer fill
2939 if (track->isOffloaded()) {
2940 if (track->isStopping_1()) {
2941 track->retryCount() = kMaxTrackStopRetriesOffload;
2942 } else {
2943 track->retryCount() = kMaxTrackStartupRetriesOffload;
2944 }
2945 track->fillingStatus() = mStandby ? IAfTrack::FS_FILLING : IAfTrack::FS_FILLED;
2946 } else {
2947 track->retryCount() = kMaxTrackStartupRetries;
2948 track->fillingStatus() =
2949 track->sharedBuffer() != 0 ? IAfTrack::FS_FILLED : IAfTrack::FS_FILLING;
2950 }
2951
2952 sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
2953 if (mHapticChannelMask != AUDIO_CHANNEL_NONE
2954 && ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
2955 || (chain != nullptr && chain->containsHapticGeneratingEffect()))) {
2956 // Unlock here because VibratorService takes its own lock for this call and may
2957 // call Track mute/unmute, which also requires this thread's lock.
2958 mutex().unlock();
2959 const os::HapticScale hapticScale = afutils::onExternalVibrationStart(
2960 track->getExternalVibration());
2961 std::optional<media::AudioVibratorInfo> vibratorInfo;
2962 {
2963 // TODO(b/184194780): Use the vibrator information from the vibrator that will be
2964 // used to play this track.
2965 audio_utils::lock_guard _l(mAfThreadCallback->mutex());
2966 vibratorInfo = mAfThreadCallback->getDefaultVibratorInfo_l();
2967 }
2968 mutex().lock();
2969 track->setHapticScale(hapticScale);
2970 if (vibratorInfo) {
2971 track->setHapticMaxAmplitude(vibratorInfo->maxAmplitude);
2972 }
2973
2974 // Haptic playback should be enabled by vibrator service.
2975 if (track->getHapticPlaybackEnabled()) {
2976 // Disable haptic playback on all active tracks so that only one track
2977 // plays haptics when the current track should play them.
2978 for (const auto &t : mActiveTracks) {
2979 t->setHapticPlaybackEnabled(false);
2980 }
2981 }
2982
2983 // Set haptic intensity for effect
2984 if (chain != nullptr) {
2985 chain->setHapticScale_l(track->id(), hapticScale);
2986 }
2987 }
2988
2989 track->setResetDone(false);
2990 track->resetPresentationComplete();
2991
2992 // Do not release the ThreadBase mutex after the track is added to mActiveTracks unless
2993 // all key changes are complete. It is possible that the threadLoop will begin
2994 // processing the added track immediately after the ThreadBase mutex is released.
2995 mActiveTracks.add(track);
2996
2997 if (chain != 0) {
2998 ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
2999 track->sessionId());
3000 chain->incActiveTrackCnt();
3001 }
3002
3003 track->logBeginInterval(patchSinksToString(&mPatch)); // log to MediaMetrics
3004 status = NO_ERROR;
3005 }
3006
3007 onAddNewTrack_l();
3008 return status;
3009 }
3010
3011 bool PlaybackThread::destroyTrack_l(const sp<IAfTrack>& track)
3012 {
3013 track->terminate();
3014 // active tracks are removed by threadLoop()
3015 bool trackActive = (mActiveTracks.indexOf(track) >= 0);
3016 track->setState(IAfTrackBase::STOPPED);
3017 if (!trackActive) {
3018 removeTrack_l(track);
3019 } else if (track->isFastTrack() || track->isOffloaded() || track->isDirect()) {
3020 if (track->isPausePending()) {
3021 track->pauseAck();
3022 }
3023 track->setState(IAfTrackBase::STOPPING_1);
3024 }
3025
3026 return trackActive;
3027 }
3028
3029 void PlaybackThread::removeTrack_l(const sp<IAfTrack>& track)
3030 {
3031 track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
3032
3033 String8 result;
3034 track->appendDump(result, false /* active */);
3035 mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.c_str());
3036
3037 mTracks.remove(track);
3038 {
3039 audio_utils::lock_guard _atCbL(audioTrackCbMutex());
3040 mAudioTrackCallbacks.erase(track);
3041 }
3042 if (track->isFastTrack()) {
3043 int index = track->fastIndex();
3044 ALOG_ASSERT(0 < index && index < (int)FastMixerState::sMaxFastTracks);
3045 ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index)));
3046 mFastTrackAvailMask |= 1 << index;
3047 // redundant as track is about to be destroyed, for dumpsys only
3048 track->fastIndex() = -1;
3049 }
3050 sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
3051 if (chain != 0) {
3052 chain->decTrackCnt();
3053 }
3054 }
3055
3056 std::set<audio_port_handle_t> PlaybackThread::getTrackPortIds_l()
3057 {
3058 std::set<int32_t> result;
3059 for (const auto& t : mTracks) {
3060 if (t->isExternalTrack()) {
3061 result.insert(t->portId());
3062 }
3063 }
3064 return result;
3065 }
3066
3067 std::set<audio_port_handle_t> PlaybackThread::getTrackPortIds()
3068 {
3069 audio_utils::lock_guard _l(mutex());
3070 return getTrackPortIds_l();
3071 }
3072
3073 String8 PlaybackThread::getParameters(const String8& keys)
3074 {
3075 audio_utils::lock_guard _l(mutex());
3076 String8 out_s8;
3077 if (initCheck() == NO_ERROR && mOutput->stream->getParameters(keys, &out_s8) == OK) {
3078 return out_s8;
3079 }
3080 return {};
3081 }
3082
3083 status_t DirectOutputThread::selectPresentation(int presentationId, int programId) {
3084 audio_utils::lock_guard _l(mutex());
3085 if (!isStreamInitialized()) {
3086 return NO_INIT;
3087 }
3088 return mOutput->stream->selectPresentation(presentationId, programId);
3089 }
3090
3091 void PlaybackThread::ioConfigChanged_l(audio_io_config_event_t event, pid_t pid,
3092 audio_port_handle_t portId) {
3093 ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
3094 sp<AudioIoDescriptor> desc;
3095 const struct audio_patch patch = isMsdDevice() ? mDownStreamPatch : mPatch;
3096 switch (event) {
3097 case AUDIO_OUTPUT_OPENED:
3098 case AUDIO_OUTPUT_REGISTERED:
3099 case AUDIO_OUTPUT_CONFIG_CHANGED:
3100 desc = sp<AudioIoDescriptor>::make(mId, patch, false /*isInput*/,
3101 mSampleRate, mFormat, mChannelMask,
3102 // FIXME AudioFlinger::frameCount(audio_io_handle_t) instead of mNormalFrameCount?
3103 mNormalFrameCount, mFrameCount, latency_l());
3104 break;
3105 case AUDIO_CLIENT_STARTED:
3106 desc = sp<AudioIoDescriptor>::make(mId, patch, portId);
3107 break;
3108 case AUDIO_OUTPUT_CLOSED:
3109 default:
3110 desc = sp<AudioIoDescriptor>::make(mId);
3111 break;
3112 }
3113 mAfThreadCallback->ioConfigChanged_l(event, desc, pid);
3114 }
3115
3116 void PlaybackThread::onWriteReady()
3117 {
3118 mCallbackThread->resetWriteBlocked();
3119 }
3120
3121 void PlaybackThread::onDrainReady()
3122 {
3123 mCallbackThread->resetDraining();
3124 }
3125
3126 void PlaybackThread::onError(bool isHardError)
3127 {
3128 mCallbackThread->setAsyncError(isHardError);
3129 }
3130
3131 void PlaybackThread::onCodecFormatChanged(
3132 const std::vector<uint8_t>& metadataBs)
3133 {
3134 const auto weakPointerThis = wp<PlaybackThread>::fromExisting(this);
3135 std::thread([this, metadataBs, weakPointerThis]() {
3136 const sp<PlaybackThread> playbackThread = weakPointerThis.promote();
3137 if (playbackThread == nullptr) {
3138 ALOGW("PlaybackThread was destroyed, skip codec format change event");
3139 return;
3140 }
3141
3142 audio_utils::metadata::Data metadata =
3143 audio_utils::metadata::dataFromByteString(metadataBs);
3144 if (metadata.empty()) {
3145 ALOGW("Can not transform the buffer to audio metadata, %s, %d",
3146 reinterpret_cast<char*>(const_cast<uint8_t*>(metadataBs.data())),
3147 (int)metadataBs.size());
3148 return;
3149 }
3150
3151 audio_utils::metadata::ByteString metaDataStr =
3152 audio_utils::metadata::byteStringFromData(metadata);
3153 std::vector metadataVec(metaDataStr.begin(), metaDataStr.end());
3154 audio_utils::lock_guard _l(audioTrackCbMutex());
3155 for (const auto& callbackPair : mAudioTrackCallbacks) {
3156 callbackPair.second->onCodecFormatChanged(metadataVec);
3157 }
3158 }).detach();
3159 }
3160
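// mWriteAckSequence and mDrainSequence implement a small ack protocol with the async
// callback thread: the low bit is a "waiting for ack" flag and the upper bits form a
// generation counter bumped by 2 per request. For example, issuing a write request turns
// sequence 2 into 5 (2 + 2, then |1); the matching callback passes 5 back and the flag is
// cleared to 4, while a stale callback carrying an older value is ignored.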
3161 void PlaybackThread::resetWriteBlocked(uint32_t sequence)
3162 {
3163 audio_utils::lock_guard _l(mutex());
3164 // reject out of sequence requests
3165 if ((mWriteAckSequence & 1) && (sequence == mWriteAckSequence)) {
3166 mWriteAckSequence &= ~1;
3167 mWaitWorkCV.notify_one();
3168 }
3169 }
3170
3171 void PlaybackThread::resetDraining(uint32_t sequence)
3172 {
3173 audio_utils::lock_guard _l(mutex());
3174 // reject out of sequence requests
3175 if ((mDrainSequence & 1) && (sequence == mDrainSequence)) {
3176 // Register discontinuity when HW drain is completed because that can cause
3177 // the timestamp frame position to reset to 0 for direct and offload threads.
3178 // (Out of sequence requests are ignored, since the discontinuity would be handled
3179 // elsewhere, e.g. in flush).
3180 mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
3181 mDrainSequence &= ~1;
3182 mWaitWorkCV.notify_one();
3183 }
3184 }
3185
3186 void PlaybackThread::readOutputParameters_l()
3187 NO_THREAD_SAFETY_ANALYSIS
3188 // 'moveEffectChain_ll' requires holding mutex 'AudioFlinger_Mutex' exclusively
3189 {
3190 // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
3191 const audio_config_base_t audioConfig = mOutput->getAudioProperties();
3192 mSampleRate = audioConfig.sample_rate;
3193 mChannelMask = audioConfig.channel_mask;
3194 if (!audio_is_output_channel(mChannelMask)) {
3195 LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
3196 }
3197 if (hasMixer() && !isValidPcmSinkChannelMask(mChannelMask)) {
3198 LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output",
3199 mChannelMask);
3200 }
3201
3202 if (mMixerChannelMask == AUDIO_CHANNEL_NONE) {
3203 mMixerChannelMask = mChannelMask;
3204 }
3205
3206 mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
3207 mBalance.setChannelMask(mChannelMask);
3208
3209 uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mMixerChannelMask);
3210
3211 // Get actual HAL format.
3212 status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
3213 LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
3214 // Get format from the shim, which will be different than the HAL format
3215 // if playing compressed audio over HDMI passthrough.
3216 mFormat = audioConfig.format;
3217 if (!audio_is_valid_format(mFormat)) {
3218 LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
3219 }
3220 if (hasMixer() && !isValidPcmSinkFormat(mFormat)) {
3221 LOG_FATAL("HAL format %#x not supported for mixed output",
3222 mFormat);
3223 }
3224 mFrameSize = mOutput->getFrameSize();
3225 result = mOutput->stream->getBufferSize(&mBufferSize);
3226 LOG_ALWAYS_FATAL_IF(result != OK,
3227 "Error when retrieving output stream buffer size: %d", result);
3228 mFrameCount = mBufferSize / mFrameSize;
3229 if (hasMixer() && (mFrameCount & 15)) {
3230 ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
3231 mFrameCount);
3232 }
3233
3234 mHwSupportsPause = false;
3235 if (mOutput->flags & AUDIO_OUTPUT_FLAG_DIRECT) {
3236 bool supportsPause = false, supportsResume = false;
3237 if (mOutput->stream->supportsPauseAndResume(&supportsPause, &supportsResume) == OK) {
3238 if (supportsPause && supportsResume) {
3239 mHwSupportsPause = true;
3240 } else if (supportsPause) {
3241 ALOGW("direct output implements pause but not resume");
3242 } else if (supportsResume) {
3243 ALOGW("direct output implements resume but not pause");
3244 }
3245 }
3246 }
3247 if (!mHwSupportsPause && mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
3248 LOG_ALWAYS_FATAL("HW_AV_SYNC requested but HAL does not implement pause and resume");
3249 }
3250
3251 if (mType == DUPLICATING && mMixerBufferEnabled && mEffectBufferEnabled) {
3252 // For best precision, we use float instead of the associated output
3253 // device format (typically PCM 16 bit).
3254
3255 mFormat = AUDIO_FORMAT_PCM_FLOAT;
3256 mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
3257 mBufferSize = mFrameSize * mFrameCount;
3258
3259 // TODO: We currently use the associated output device channel mask and sample rate.
3260 // (1) Perhaps use the ORed channel mask of all downstream MixerThreads
3261 // (if a valid mask) to avoid premature downmix.
3262 // (2) Perhaps use the maximum sample rate of all downstream MixerThreads
3263 // instead of the output device sample rate to avoid loss of high frequency information.
3264 // This may need to be updated as MixerThread/OutputTracks are added and not here.
3265 }
3266
3267 // Calculate size of normal sink buffer relative to the HAL output buffer size
3268 double multiplier = 1.0;
3269 // Note: mType == SPATIALIZER does not support FastMixer and DEEP is by definition not "fast"
3270 if ((mType == MIXER && !(mOutput->flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER)) &&
3271 (kUseFastMixer == FastMixer_Static || kUseFastMixer == FastMixer_Dynamic)) {
3272 size_t minNormalFrameCount = (kMinNormalSinkBufferSizeMs * mSampleRate) / 1000;
3273 size_t maxNormalFrameCount = (kMaxNormalSinkBufferSizeMs * mSampleRate) / 1000;
3274
3275 // round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
3276 minNormalFrameCount = (minNormalFrameCount + 15) & ~15;
3277 maxNormalFrameCount = maxNormalFrameCount & ~15;
3278 if (maxNormalFrameCount < minNormalFrameCount) {
3279 maxNormalFrameCount = minNormalFrameCount;
3280 }
3281 multiplier = (double) minNormalFrameCount / (double) mFrameCount;
3282 if (multiplier <= 1.0) {
3283 multiplier = 1.0;
3284 } else if (multiplier <= 2.0) {
3285 if (2 * mFrameCount <= maxNormalFrameCount) {
3286 multiplier = 2.0;
3287 } else {
3288 multiplier = (double) maxNormalFrameCount / (double) mFrameCount;
3289 }
3290 } else {
3291 multiplier = floor(multiplier);
3292 }
3293 }
3294 mNormalFrameCount = multiplier * mFrameCount;
3295 // round up to nearest 16 frames to satisfy AudioMixer
3296 if (hasMixer()) {
3297 mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
3298 }
3299 ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames",
3300 (size_t)mFrameCount, mNormalFrameCount);
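// Worked example (illustrative numbers, not constants from this file): with a 48 kHz
// stream, a 4 ms HAL buffer (192 frames) and a 20 ms minimum normal sink size,
// minNormalFrameCount = 20 * 48000 / 1000 = 960 (already a multiple of 16), so the
// multiplier is 960 / 192 = 5.0, floored to 5, giving a 960-frame normal sink buffer.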
3301
3302 // Check if we want to throttle the processing to no more than 2x normal rate
3303 mThreadThrottle = property_get_bool("af.thread.throttle", true /* default_value */);
3304 mThreadThrottleTimeMs = 0;
3305 mThreadThrottleEndMs = 0;
3306 mHalfBufferMs = mNormalFrameCount * 1000 / (2 * mSampleRate);
3307
3308 // mSinkBuffer is the sink buffer. Size is always multiple-of-16 frames.
3309 // Originally this was an int16_t[] array; legacy implications still need to be removed.
3310 free(mSinkBuffer);
3311 mSinkBuffer = NULL;
3312
3313 // For sink buffer size, we use the frame size from the downstream sink to avoid problems
3314 // with non PCM formats for compressed music, e.g. AAC, and Offload threads.
3315 const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
3316 (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
3317
3318 // We resize the mMixerBuffer according to the requirements of the sink buffer which
3319 // drives the output.
3320 free(mMixerBuffer);
3321 mMixerBuffer = NULL;
3322 if (mMixerBufferEnabled) {
3323 mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // no longer valid: AUDIO_FORMAT_PCM_16_BIT.
3324 mMixerBufferSize = mNormalFrameCount * mixerChannelCount
3325 * audio_bytes_per_sample(mMixerBufferFormat);
3326 (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
3327 }
3328 free(mEffectBuffer);
3329 mEffectBuffer = NULL;
3330 if (mEffectBufferEnabled) {
3331 mEffectBufferFormat = AUDIO_FORMAT_PCM_FLOAT;
3332 mEffectBufferSize = mNormalFrameCount * mixerChannelCount
3333 * audio_bytes_per_sample(mEffectBufferFormat);
3334 (void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
3335 }
3336
3337 if (mType == SPATIALIZER) {
3338 free(mPostSpatializerBuffer);
3339 mPostSpatializerBuffer = nullptr;
3340 mPostSpatializerBufferSize = mNormalFrameCount * mChannelCount
3341 * audio_bytes_per_sample(mEffectBufferFormat);
3342 (void)posix_memalign(&mPostSpatializerBuffer, 32, mPostSpatializerBufferSize);
3343 }
3344
3345 mHapticChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
3346 mChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & ~mHapticChannelMask);
3347 mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
3348 mChannelCount -= mHapticChannelCount;
3349 mMixerChannelMask = static_cast<audio_channel_mask_t>(mMixerChannelMask & ~mHapticChannelMask);
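// Example: if the HAL advertises a stereo mask plus two haptic channels, the haptic
// channels are split out here, so mChannelMask/mChannelCount describe only the audio
// channels (stereo, count 2), mHapticChannelCount becomes 2, and the haptic bits are
// removed from mMixerChannelMask as well.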
3350
3351 // force reconfiguration of effect chains and engines to take new buffer size and audio
3352 // parameters into account
3353 // Note that mutex() is not held when readOutputParameters_l() is called from the constructor
3354 // but in this case nothing is done below as no audio sessions have effect yet so it doesn't
3355 // matter.
3356 // create a copy of mEffectChains as calling moveEffectChain_ll()
3357 // can reorder some effect chains
3358 Vector<sp<IAfEffectChain>> effectChains = mEffectChains;
3359 for (size_t i = 0; i < effectChains.size(); i ++) {
3360 mAfThreadCallback->moveEffectChain_ll(effectChains[i]->sessionId(),
3361 this/* srcThread */, this/* dstThread */);
3362 }
3363
3364 audio_output_flags_t flags = mOutput->flags;
3365 mediametrics::LogItem item(mThreadMetrics.getMetricsId()); // TODO: method in ThreadMetrics?
3366 item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS)
3367 .set(AMEDIAMETRICS_PROP_ENCODING, IAfThreadBase::formatToString(mFormat).c_str())
3368 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
3369 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
3370 .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)mChannelCount)
3371 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mNormalFrameCount)
3372 .set(AMEDIAMETRICS_PROP_FLAGS, toString(flags).c_str())
3373 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELMASK,
3374 (int32_t)mHapticChannelMask)
3375 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELCOUNT,
3376 (int32_t)mHapticChannelCount)
3377 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_ENCODING,
3378 IAfThreadBase::formatToString(mHALFormat).c_str())
3379 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_FRAMECOUNT,
3380 (int32_t)mFrameCount) // sic - added HAL
3381 ;
3382 uint32_t latencyMs;
3383 if (mOutput->stream->getLatency(&latencyMs) == NO_ERROR) {
3384 item.set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_LATENCYMS, (double)latencyMs);
3385 }
3386 item.record();
3387 }
3388
3389 ThreadBase::MetadataUpdate PlaybackThread::updateMetadata_l()
3390 {
3391 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
3392 return {}; // nothing to do
3393 }
3394 StreamOutHalInterface::SourceMetadata metadata;
3395 static const bool stereo_spatialization_property =
3396 property_get_bool("ro.audio.stereo_spatialization_enabled", false);
3397 const bool stereo_spatialization_enabled =
3398 stereo_spatialization_property && com_android_media_audio_stereo_spatialization();
3399 if (stereo_spatialization_enabled) {
3400 std::map<audio_session_t, std::vector<playback_track_metadata_v7_t> >allSessionsMetadata;
3401 for (const sp<IAfTrack>& track : mActiveTracks) {
3402 std::vector<playback_track_metadata_v7_t>& sessionMetadata =
3403 allSessionsMetadata[track->sessionId()];
3404 auto backInserter = std::back_inserter(sessionMetadata);
3405 // No track is invalid as this is called after prepareTrack_l in the same
3406 // critical section
3407 track->copyMetadataTo(backInserter);
3408 }
3409 std::vector<playback_track_metadata_v7_t> spatializedTracksMetaData;
3410 for (const auto& [session, sessionTrackMetadata] : allSessionsMetadata) {
3411 metadata.tracks.insert(metadata.tracks.end(),
3412 sessionTrackMetadata.begin(), sessionTrackMetadata.end());
3413 if (auto chain = getEffectChain_l(session) ; chain != nullptr) {
3414 chain->sendMetadata_l(sessionTrackMetadata, {});
3415 }
3416 if ((hasAudioSession_l(session) & IAfThreadBase::SPATIALIZED_SESSION) != 0) {
3417 spatializedTracksMetaData.insert(spatializedTracksMetaData.end(),
3418 sessionTrackMetadata.begin(), sessionTrackMetadata.end());
3419 }
3420 }
3421 if (auto chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX); chain != nullptr) {
3422 chain->sendMetadata_l(metadata.tracks, {});
3423 }
3424 if (auto chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE); chain != nullptr) {
3425 chain->sendMetadata_l(metadata.tracks, spatializedTracksMetaData);
3426 }
3427 if (auto chain = getEffectChain_l(AUDIO_SESSION_DEVICE); chain != nullptr) {
3428 chain->sendMetadata_l(metadata.tracks, {});
3429 }
3430 } else {
3431 auto backInserter = std::back_inserter(metadata.tracks);
3432 for (const sp<IAfTrack>& track : mActiveTracks) {
3433 // No track is invalid as this is called after prepareTrack_l in the same
3434 // critical section
3435 track->copyMetadataTo(backInserter);
3436 }
3437 }
3438 sendMetadataToBackend_l(metadata);
3439 MetadataUpdate change;
3440 change.playbackMetadataUpdate = metadata.tracks;
3441 return change;
3442 }
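// Two paths above: with stereo spatialization enabled, track metadata is grouped per
// session so that per-session, output-mix, output-stage and device effect chains each
// receive the metadata relevant to them; otherwise the metadata of all active tracks is
// simply concatenated. In both cases the result is forwarded to the HAL via
// sendMetadataToBackend_l() and also returned for further propagation.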
3443
3444 void PlaybackThread::sendMetadataToBackend_l(
3445 const StreamOutHalInterface::SourceMetadata& metadata)
3446 {
3447 mOutput->stream->updateSourceMetadata(metadata);
3448 };
3449
3450 status_t PlaybackThread::getRenderPosition(
3451 uint32_t* halFrames, uint32_t* dspFrames) const
3452 {
3453 if (halFrames == NULL || dspFrames == NULL) {
3454 return BAD_VALUE;
3455 }
3456 audio_utils::lock_guard _l(mutex());
3457 if (initCheck() != NO_ERROR) {
3458 return INVALID_OPERATION;
3459 }
3460 int64_t framesWritten = mBytesWritten / mFrameSize;
3461 *halFrames = framesWritten;
3462
3463 if (isSuspended()) {
3464 // return an estimation of rendered frames when the output is suspended
3465 size_t latencyFrames = (latency_l() * mSampleRate) / 1000;
3466 *dspFrames = (uint32_t)
3467 (framesWritten >= (int64_t)latencyFrames ? framesWritten - latencyFrames : 0);
3468 return NO_ERROR;
3469 } else {
3470 status_t status;
3471 uint64_t frames = 0;
3472 status = mOutput->getRenderPosition(&frames);
3473 *dspFrames = (uint32_t)frames;
3474 return status;
3475 }
3476 }
3477
3478 product_strategy_t PlaybackThread::getStrategyForSession_l(audio_session_t sessionId) const
3479 {
3480 // session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
3481 // it is moved to correct output by audio policy manager when A2DP is connected or disconnected
3482 if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
3483 return getStrategyForStream(AUDIO_STREAM_MUSIC);
3484 }
3485 for (size_t i = 0; i < mTracks.size(); i++) {
3486 sp<IAfTrack> track = mTracks[i];
3487 if (sessionId == track->sessionId() && !track->isInvalid()) {
3488 return getStrategyForStream(track->streamType());
3489 }
3490 }
3491 return getStrategyForStream(AUDIO_STREAM_MUSIC);
3492 }
3493
3494
3495 AudioStreamOut* PlaybackThread::getOutput() const
3496 {
3497 audio_utils::lock_guard _l(mutex());
3498 return mOutput;
3499 }
3500
3501 AudioStreamOut* PlaybackThread::clearOutput()
3502 {
3503 audio_utils::lock_guard _l(mutex());
3504 AudioStreamOut *output = mOutput;
3505 mOutput = NULL;
3506 // FIXME FastMixer might also have a raw ptr to mOutputSink;
3507 // must push a NULL and wait for ack
3508 mOutputSink.clear();
3509 mPipeSink.clear();
3510 mNormalSink.clear();
3511 return output;
3512 }
3513
3514 // this method must always be called either with ThreadBase mutex() held or inside the thread loop
3515 sp<StreamHalInterface> PlaybackThread::stream() const
3516 {
3517 if (mOutput == NULL) {
3518 return NULL;
3519 }
3520 return mOutput->stream;
3521 }
3522
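// activeSleepTimeUs() below converts one normal sink buffer into microseconds using
// integer math: (frames * 1000 / rate) gives milliseconds, then * 1000 gives microseconds.
// For example, 960 frames at 48000 Hz -> 20 ms -> 20000 us.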
3523 uint32_t PlaybackThread::activeSleepTimeUs() const
3524 {
3525 return (uint32_t)((uint32_t)((mNormalFrameCount * 1000) / mSampleRate) * 1000);
3526 }
3527
3528 status_t PlaybackThread::setSyncEvent(const sp<SyncEvent>& event)
3529 {
3530 if (!isValidSyncEvent(event)) {
3531 return BAD_VALUE;
3532 }
3533
3534 audio_utils::lock_guard _l(mutex());
3535
3536 for (size_t i = 0; i < mTracks.size(); ++i) {
3537 sp<IAfTrack> track = mTracks[i];
3538 if (event->triggerSession() == track->sessionId()) {
3539 (void) track->setSyncEvent(event);
3540 return NO_ERROR;
3541 }
3542 }
3543
3544 return NAME_NOT_FOUND;
3545 }
3546
3547 bool PlaybackThread::isValidSyncEvent(const sp<SyncEvent>& event) const
3548 {
3549 return event->type() == AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE;
3550 }
3551
3552 void PlaybackThread::threadLoop_removeTracks(
3553 [[maybe_unused]] const Vector<sp<IAfTrack>>& tracksToRemove)
3554 {
3555 // Miscellaneous track cleanup when removed from the active list,
3556 // called without Thread lock but synchronized with threadLoop processing.
3557 #ifdef ADD_BATTERY_DATA
3558 for (const auto& track : tracksToRemove) {
3559 if (track->isExternalTrack()) {
3560 // to track the speaker usage
3561 addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
3562 }
3563 }
3564 #endif
3565 }
3566
3567 void PlaybackThread::checkSilentMode_l()
3568 {
3569 if (property_get_bool("ro.audio.silent", false)) {
3570 ALOGW("ro.audio.silent is now ignored");
3571 }
3572 }
3573
3574 // shared by MIXER and DIRECT, overridden by DUPLICATING
3575 ssize_t PlaybackThread::threadLoop_write()
3576 {
3577 LOG_HIST_TS();
3578 mInWrite = true;
3579 ssize_t bytesWritten;
3580 const size_t offset = mCurrentWriteLength - mBytesRemaining;
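// offset is the index of the first byte not yet written: when a previous HAL write was
// short, mBytesRemaining still holds the unwritten tail of mCurrentWriteLength and this
// pass resumes from where that write stopped.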
3581
3582 // If an NBAIO sink is present, use it to write the normal mixer's submix
3583 if (mNormalSink != 0) {
3584
3585 const size_t count = mBytesRemaining / mFrameSize;
3586
3587 ATRACE_BEGIN("write");
3588 // update the setpoint when AudioFlinger::mScreenState changes
3589 const uint32_t screenState = mAfThreadCallback->getScreenState();
3590 if (screenState != mScreenState) {
3591 mScreenState = screenState;
3592 MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
3593 if (pipe != NULL) {
3594 pipe->setAvgFrames((mScreenState & 1) ?
3595 (pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
3596 }
3597 }
3598 ssize_t framesWritten = mNormalSink->write((char *)mSinkBuffer + offset, count);
3599 ATRACE_END();
3600
3601 if (framesWritten > 0) {
3602 bytesWritten = framesWritten * mFrameSize;
3603
3604 #ifdef TEE_SINK
3605 mTee.write((char *)mSinkBuffer + offset, framesWritten);
3606 #endif
3607 } else {
3608 bytesWritten = framesWritten;
3609 }
3610 // otherwise use the HAL / AudioStreamOut directly
3611 } else {
3612 // Direct output and offload threads
3613
3614 if (mUseAsyncWrite) {
3615 ALOGW_IF(mWriteAckSequence & 1, "threadLoop_write(): out of sequence write request");
3616 mWriteAckSequence += 2;
3617 mWriteAckSequence |= 1;
3618 ALOG_ASSERT(mCallbackThread != 0);
3619 mCallbackThread->setWriteBlocked(mWriteAckSequence);
3620 }
3621 ATRACE_BEGIN("write");
3622 // FIXME We should have an implementation of timestamps for direct output threads.
3623 // They are used e.g for multichannel PCM playback over HDMI.
3624 bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
3625 ATRACE_END();
3626
3627 if (mUseAsyncWrite &&
3628 ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
3629 // do not wait for the async callback in case of error or full write
3630 mWriteAckSequence &= ~1;
3631 ALOG_ASSERT(mCallbackThread != 0);
3632 mCallbackThread->setWriteBlocked(mWriteAckSequence);
3633 }
3634 }
3635
3636 mNumWrites++;
3637 mInWrite = false;
3638 if (mStandby) {
3639 mThreadMetrics.logBeginInterval();
3640 mThreadSnapshot.onBegin();
3641 mStandby = false;
3642 }
3643 return bytesWritten;
3644 }
3645
3646 // startMelComputation_l() must be called with AudioFlinger::mutex() held
3647 void PlaybackThread::startMelComputation_l(
3648 const sp<audio_utils::MelProcessor>& processor)
3649 {
3650 auto outputSink = static_cast<AudioStreamOutSink*>(mOutputSink.get());
3651 if (outputSink != nullptr) {
3652 outputSink->startMelComputation(processor);
3653 }
3654 }
3655
3656 // stopMelComputation_l() must be called with AudioFlinger::mutex() held
3657 void PlaybackThread::stopMelComputation_l()
3658 {
3659 auto outputSink = static_cast<AudioStreamOutSink*>(mOutputSink.get());
3660 if (outputSink != nullptr) {
3661 outputSink->stopMelComputation();
3662 }
3663 }
3664
3665 void PlaybackThread::threadLoop_drain()
3666 {
3667 bool supportsDrain = false;
3668 if (mOutput->stream->supportsDrain(&supportsDrain) == OK && supportsDrain) {
3669 ALOGV("draining %s", (mMixerStatus == MIXER_DRAIN_TRACK) ? "early" : "full");
3670 if (mUseAsyncWrite) {
3671 ALOGW_IF(mDrainSequence & 1, "threadLoop_drain(): out of sequence drain request");
3672 mDrainSequence |= 1;
3673 ALOG_ASSERT(mCallbackThread != 0);
3674 mCallbackThread->setDraining(mDrainSequence);
3675 }
3676 status_t result = mOutput->stream->drain(mMixerStatus == MIXER_DRAIN_TRACK);
3677 ALOGE_IF(result != OK, "Error when draining stream: %d", result);
3678 }
3679 }
3680
3681 void PlaybackThread::threadLoop_exit()
3682 {
3683 {
3684 audio_utils::lock_guard _l(mutex());
3685 for (size_t i = 0; i < mTracks.size(); i++) {
3686 sp<IAfTrack> track = mTracks[i];
3687 track->invalidate();
3688 }
3689 // Clear ActiveTracks to update BatteryNotifier in case active tracks remain.
3690 // After we exit there are no more track changes sent to BatteryNotifier
3691 // because that requires an active threadLoop.
3692 // TODO: should we decActiveTrackCnt() of the cleared track effect chain?
3693 mActiveTracks.clear();
3694 }
3695 }
3696
3697 /*
3698 The derived values that are cached:
3699 - mSinkBufferSize from frame count * frame size
3700 - mActiveSleepTimeUs from activeSleepTimeUs()
3701 - mIdleSleepTimeUs from idleSleepTimeUs()
3702 - mStandbyDelayNs from mActiveSleepTimeUs (DIRECT only) or forced to at least
3703 kDefaultStandbyTimeInNsecs when connected to an A2DP device.
3704 - maxPeriod from frame count and sample rate (MIXER only)
3705
3706 The parameters that affect these derived values are:
3707 - frame count
3708 - frame size
3709 - sample rate
3710 - device type: A2DP or not
3711 - device latency
3712 - format: PCM or not
3713 - active sleep time
3714 - idle sleep time
3715 */
3716
3717 void PlaybackThread::cacheParameters_l()
3718 {
3719 mSinkBufferSize = mNormalFrameCount * mFrameSize;
3720 mActiveSleepTimeUs = activeSleepTimeUs();
3721 mIdleSleepTimeUs = idleSleepTimeUs();
3722
3723 mStandbyDelayNs = getStandbyTimeInNanos();
3724
3725 // make sure standby delay is not too short when connected to an A2DP sink to avoid
3726 // truncating audio when going to standby.
3727 if (!Intersection(outDeviceTypes_l(), getAudioDeviceOutAllA2dpSet()).empty()) {
3728 if (mStandbyDelayNs < kDefaultStandbyTimeInNsecs) {
3729 mStandbyDelayNs = kDefaultStandbyTimeInNsecs;
3730 }
3731 }
3732 }
3733
3734 bool PlaybackThread::invalidateTracks_l(audio_stream_type_t streamType)
3735 {
3736 ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %zu",
3737 this, streamType, mTracks.size());
3738 bool trackMatch = false;
3739 size_t size = mTracks.size();
3740 for (size_t i = 0; i < size; i++) {
3741 sp<IAfTrack> t = mTracks[i];
3742 if (t->streamType() == streamType && t->isExternalTrack()) {
3743 t->invalidate();
3744 trackMatch = true;
3745 }
3746 }
3747 return trackMatch;
3748 }
3749
3750 void PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
3751 {
3752 audio_utils::lock_guard _l(mutex());
3753 invalidateTracks_l(streamType);
3754 }
3755
3756 void PlaybackThread::invalidateTracks(std::set<audio_port_handle_t>& portIds) {
3757 audio_utils::lock_guard _l(mutex());
3758 invalidateTracks_l(portIds);
3759 }
3760
3761 bool PlaybackThread::invalidateTracks_l(std::set<audio_port_handle_t>& portIds) {
3762 bool trackMatch = false;
3763 const size_t size = mTracks.size();
3764 for (size_t i = 0; i < size; i++) {
3765 sp<IAfTrack> t = mTracks[i];
3766 if (t->isExternalTrack() && portIds.find(t->portId()) != portIds.end()) {
3767 t->invalidate();
3768 portIds.erase(t->portId());
3769 trackMatch = true;
3770 }
3771 if (portIds.empty()) {
3772 break;
3773 }
3774 }
3775 return trackMatch;
3776 }
3777
3778 // getTrackById_l must be called with holding thread lock
3779 IAfTrack* PlaybackThread::getTrackById_l(
3780 audio_port_handle_t trackPortId) {
3781 for (size_t i = 0; i < mTracks.size(); i++) {
3782 if (mTracks[i]->portId() == trackPortId) {
3783 return mTracks[i].get();
3784 }
3785 }
3786 return nullptr;
3787 }
3788
3789 status_t PlaybackThread::addEffectChain_l(const sp<IAfEffectChain>& chain)
3790 {
3791 audio_session_t session = chain->sessionId();
3792 sp<EffectBufferHalInterface> halInBuffer, halOutBuffer;
3793 float *buffer = nullptr; // only used for non global sessions
3794
3795 if (mType == SPATIALIZER) {
3796 if (!audio_is_global_session(session)) {
3797 // player sessions on a spatializer output will use a dedicated input buffer and
3798 // will output either multichannel audio to mEffectBuffer if the track is spatialized
3799 // or stereo to mPostSpatializerBuffer if it is not.
3800 uint32_t channelMask;
3801 bool isSessionSpatialized =
3802 (hasAudioSession_l(session) & ThreadBase::SPATIALIZED_SESSION) != 0;
3803 if (isSessionSpatialized) {
3804 channelMask = mMixerChannelMask;
3805 } else {
3806 channelMask = mChannelMask;
3807 }
3808 size_t numSamples = mNormalFrameCount
3809 * (audio_channel_count_from_out_mask(channelMask) + mHapticChannelCount);
3810 status_t result = mAfThreadCallback->getEffectsFactoryHal()->allocateBuffer(
3811 numSamples * sizeof(float),
3812 &halInBuffer);
3813 if (result != OK) return result;
3814
3815 result = mAfThreadCallback->getEffectsFactoryHal()->mirrorBuffer(
3816 isSessionSpatialized ? mEffectBuffer : mPostSpatializerBuffer,
3817 isSessionSpatialized ? mEffectBufferSize : mPostSpatializerBufferSize,
3818 &halOutBuffer);
3819 if (result != OK) return result;
3820
3821 buffer = halInBuffer ? halInBuffer->audioBuffer()->f32 : buffer;
3822
3823 ALOGV("addEffectChain_l() creating new input buffer %p session %d",
3824 buffer, session);
3825 } else {
3826 status_t result = INVALID_OPERATION;
3827 // Buffer configuration for global sessions on a SPATIALIZER thread:
3828 // - AUDIO_SESSION_OUTPUT_MIX session uses the mEffectBuffer as input and output buffer
3829 // - AUDIO_SESSION_OUTPUT_STAGE session uses the mEffectBuffer as input buffer and
3830 // mPostSpatializerBuffer as output buffer
3831 // - AUDIO_SESSION_DEVICE session uses the mPostSpatializerBuffer as input and output
3832 // buffer
3833 if (session == AUDIO_SESSION_OUTPUT_MIX || session == AUDIO_SESSION_OUTPUT_STAGE) {
3834 result = mAfThreadCallback->getEffectsFactoryHal()->mirrorBuffer(
3835 mEffectBuffer, mEffectBufferSize, &halInBuffer);
3836 if (result != OK) return result;
3837
3838 if (session == AUDIO_SESSION_OUTPUT_MIX) {
3839 halOutBuffer = halInBuffer;
3840 }
3841 }
3842
3843 if (session == AUDIO_SESSION_OUTPUT_STAGE || session == AUDIO_SESSION_DEVICE) {
3844 result = mAfThreadCallback->getEffectsFactoryHal()->mirrorBuffer(
3845 mPostSpatializerBuffer, mPostSpatializerBufferSize, &halOutBuffer);
3846 if (result != OK) return result;
3847
3848 if (session == AUDIO_SESSION_DEVICE) {
3849 halInBuffer = halOutBuffer;
3850 }
3851 }
3852 }
3853 } else {
3854 status_t result = mAfThreadCallback->getEffectsFactoryHal()->mirrorBuffer(
3855 mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
3856 mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
3857 &halInBuffer);
3858 if (result != OK) return result;
3859 halOutBuffer = halInBuffer;
3860 ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
3861 if (!audio_is_global_session(session)) {
3862 buffer = halInBuffer ? reinterpret_cast<float*>(halInBuffer->externalData())
3863 : buffer;
3864 // Only one effect chain can be present in direct output thread and it uses
3865 // the sink buffer as input
3866 if (mType != DIRECT) {
3867 size_t numSamples = mNormalFrameCount
3868 * (audio_channel_count_from_out_mask(mMixerChannelMask)
3869 + mHapticChannelCount);
3870 const status_t allocateStatus =
3871 mAfThreadCallback->getEffectsFactoryHal()->allocateBuffer(
3872 numSamples * sizeof(float),
3873 &halInBuffer);
3874 if (allocateStatus != OK) return allocateStatus;
3875
3876 buffer = halInBuffer ? halInBuffer->audioBuffer()->f32 : buffer;
3877 ALOGV("addEffectChain_l() creating new input buffer %p session %d",
3878 buffer, session);
3879 }
3880 }
3881 }
3882
3883 if (!audio_is_global_session(session)) {
3884 // Attach all tracks with same session ID to this chain.
3885 for (size_t i = 0; i < mTracks.size(); ++i) {
3886 sp<IAfTrack> track = mTracks[i];
3887 if (session == track->sessionId()) {
3888 ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p",
3889 track.get(), buffer);
3890 track->setMainBuffer(buffer);
3891 chain->incTrackCnt();
3892 }
3893 }
3894
3895 // indicate all active tracks in the chain
3896 for (const sp<IAfTrack>& track : mActiveTracks) {
3897 if (session == track->sessionId()) {
3898 ALOGV("addEffectChain_l() activating track %p on session %d",
3899 track.get(), session);
3900 chain->incActiveTrackCnt();
3901 }
3902 }
3903 }
3904
3905 chain->setThread(this);
3906 chain->setInBuffer(halInBuffer);
3907 chain->setOutBuffer(halOutBuffer);
3908 // Effect chain for session AUDIO_SESSION_DEVICE is inserted at end of effect
3909 // chains list in order to be processed last as it contains output device effects.
3910 // Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted just before to apply post
3911 // processing effects specific to an output stream before effects applied to all streams
3912 // routed to a given device.
3913 // Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
3914 // session AUDIO_SESSION_OUTPUT_STAGE to be processed
3915 // after track specific effects and before output stage.
3916 // It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and
3917 // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX.
3918 // Effect chain for other sessions are inserted at beginning of effect
3919 // chains list to be processed before output mix effects. Relative order between other
3920 // sessions is not important.
3921 static_assert(AUDIO_SESSION_OUTPUT_MIX == 0 &&
3922 AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX &&
3923 AUDIO_SESSION_DEVICE < AUDIO_SESSION_OUTPUT_STAGE,
3924 "audio_session_t constants misdefined");
3925 size_t size = mEffectChains.size();
3926 size_t i = 0;
3927 for (i = 0; i < size; i++) {
3928 if (mEffectChains[i]->sessionId() < session) {
3929 break;
3930 }
3931 }
3932 mEffectChains.insertAt(chain, i);
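// The loop above keeps mEffectChains sorted by descending session id, so iteration order
// (and thus processing order) is: application sessions first, then AUDIO_SESSION_OUTPUT_MIX,
// then AUDIO_SESSION_OUTPUT_STAGE, and finally AUDIO_SESSION_DEVICE.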
3933 checkSuspendOnAddEffectChain_l(chain);
3934
3935 return NO_ERROR;
3936 }
3937
3938 size_t PlaybackThread::removeEffectChain_l(const sp<IAfEffectChain>& chain)
3939 {
3940 audio_session_t session = chain->sessionId();
3941
3942 ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
3943
3944 for (size_t i = 0; i < mEffectChains.size(); i++) {
3945 if (chain == mEffectChains[i]) {
3946 mEffectChains.removeAt(i);
3947 // detach all active tracks from the chain
3948 for (const sp<IAfTrack>& track : mActiveTracks) {
3949 if (session == track->sessionId()) {
3950 ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d",
3951 chain.get(), session);
3952 chain->decActiveTrackCnt();
3953 }
3954 }
3955
3956 // detach all tracks with same session ID from this chain
3957 for (size_t j = 0; j < mTracks.size(); ++j) {
3958 sp<IAfTrack> track = mTracks[j];
3959 if (session == track->sessionId()) {
3960 track->setMainBuffer(reinterpret_cast<float*>(mSinkBuffer));
3961 chain->decTrackCnt();
3962 }
3963 }
3964 break;
3965 }
3966 }
3967 return mEffectChains.size();
3968 }
3969
3970 status_t PlaybackThread::attachAuxEffect(
3971 const sp<IAfTrack>& track, int EffectId)
3972 {
3973 audio_utils::lock_guard _l(mutex());
3974 return attachAuxEffect_l(track, EffectId);
3975 }
3976
3977 status_t PlaybackThread::attachAuxEffect_l(
3978 const sp<IAfTrack>& track, int EffectId)
3979 {
3980 status_t status = NO_ERROR;
3981
3982 if (EffectId == 0) {
3983 track->setAuxBuffer(0, NULL);
3984 } else {
3985 // Auxiliary effects are always in audio session AUDIO_SESSION_OUTPUT_MIX
3986 sp<IAfEffectModule> effect = getEffect_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
3987 if (effect != 0) {
3988 if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
3989 track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer());
3990 } else {
3991 status = INVALID_OPERATION;
3992 }
3993 } else {
3994 status = BAD_VALUE;
3995 }
3996 }
3997 return status;
3998 }
3999
4000 void PlaybackThread::detachAuxEffect_l(int effectId)
4001 {
4002 for (size_t i = 0; i < mTracks.size(); ++i) {
4003 sp<IAfTrack> track = mTracks[i];
4004 if (track->auxEffectId() == effectId) {
4005 attachAuxEffect_l(track, 0);
4006 }
4007 }
4008 }
4009
4010 bool PlaybackThread::threadLoop()
4011 NO_THREAD_SAFETY_ANALYSIS // manual locking of AudioFlinger
4012 {
4013 aflog::setThreadWriter(mNBLogWriter.get());
4014
4015 if (mType == SPATIALIZER) {
4016 const pid_t tid = getTid();
4017 if (tid == -1) { // odd: we are here, we must be a running thread.
4018 ALOGW("%s: Cannot update Spatializer mixer thread priority, no tid", __func__);
4019 } else {
4020 const int priorityBoost = requestSpatializerPriority(getpid(), tid);
4021 if (priorityBoost > 0) {
4022 stream()->setHalThreadPriority(priorityBoost);
4023 }
4024 }
4025 } else if (property_get_bool("ro.boot.container", false /* default_value */)) {
4026 // In ARC experiments (b/73091832), the latency under using CFS scheduler with any priority
4027 // is not enough for PlaybackThread to process audio data in time. We request the lowest
4028 // real-time priority, SCHED_FIFO=1, for PlaybackThread in ARC. ro.boot.container is true
4029 // only on ARC.
4030 const pid_t tid = getTid();
4031 if (tid == -1) {
4032 ALOGW("%s: Cannot update PlaybackThread priority for ARC, no tid", __func__);
4033 } else {
4034 const status_t status = requestPriority(getpid(),
4035 tid,
4036 kPriorityPlaybackThreadArc,
4037 false /* isForApp */,
4038 true /* asynchronous */);
4039 if (status != OK) {
4040 ALOGW("%s: Cannot update PlaybackThread priority for ARC, status %d", __func__,
4041 status);
4042 } else {
4043 stream()->setHalThreadPriority(kPriorityPlaybackThreadArc);
4044 }
4045 }
4046 }
4047
4048 Vector<sp<IAfTrack>> tracksToRemove;
4049
4050 mStandbyTimeNs = systemTime();
4051 int64_t lastLoopCountWritten = -2; // never matches "previous" loop, when loopCount = 0.
4052
4053 // MIXER
4054 nsecs_t lastWarning = 0;
4055
4056 // DUPLICATING
4057 // FIXME could this be made local to while loop?
4058 writeFrames = 0;
4059
4060 {
4061 audio_utils::lock_guard l(mutex());
4062
4063 cacheParameters_l();
4064 checkSilentMode_l();
4065 }
4066
4067 mSleepTimeUs = mIdleSleepTimeUs;
4068
4069 if (mType == MIXER || mType == SPATIALIZER) {
4070 sleepTimeShift = 0;
4071 }
4072
4073 CpuStats cpuStats;
4074 const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
4075
4076 acquireWakeLock();
4077
4078 // mNBLogWriter logging APIs can only be called by a single thread, typically the
4079 // thread associated with this PlaybackThread.
4080 // If you want to share the mNBLogWriter with other threads (for example, binder threads)
4081 // then all such threads must agree to hold a common mutex before logging.
4082 // So if you need to log when mutex is unlocked, set logString to a non-NULL string,
4083 // and then that string will be logged at the next convenient opportunity.
4084 // See reference to logString below.
4085 const char *logString = NULL;
4086
4087 // Estimated time for next buffer to be written to hal. This is used only on
4088 // suspended mode (for now) to help schedule the wait time until next iteration.
4089 nsecs_t timeLoopNextNs = 0;
4090
4091 audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
4092
4093 sendCheckOutputStageEffectsEvent();
4094
4095 // loopCount is used for statistics and diagnostics.
4096 for (int64_t loopCount = 0; !exitPending(); ++loopCount)
4097 {
4098 // Log merge requests are performed during AudioFlinger binder transactions, but
4099 // that does not cover audio playback. It's requested here for that reason.
4100 mAfThreadCallback->requestLogMerge();
4101
4102 cpuStats.sample(myName);
4103
4104 Vector<sp<IAfEffectChain>> effectChains;
4105 audio_session_t activeHapticSessionId = AUDIO_SESSION_NONE;
4106 bool isHapticSessionSpatialized = false;
4107 std::vector<sp<IAfTrack>> activeTracks;
4108
4109 // If the device is AUDIO_DEVICE_OUT_BUS, check for downstream latency.
4110 //
4111 // Note: we access outDeviceTypes() outside of mutex().
4112 if (isMsdDevice() && outDeviceTypes_l().count(AUDIO_DEVICE_OUT_BUS) != 0) {
4113 // Here, we try for the AF lock, but do not block on it as the latency
4114 // is more informational.
4115 if (mAfThreadCallback->mutex().try_lock()) {
4116 std::vector<SoftwarePatch> swPatches;
4117 double latencyMs = 0.; // not required; initialized for clang-tidy
4118 status_t status = INVALID_OPERATION;
4119 audio_patch_handle_t downstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
4120 if (mAfThreadCallback->getPatchPanel()->getDownstreamSoftwarePatches(
4121 id(), &swPatches) == OK
4122 && swPatches.size() > 0) {
4123 status = swPatches[0].getLatencyMs_l(&latencyMs);
4124 downstreamPatchHandle = swPatches[0].getPatchHandle();
4125 }
4126 if (downstreamPatchHandle != lastDownstreamPatchHandle) {
4127 mDownstreamLatencyStatMs.reset();
4128 lastDownstreamPatchHandle = downstreamPatchHandle;
4129 }
4130 if (status == OK) {
4131 // verify downstream latency (we assume a max reasonable
4132 // latency of 5 seconds).
4133 const double minLatency = 0., maxLatency = 5000.;
4134 if (latencyMs >= minLatency && latencyMs <= maxLatency) {
4135 ALOGVV("new downstream latency %lf ms", latencyMs);
4136 } else {
4137 ALOGD("out of range downstream latency %lf ms", latencyMs);
4138 latencyMs = std::clamp(latencyMs, minLatency, maxLatency);
4139 }
4140 mDownstreamLatencyStatMs.add(latencyMs);
4141 }
4142 mAfThreadCallback->mutex().unlock();
4143 }
4144 } else {
4145 if (lastDownstreamPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
4146 // our device is no longer AUDIO_DEVICE_OUT_BUS, reset patch handle and stats.
4147 mDownstreamLatencyStatMs.reset();
4148 lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
4149 }
4150 }
4151
4152 if (mCheckOutputStageEffects.exchange(false)) {
4153 checkOutputStageEffects();
4154 }
4155
4156 MetadataUpdate metadataUpdate;
4157 { // scope for mutex()
4158
4159 audio_utils::unique_lock _l(mutex());
4160
4161 processConfigEvents_l();
4162 if (mCheckOutputStageEffects.load()) {
4163 continue;
4164 }
4165
4166 // See comment at declaration of logString for why this is done under mutex()
4167 if (logString != NULL) {
4168 mNBLogWriter->logTimestamp();
4169 mNBLogWriter->log(logString);
4170 logString = NULL;
4171 }
4172
4173 collectTimestamps_l();
4174
4175 saveOutputTracks();
4176 if (mSignalPending) {
4177 // A signal was raised while we were unlocked
4178 mSignalPending = false;
4179 } else if (waitingAsyncCallback_l()) {
4180 if (exitPending()) {
4181 break;
4182 }
4183 bool released = false;
4184 if (!keepWakeLock()) {
4185 releaseWakeLock_l();
4186 released = true;
4187 }
4188
4189 const int64_t waitNs = computeWaitTimeNs_l();
4190 ALOGV("wait async completion (wait time: %lld)", (long long)waitNs);
4191 std::cv_status cvstatus =
4192 mWaitWorkCV.wait_for(_l, std::chrono::nanoseconds(waitNs));
4193 if (cvstatus == std::cv_status::timeout) {
4194 mSignalPending = true; // if timeout recheck everything
4195 }
4196 ALOGV("async completion/wake");
4197 if (released) {
4198 acquireWakeLock_l();
4199 }
4200 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
4201 mSleepTimeUs = 0;
4202
4203 continue;
4204 }
4205 if ((mActiveTracks.isEmpty() && systemTime() > mStandbyTimeNs) ||
4206 isSuspended()) {
4207 // put audio hardware into standby after short delay
4208 if (shouldStandby_l()) {
4209
4210 threadLoop_standby();
4211
4212 // This is where we go into standby
4213 if (!mStandby) {
4214 LOG_AUDIO_STATE();
4215 mThreadMetrics.logEndInterval();
4216 mThreadSnapshot.onEnd();
4217 setStandby_l();
4218 }
4219 sendStatistics(false /* force */);
4220 }
4221
4222 if (mActiveTracks.isEmpty() && mConfigEvents.isEmpty()) {
4223 // we're about to wait, flush the binder command buffer
4224 IPCThreadState::self()->flushCommands();
4225
4226 clearOutputTracks();
4227
4228 if (exitPending()) {
4229 break;
4230 }
4231
4232 releaseWakeLock_l();
4233 // wait until we have something to do...
4234 ALOGV("%s going to sleep", myName.c_str());
4235 mWaitWorkCV.wait(_l);
4236 ALOGV("%s waking up", myName.c_str());
4237 acquireWakeLock_l();
4238
4239 mMixerStatus = MIXER_IDLE;
4240 mMixerStatusIgnoringFastTracks = MIXER_IDLE;
4241 mBytesWritten = 0;
4242 mBytesRemaining = 0;
4243 checkSilentMode_l();
4244
4245 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
4246 mSleepTimeUs = mIdleSleepTimeUs;
4247 if (mType == MIXER || mType == SPATIALIZER) {
4248 sleepTimeShift = 0;
4249 }
4250
4251 continue;
4252 }
4253 }
4254 // mMixerStatusIgnoringFastTracks is also updated internally
4255 mMixerStatus = prepareTracks_l(&tracksToRemove);
4256
4257 mActiveTracks.updatePowerState_l(this);
4258
4259 metadataUpdate = updateMetadata_l();
4260
4261 // Acquire a local copy of active tracks with lock (release w/o lock).
4262 //
4263 // Control methods on the track acquire the ThreadBase lock (e.g. start()
4264 // stop(), pause(), etc.), but the threadLoop is entitled to call audio
4265 // data / buffer methods on tracks from activeTracks without the ThreadBase lock.
4266 activeTracks.insert(activeTracks.end(), mActiveTracks.begin(), mActiveTracks.end());
4267
4268 setHalLatencyMode_l();
4269
4270 // updateTeePatches_l will acquire the ThreadBase_Mutex of other threads,
4271 // so this is done before we lock our effect chains.
4272 for (const auto& track : mActiveTracks) {
4273 track->updateTeePatches_l();
4274 }
4275
4276 // check if traces have been enabled.
4277 bool atraceEnabled = ATRACE_ENABLED();
4278 if (atraceEnabled != mAtraceEnabled) [[unlikely]] {
4279 mAtraceEnabled = atraceEnabled;
4280 if (atraceEnabled) {
4281 const auto devices = patchSinksToString(&mPatch);
4282 for (const auto& track : activeTracks) {
4283 track->logRefreshInterval(devices);
4284 }
4285 }
4286 }
4287 // signal actual start of output stream when the render position reported by
4288 // the kernel starts moving.
4289 if (!mHalStarted && ((isSuspended() && (mBytesWritten != 0)) || (!mStandby
4290 && (mKernelPositionOnStandby
4291 != mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL])))) {
4292 mHalStarted = true;
4293 mWaitHalStartCV.notify_all();
4294 }
4295
4296 // prevent any changes in effect chain list and in each effect chain
4297 // during mixing and effect process as the audio buffers could be deleted
4298 // or modified if an effect is created or deleted
4299 lockEffectChains_l(effectChains);
4300
4301 // Determine which session to pick up haptic data.
4302 // This must be done under the same lock as prepareTracks_l().
4303 // The haptic data from the effect is at a higher priority than the one from track.
4304 // TODO: Write haptic data directly to sink buffer when mixing.
4305 if (mHapticChannelCount > 0) {
4306 for (const auto& track : mActiveTracks) {
4307 sp<IAfEffectChain> effectChain = getEffectChain_l(track->sessionId());
4308 if (effectChain != nullptr
4309 && effectChain->containsHapticGeneratingEffect_l()) {
4310 activeHapticSessionId = track->sessionId();
4311 isHapticSessionSpatialized =
4312 mType == SPATIALIZER && track->isSpatialized();
4313 break;
4314 }
4315 if (activeHapticSessionId == AUDIO_SESSION_NONE
4316 && track->getHapticPlaybackEnabled()) {
4317 activeHapticSessionId = track->sessionId();
4318 isHapticSessionSpatialized =
4319 mType == SPATIALIZER && track->isSpatialized();
4320 }
4321 }
4322 }
4323 } // mutex() scope ends
4324
4325 if (mBytesRemaining == 0) {
4326 mCurrentWriteLength = 0;
4327 if (mMixerStatus == MIXER_TRACKS_READY) {
4328 // threadLoop_mix() sets mCurrentWriteLength
4329 threadLoop_mix();
4330 } else if ((mMixerStatus != MIXER_DRAIN_TRACK)
4331 && (mMixerStatus != MIXER_DRAIN_ALL)) {
4332 // threadLoop_sleepTime sets mSleepTimeUs to 0 if data
4333 // must be written to HAL
4334 threadLoop_sleepTime();
4335 if (mSleepTimeUs == 0) {
4336 mCurrentWriteLength = mSinkBufferSize;
4337
4338 // Tally underrun frames as we are inserting 0s here.
4339 for (const auto& track : activeTracks) {
4340 if (track->fillingStatus() == IAfTrack::FS_ACTIVE
4341 && !track->isStopped()
4342 && !track->isPaused()
4343 && !track->isTerminated()) {
4344 ALOGV("%s: track(%d) %s underrun due to thread sleep of %zu frames",
4345 __func__, track->id(), track->getTrackStateAsString(),
4346 mNormalFrameCount);
4347 track->audioTrackServerProxy()->tallyUnderrunFrames(
4348 mNormalFrameCount);
4349 }
4350 }
4351 }
4352 }
4353 // Either threadLoop_mix() or threadLoop_sleepTime() should have set
4354 // mMixerBuffer with data if mMixerBufferValid is true and mSleepTimeUs == 0.
4355 // Merge mMixerBuffer data into mEffectBuffer (if any effects are valid)
4356 // or mSinkBuffer (if there are no effects and there is no data already copied to
4357 // mSinkBuffer).
4358 //
4359 // This is done pre-effects computation; if effects change to
4360 // support higher precision, this needs to move.
4361 //
4362 // mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
4363 // TODO use mSleepTimeUs == 0 as an additional condition.
4364 uint32_t mixerChannelCount = mEffectBufferValid ?
4365 audio_channel_count_from_out_mask(mMixerChannelMask) : mChannelCount;
4366 if (mMixerBufferValid && (mEffectBufferValid || !mHasDataCopiedToSinkBuffer)) {
4367 void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
4368 audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
4369
4370 // Apply mono blending and balancing if the effect buffer is not valid. Otherwise,
4371 // do these processes after effects are applied.
4372 if (!mEffectBufferValid) {
4373 // mono blend occurs for mixer threads only (not direct or offloaded)
4374 // and is handled here if we're going directly to the sink.
4375 if (requireMonoBlend()) {
4376 mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount,
4377 mNormalFrameCount, true /*limit*/);
4378 }
4379
4380 if (!hasFastMixer()) {
4381 // Balance must take effect after mono conversion.
4382 // We do it here if there is no FastMixer.
4383 // mBalance detects zero balance within the class for speed
4384 // (not needed here).
4385 mBalance.setBalance(mMasterBalance.load());
4386 mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
4387 }
4388 }
4389
4390 memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
4391 mNormalFrameCount * (mixerChannelCount + mHapticChannelCount));
4392
4393 // If we're going directly to the sink and there are haptic channels,
4394 // we should adjust channels as the sample data is partially interleaved
4395 // in this case.
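            // "Partially interleaved" here means the interleaved audio frames come first and
            // all haptic samples are appended at the end of the buffer;
            // adjust_channels_non_destructive() weaves those trailing haptic samples into each
            // frame so the sink sees full (audio + haptic) frames.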
4396 if (!mEffectBufferValid && mHapticChannelCount > 0) {
4397 adjust_channels_non_destructive(buffer, mChannelCount, buffer,
4398 mChannelCount + mHapticChannelCount,
4399 audio_bytes_per_sample(format),
4400 audio_bytes_per_frame(mChannelCount, format) * mNormalFrameCount);
4401 }
4402 }
4403
4404 mBytesRemaining = mCurrentWriteLength;
4405 if (isSuspended()) {
4406 // Simulate write to HAL when suspended (e.g. BT SCO phone call).
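        // Nothing reaches the HAL here, but the frames are still counted as written and
        // accumulated in mSuspendedFrames so client-visible positions keep advancing;
        // collectTimestamps_l() later adds mSuspendedFrames back onto the kernel position.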
4407 mSleepTimeUs = suspendSleepTimeUs(); // assumes full buffer.
4408 const size_t framesRemaining = mBytesRemaining / mFrameSize;
4409 mBytesWritten += mBytesRemaining;
4410 mFramesWritten += framesRemaining;
4411 mSuspendedFrames += framesRemaining; // to adjust kernel HAL position
4412 mBytesRemaining = 0;
4413 }
4414
4415 // only process effects if we're going to write
4416 if (mSleepTimeUs == 0 && mType != OFFLOAD) {
4417 for (size_t i = 0; i < effectChains.size(); i ++) {
4418 effectChains[i]->process_l();
4419 // TODO: Write haptic data directly to sink buffer when mixing.
4420 if (activeHapticSessionId != AUDIO_SESSION_NONE
4421 && activeHapticSessionId == effectChains[i]->sessionId()) {
4422 // Haptic data is active in this case, copy it directly from
4423 // in buffer to out buffer.
4424 uint32_t hapticSessionChannelCount = mEffectBufferValid ?
4425 audio_channel_count_from_out_mask(mMixerChannelMask) :
4426 mChannelCount;
4427 if (mType == SPATIALIZER && !isHapticSessionSpatialized) {
4428 hapticSessionChannelCount = mChannelCount;
4429 }
4430
4431 const size_t audioBufferSize = mNormalFrameCount
4432 * audio_bytes_per_frame(hapticSessionChannelCount,
4433 AUDIO_FORMAT_PCM_FLOAT);
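                // The chain's in/out buffers use the same layout as above: audio samples first,
                // haptic samples appended, so the haptic region starts at audioBufferSize.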
4434 memcpy_by_audio_format(
4435 (uint8_t*)effectChains[i]->outBuffer() + audioBufferSize,
4436 AUDIO_FORMAT_PCM_FLOAT,
4437 (const uint8_t*)effectChains[i]->inBuffer() + audioBufferSize,
4438 AUDIO_FORMAT_PCM_FLOAT, mNormalFrameCount * mHapticChannelCount);
4439 }
4440 }
4441 }
4442 }
4443         // Process effect chains for offloaded thread even if no audio
4444         // was read from audio track: process only updates effect state
4445         // and thus does not have to be synchronized with audio writes but may have
4446         // to be called while waiting for async write callback
4447 if (mType == OFFLOAD) {
4448 for (size_t i = 0; i < effectChains.size(); i ++) {
4449 effectChains[i]->process_l();
4450 }
4451 }
4452
4453         // Only if the effects buffer is enabled and contains valid data
4454         // (buffer valid) do we need to
4455         // copy it into the sink buffer.
4456 // TODO use mSleepTimeUs == 0 as an additional condition.
4457 if (mEffectBufferValid && !mHasDataCopiedToSinkBuffer) {
4458 //ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
4459 void *effectBuffer = (mType == SPATIALIZER) ? mPostSpatializerBuffer : mEffectBuffer;
4460 if (requireMonoBlend()) {
4461 mono_blend(effectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
4462 true /*limit*/);
4463 }
4464
4465 if (!hasFastMixer()) {
4466 // Balance must take effect after mono conversion.
4467 // We do it here if there is no FastMixer.
4468 // mBalance detects zero balance within the class for speed (not needed here).
4469 mBalance.setBalance(mMasterBalance.load());
4470 mBalance.process((float *)effectBuffer, mNormalFrameCount);
4471 }
4472
4473 // for SPATIALIZER thread, Move haptics channels from mEffectBuffer to
4474 // mPostSpatializerBuffer if the haptics track is spatialized.
4475 // Otherwise, the haptics channels are already in mPostSpatializerBuffer.
4476 // For other thread types, the haptics channels are already in mEffectBuffer.
4477 if (mType == SPATIALIZER && isHapticSessionSpatialized) {
4478 const size_t srcBufferSize = mNormalFrameCount *
4479 audio_bytes_per_frame(audio_channel_count_from_out_mask(mMixerChannelMask),
4480 mEffectBufferFormat);
4481 const size_t dstBufferSize = mNormalFrameCount
4482 * audio_bytes_per_frame(mChannelCount, mEffectBufferFormat);
4483
4484 memcpy_by_audio_format((uint8_t*)mPostSpatializerBuffer + dstBufferSize,
4485 mEffectBufferFormat,
4486 (uint8_t*)mEffectBuffer + srcBufferSize,
4487 mEffectBufferFormat,
4488 mNormalFrameCount * mHapticChannelCount);
4489 }
4490 const size_t framesToCopy = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
4491 if (mFormat == AUDIO_FORMAT_PCM_FLOAT &&
4492 mEffectBufferFormat == AUDIO_FORMAT_PCM_FLOAT) {
4493 // Clamp PCM float values more than this distance from 0 to insulate
4494 // a HAL which doesn't handle NaN correctly.
4495 static constexpr float HAL_FLOAT_SAMPLE_LIMIT = 2.0f;
4496 memcpy_to_float_from_float_with_clamping(static_cast<float*>(mSinkBuffer),
4497 static_cast<const float*>(effectBuffer),
4498 framesToCopy, HAL_FLOAT_SAMPLE_LIMIT /* absMax */);
4499 } else {
4500 memcpy_by_audio_format(mSinkBuffer, mFormat,
4501 effectBuffer, mEffectBufferFormat, framesToCopy);
4502 }
4503 // The sample data is partially interleaved when haptic channels exist,
4504 // we need to adjust channels here.
4505 if (mHapticChannelCount > 0) {
4506 adjust_channels_non_destructive(mSinkBuffer, mChannelCount, mSinkBuffer,
4507 mChannelCount + mHapticChannelCount,
4508 audio_bytes_per_sample(mFormat),
4509 audio_bytes_per_frame(mChannelCount, mFormat) * mNormalFrameCount);
4510 }
4511 }
4512
4513 // enable changes in effect chain
4514 unlockEffectChains(effectChains);
4515
4516 if (!metadataUpdate.playbackMetadataUpdate.empty()) {
4517 mAfThreadCallback->getMelReporter()->updateMetadataForCsd(id(),
4518 metadataUpdate.playbackMetadataUpdate);
4519 }
4520
4521 if (!waitingAsyncCallback()) {
4522 // mSleepTimeUs == 0 means we must write to audio hardware
4523 if (mSleepTimeUs == 0) {
4524 ssize_t ret = 0;
4525 // writePeriodNs is updated >= 0 when ret > 0.
4526 int64_t writePeriodNs = -1;
4527 if (mBytesRemaining) {
4528 // FIXME rewrite to reduce number of system calls
4529 const int64_t lastIoBeginNs = systemTime();
4530 ret = threadLoop_write();
4531 const int64_t lastIoEndNs = systemTime();
4532 if (ret < 0) {
4533 mBytesRemaining = 0;
4534 } else if (ret > 0) {
4535 mBytesWritten += ret;
4536 mBytesRemaining -= ret;
4537 const int64_t frames = ret / mFrameSize;
4538 mFramesWritten += frames;
4539
4540 writePeriodNs = lastIoEndNs - mLastIoEndNs;
4541 // process information relating to write time.
4542 if (audio_has_proportional_frames(mFormat)) {
4543 // we are in a continuous mixing cycle
4544 if (mMixerStatus == MIXER_TRACKS_READY &&
4545 loopCount == lastLoopCountWritten + 1) {
4546
4547 const double jitterMs =
4548 TimestampVerifier<int64_t, int64_t>::computeJitterMs(
4549 {frames, writePeriodNs},
4550 {0, 0} /* lastTimestamp */, mSampleRate);
4551 const double processMs =
4552 (lastIoBeginNs - mLastIoEndNs) * 1e-6;
4553
4554 audio_utils::lock_guard _l(mutex());
4555 mIoJitterMs.add(jitterMs);
4556 mProcessTimeMs.add(processMs);
4557
4558 if (mPipeSink.get() != nullptr) {
4559 // Using the Monopipe availableToWrite, we estimate the current
4560 // buffer size.
4561 MonoPipe* monoPipe = static_cast<MonoPipe*>(mPipeSink.get());
4562 const ssize_t
4563 availableToWrite = mPipeSink->availableToWrite();
4564 const size_t pipeFrames = monoPipe->maxFrames();
4565 const size_t
4566 remainingFrames = pipeFrames - max(availableToWrite, 0);
4567 mMonopipePipeDepthStats.add(remainingFrames);
4568 }
4569 }
4570
4571 // write blocked detection
4572 const int64_t deltaWriteNs = lastIoEndNs - lastIoBeginNs;
4573 if ((mType == MIXER || mType == SPATIALIZER)
4574 && deltaWriteNs > maxPeriod) {
4575 mNumDelayedWrites++;
4576 if ((lastIoEndNs - lastWarning) > kWarningThrottleNs) {
4577 ATRACE_NAME("underrun");
4578 ALOGW("write blocked for %lld msecs, "
4579 "%d delayed writes, thread %d",
4580 (long long)deltaWriteNs / NANOS_PER_MILLISECOND,
4581 mNumDelayedWrites, mId);
4582 lastWarning = lastIoEndNs;
4583 }
4584 }
4585 }
4586 // update timing info.
4587 mLastIoBeginNs = lastIoBeginNs;
4588 mLastIoEndNs = lastIoEndNs;
4589 lastLoopCountWritten = loopCount;
4590 }
4591 } else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
4592 (mMixerStatus == MIXER_DRAIN_ALL)) {
4593 threadLoop_drain();
4594 }
4595 if ((mType == MIXER || mType == SPATIALIZER) && !mStandby) {
4596
4597 if (mThreadThrottle
4598 && mMixerStatus == MIXER_TRACKS_READY // we are mixing (active tracks)
4599 && writePeriodNs > 0) { // we have write period info
4600 // Limit MixerThread data processing to no more than twice the
4601 // expected processing rate.
4602 //
4603 // This helps prevent underruns with NuPlayer and other applications
4604 // which may set up buffers that are close to the minimum size, or use
4605 // deep buffers, and rely on a double-buffering sleep strategy to fill.
4606 //
4607 // The throttle smooths out sudden large data drains from the device,
4608 // e.g. when it comes out of standby, which often causes problems with
4609 // (1) mixer threads without a fast mixer (which has its own warm-up)
4610 // (2) minimum buffer sized tracks (even if the track is full,
4611 // the app won't fill fast enough to handle the sudden draw).
4612 //
4613 // Total time spent in last processing cycle equals time spent in
4614 // 1. threadLoop_write, as well as time spent in
4615 // 2. threadLoop_mix (significant for heavy mixing, especially
4616 // on low tier processors)
4617
4618 // it's OK if deltaMs is an overestimate.
4619
4620 const int32_t deltaMs = writePeriodNs / NANOS_PER_MILLISECOND;
4621
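                    // throttleMs is what remains of the half-buffer budget (mHalfBufferMs)
                    // after subtracting the time the last cycle actually took; sleeping for it
                    // caps processing at roughly twice the expected rate.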
4622 const int32_t throttleMs = (int32_t)mHalfBufferMs - deltaMs;
4623 if ((signed)mHalfBufferMs >= throttleMs && throttleMs > 0) {
4624 mThreadMetrics.logThrottleMs((double)throttleMs);
4625
4626 usleep(throttleMs * 1000);
4627 // notify of throttle start on verbose log
4628 ALOGV_IF(mThreadThrottleEndMs == mThreadThrottleTimeMs,
4629 "mixer(%p) throttle begin:"
4630 " ret(%zd) deltaMs(%d) requires sleep %d ms",
4631 this, ret, deltaMs, throttleMs);
4632 mThreadThrottleTimeMs += throttleMs;
4633 // Throttle must be attributed to the previous mixer loop's write time
4634 // to allow back-to-back throttling.
4635 // This also ensures proper timing statistics.
4636 mLastIoEndNs = systemTime(); // we fetch the write end time again.
4637 } else {
4638 uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
4639 if (diff > 0) {
4640 // notify of throttle end on debug log
4641 // but prevent spamming for bluetooth
4642 ALOGD_IF(!isSingleDeviceType(
4643 outDeviceTypes_l(), audio_is_a2dp_out_device) &&
4644 !isSingleDeviceType(
4645 outDeviceTypes_l(),
4646 audio_is_hearing_aid_out_device),
4647 "mixer(%p) throttle end: throttle time(%u)", this, diff);
4648 mThreadThrottleEndMs = mThreadThrottleTimeMs;
4649 }
4650 }
4651 }
4652 }
4653
4654 } else {
4655 ATRACE_BEGIN("sleep");
4656 audio_utils::unique_lock _l(mutex());
4657 // suspended requires accurate metering of sleep time.
4658 if (isSuspended()) {
4659 // advance by expected sleepTime
4660 timeLoopNextNs += microseconds((nsecs_t)mSleepTimeUs);
4661 const nsecs_t nowNs = systemTime();
4662
4663 // compute expected next time vs current time.
4664 // (negative deltas are treated as delays).
4665 nsecs_t deltaNs = timeLoopNextNs - nowNs;
4666 if (deltaNs < -kMaxNextBufferDelayNs) {
4667 // Delays longer than the max allowed trigger a reset.
4668 ALOGV("DelayNs: %lld, resetting timeLoopNextNs", (long long) deltaNs);
4669 deltaNs = microseconds((nsecs_t)mSleepTimeUs);
4670 timeLoopNextNs = nowNs + deltaNs;
4671 } else if (deltaNs < 0) {
4672 // Delays within the max delay allowed: zero the delta/sleepTime
4673 // to help the system catch up in the next iteration(s)
4674 ALOGV("DelayNs: %lld, catching-up", (long long) deltaNs);
4675 deltaNs = 0;
4676 }
4677 // update sleep time (which is >= 0)
4678 mSleepTimeUs = deltaNs / 1000;
4679 }
4680 if (!mSignalPending && mConfigEvents.isEmpty() && !exitPending()) {
4681 mWaitWorkCV.wait_for(_l, std::chrono::microseconds(mSleepTimeUs));
4682 }
4683 ATRACE_END();
4684 }
4685 }
4686
4687 // Finally let go of removed track(s), without the lock held
4688 // since we can't guarantee the destructors won't acquire that
4689 // same lock. This will also mutate and push a new fast mixer state.
4690 threadLoop_removeTracks(tracksToRemove);
4691 tracksToRemove.clear();
4692
4693 // FIXME I don't understand the need for this here;
4694 // it was in the original code but maybe the
4695 // assignment in saveOutputTracks() makes this unnecessary?
4696 clearOutputTracks();
4697
4698 // Effect chains will be actually deleted here if they were removed from
4699 // mEffectChains list during mixing or effects processing
4700 effectChains.clear();
4701
4702 // FIXME Note that the above .clear() is no longer necessary since effectChains
4703 // is now local to this block, but will keep it for now (at least until merge done).
4704
4705 mThreadloopExecutor.process();
4706 }
4707 mThreadloopExecutor.process(); // process any remaining deferred actions.
4708 // deferred actions after this point are ignored.
4709
4710 threadLoop_exit();
4711
4712 if (!mStandby) {
4713 threadLoop_standby();
4714 setStandby();
4715 }
4716
4717 releaseWakeLock();
4718
4719 ALOGV("Thread %p type %d exiting", this, mType);
4720 return false;
4721 }
4722
4723 void PlaybackThread::collectTimestamps_l()
4724 {
4725 if (mStandby) {
4726 mTimestampVerifier.discontinuity(discontinuityForStandbyOrFlush());
4727 return;
4728 } else if (mHwPaused) {
4729 mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);
4730 return;
4731 }
4732
4733 // Gather the framesReleased counters for all active tracks,
4734 // and associate with the sink frames written out. We need
4735 // this to convert the sink timestamp to the track timestamp.
4736 bool kernelLocationUpdate = false;
4737 ExtendedTimestamp timestamp; // use private copy to fetch
4738
4739 // Always query HAL timestamp and update timestamp verifier. In standby or pause,
4740 // HAL may be draining some small duration buffered data for fade out.
4741     if (threadloop_getHalTimestamp_l(&timestamp) == OK) {
4742 mTimestampVerifier.add(timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
4743 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
4744 mSampleRate);
4745
4746 if (isTimestampCorrectionEnabled_l()) {
4747 ALOGVV("TS_BEFORE: %d %lld %lld", id(),
4748 (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
4749 (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
4750 auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
4751 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4752 = correctedTimestamp.mFrames;
4753 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]
4754 = correctedTimestamp.mTimeNs;
4755 ALOGVV("TS_AFTER: %d %lld %lld", id(),
4756 (long long)timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
4757 (long long)timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]);
4758
4759 // Note: Downstream latency only added if timestamp correction enabled.
4760 if (mDownstreamLatencyStatMs.getN() > 0) { // we have latency info.
4761 const int64_t newPosition =
4762 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4763 - int64_t(mDownstreamLatencyStatMs.getMean() * mSampleRate * 1e-3);
4764 // prevent retrograde
4765 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = max(
4766 newPosition,
4767 (mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4768 - mSuspendedFrames));
4769 }
4770 }
4771
4772 // We always fetch the timestamp here because often the downstream
4773 // sink will block while writing.
4774
4775 // We keep track of the last valid kernel position in case we are in underrun
4776 // and the normal mixer period is the same as the fast mixer period, or there
4777 // is some error from the HAL.
4778 if (mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
4779 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
4780 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
4781 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] =
4782 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
4783
4784 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
4785 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER];
4786 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] =
4787 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER];
4788 }
4789
4790 if (timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] >= 0) {
4791 kernelLocationUpdate = true;
4792 } else {
4793 ALOGVV("getTimestamp error - no valid kernel position");
4794 }
4795
4796 // copy over kernel info
4797 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] =
4798 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL]
4799 + mSuspendedFrames; // add frames discarded when suspended
4800 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] =
4801 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
4802 } else {
4803 mTimestampVerifier.error();
4804 }
4805
4806     // mFramesWritten for non-offloaded tracks is contiguous
4807 // even after standby() is called. This is useful for the track frame
4808 // to sink frame mapping.
4809 bool serverLocationUpdate = false;
4810 if (mFramesWritten != mLastFramesWritten) {
4811 serverLocationUpdate = true;
4812 mLastFramesWritten = mFramesWritten;
4813 }
4814 // Only update timestamps if there is a meaningful change.
4815 // Either the kernel timestamp must be valid or we have written something.
4816 if (kernelLocationUpdate || serverLocationUpdate) {
4817 if (serverLocationUpdate) {
4818 // use the time before we called the HAL write - it is a bit more accurate
4819 // to when the server last read data than the current time here.
4820 //
4821 // If we haven't written anything, mLastIoBeginNs will be -1
4822 // and we use systemTime().
4823 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = mFramesWritten;
4824 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastIoBeginNs == -1
4825 ? systemTime() : (int64_t)mLastIoBeginNs;
4826 }
4827
4828 for (const sp<IAfTrack>& t : mActiveTracks) {
4829 if (!t->isFastTrack()) {
4830 t->updateTrackFrameInfo(
4831 t->audioTrackServerProxy()->framesReleased(),
4832 mFramesWritten,
4833 mSampleRate,
4834 mTimestamp);
4835 }
4836 }
4837 }
4838
4839 if (audio_has_proportional_frames(mFormat)) {
4840 const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
4841 if (latencyMs != 0.) { // note 0. means timestamp is empty.
4842 mLatencyMs.add(latencyMs);
4843 }
4844 }
4845 #if 0
4846 // logFormat example
4847 if (z % 100 == 0) {
4848 timespec ts;
4849 clock_gettime(CLOCK_MONOTONIC, &ts);
4850 LOGT("This is an integer %d, this is a float %f, this is my "
4851 "pid %p %% %s %t", 42, 3.14, "and this is a timestamp", ts);
4852 LOGT("A deceptive null-terminated string %\0");
4853 }
4854 ++z;
4855 #endif
4856 }
4857
4858 // removeTracks_l() must be called with ThreadBase::mutex() held
4859 void PlaybackThread::removeTracks_l(const Vector<sp<IAfTrack>>& tracksToRemove)
4860 NO_THREAD_SAFETY_ANALYSIS // release and re-acquire mutex()
4861 {
4862 if (tracksToRemove.empty()) return;
4863
4864 // Block all incoming TrackHandle requests until we are finished with the release.
4865 setThreadBusy_l(true);
4866
4867 for (const auto& track : tracksToRemove) {
4868 ALOGV("%s(%d): removing track on session %d", __func__, track->id(), track->sessionId());
4869 sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
4870 if (chain != 0) {
4871 ALOGV("%s(%d): stopping track on chain %p for session Id: %d",
4872 __func__, track->id(), chain.get(), track->sessionId());
4873 chain->decActiveTrackCnt();
4874 }
4875
4876 // If an external client track, inform APM we're no longer active, and remove if needed.
4877 // Since the track is active, we do it here instead of TrackBase::destroy().
4878 if (track->isExternalTrack()) {
4879 mutex().unlock();
4880 AudioSystem::stopOutput(track->portId());
4881 if (track->isTerminated()) {
4882 AudioSystem::releaseOutput(track->portId());
4883 }
4884 mutex().lock();
4885 }
4886 if (mHapticChannelCount > 0 &&
4887 ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
4888 || (chain != nullptr && chain->containsHapticGeneratingEffect()))) {
4889 mutex().unlock();
4890             // Unlock because VibratorService will lock for this call and will call
4891             // Tracks.mute/unmute, which also requires the thread's lock.
4892 afutils::onExternalVibrationStop(track->getExternalVibration());
4893 mutex().lock();
4894
4895             // When the track is stopped, set the haptic intensity to MUTE
4896 // for the HapticGenerator effect.
4897 if (chain != nullptr) {
4898 chain->setHapticScale_l(track->id(), os::HapticScale::mute());
4899 }
4900 }
4901
4902 // Under lock, the track is removed from the active tracks list.
4903 //
4904 // Once the track is no longer active, the TrackHandle may directly
4905 // modify it as the threadLoop() is no longer responsible for its maintenance.
4906 // Do not modify the track from threadLoop after the mutex is unlocked
4907 // if it is not active.
4908 mActiveTracks.remove(track);
4909
4910 if (track->isTerminated()) {
4911 // remove from our tracks vector
4912 removeTrack_l(track);
4913 }
4914 }
4915
4916 // Allow incoming TrackHandle requests. We still hold the mutex,
4917 // so pending TrackHandle requests will occur after we unlock it.
4918 setThreadBusy_l(false);
4919 }
4920
4921 status_t PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
4922 {
4923 if (mNormalSink != 0) {
4924 ExtendedTimestamp ets;
4925 status_t status = mNormalSink->getTimestamp(ets);
4926 if (status == NO_ERROR) {
4927             status = ets.getBestTimestamp(&timestamp);
4928 }
4929 return status;
4930 }
4931 if ((mType == OFFLOAD || mType == DIRECT) && mOutput != NULL) {
4932 collectTimestamps_l();
4933 if (mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] <= 0) {
4934 return INVALID_OPERATION;
4935 }
4936 timestamp.mPosition = mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
4937 const int64_t timeNs = mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
4938 timestamp.mTime.tv_sec = timeNs / NANOS_PER_SECOND;
4939 timestamp.mTime.tv_nsec = timeNs - (timestamp.mTime.tv_sec * NANOS_PER_SECOND);
4940 return NO_ERROR;
4941 }
4942 return INVALID_OPERATION;
4943 }
4944
4945 // For dedicated VoIP outputs, let the HAL apply the stream volume. Track volume is
4946 // still applied by the mixer.
4947 // All tracks attached to a mixer with flag VOIP_RX are tied to the same
4948 // stream type STREAM_VOICE_CALL, so this will only change the HAL volume once even
4949 // if more than one track is active
4950 status_t PlaybackThread::handleVoipVolume_l(float* volume)
4951 {
4952 status_t result = NO_ERROR;
4953 if ((mOutput->flags & AUDIO_OUTPUT_FLAG_VOIP_RX) != 0) {
4954 if (*volume != mLeftVolFloat) {
4955 result = mOutput->stream->setVolume(*volume, *volume);
4956 // HAL can return INVALID_OPERATION if operation is not supported.
4957 ALOGE_IF(result != OK && result != INVALID_OPERATION,
4958 "Error when setting output stream volume: %d", result);
4959 if (result == NO_ERROR) {
4960 mLeftVolFloat = *volume;
4961 }
4962 }
4963         // if the stream volume was successfully sent to the HAL, mLeftVolFloat == *volume here
4964         // and we remove the stream volume contribution from the software volume.
4965 if (mLeftVolFloat == *volume) {
4966 *volume = 1.0f;
4967 }
4968 }
4969 return result;
4970 }
4971
4972 status_t MixerThread::createAudioPatch_l(const struct audio_patch* patch,
4973 audio_patch_handle_t *handle)
4974 {
4975 status_t status;
4976 if (property_get_bool("af.patch_park", false /* default_value */)) {
4977 // Park FastMixer to avoid potential DOS issues with writing to the HAL
4978 // or if HAL does not properly lock against access.
4979 AutoPark<FastMixer> park(mFastMixer);
4980 status = PlaybackThread::createAudioPatch_l(patch, handle);
4981 } else {
4982 status = PlaybackThread::createAudioPatch_l(patch, handle);
4983 }
4984
4985 updateHalSupportedLatencyModes_l();
4986 return status;
4987 }
4988
4989 status_t PlaybackThread::createAudioPatch_l(const struct audio_patch *patch,
4990 audio_patch_handle_t *handle)
4991 {
4992 status_t status = NO_ERROR;
4993
4994 // store new device and send to effects
4995 audio_devices_t type = AUDIO_DEVICE_NONE;
4996 AudioDeviceTypeAddrVector deviceTypeAddrs;
4997 for (unsigned int i = 0; i < patch->num_sinks; i++) {
4998 LOG_ALWAYS_FATAL_IF(popcount(patch->sinks[i].ext.device.type) > 1
4999 && !mOutput->audioHwDev->supportsAudioPatches(),
5000 "Enumerated device type(%#x) must not be used "
5001 "as it does not support audio patches",
5002 patch->sinks[i].ext.device.type);
5003 type = static_cast<audio_devices_t>(type | patch->sinks[i].ext.device.type);
5004 deviceTypeAddrs.emplace_back(patch->sinks[i].ext.device.type,
5005 patch->sinks[i].ext.device.address);
5006 }
5007
5008 audio_port_handle_t sinkPortId = patch->sinks[0].id;
5009 #ifdef ADD_BATTERY_DATA
5010 // when changing the audio output device, call addBatteryData to notify
5011 // the change
5012 if (outDeviceTypes() != deviceTypes) {
5013 uint32_t params = 0;
5014 // check whether speaker is on
5015 if (deviceTypes.count(AUDIO_DEVICE_OUT_SPEAKER) > 0) {
5016 params |= IMediaPlayerService::kBatteryDataSpeakerOn;
5017 }
5018
5019 // check if any other device (except speaker) is on
5020 if (!isSingleDeviceType(deviceTypes, AUDIO_DEVICE_OUT_SPEAKER)) {
5021 params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
5022 }
5023
5024 if (params != 0) {
5025 addBatteryData(params);
5026 }
5027 }
5028 #endif
5029
5030 for (size_t i = 0; i < mEffectChains.size(); i++) {
5031 mEffectChains[i]->setDevices_l(deviceTypeAddrs);
5032 }
5033
5034 // mPatch.num_sinks is not set when the thread is created so that
5035 // the first patch creation triggers an ioConfigChanged callback
5036 bool configChanged = (mPatch.num_sinks == 0) ||
5037 (mPatch.sinks[0].id != sinkPortId);
5038 mPatch = *patch;
5039 mOutDeviceTypeAddrs = deviceTypeAddrs;
5040 checkSilentMode_l();
5041
5042 if (mOutput->audioHwDev->supportsAudioPatches()) {
5043 sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
5044 status = hwDevice->createAudioPatch(patch->num_sources,
5045 patch->sources,
5046 patch->num_sinks,
5047 patch->sinks,
5048 handle);
5049 } else {
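        // The HAL module does not support audio patches: fall back to the legacy
        // routing call and report that no patch handle was created.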
5050 status = mOutput->stream->legacyCreateAudioPatch(patch->sinks[0], std::nullopt, type);
5051 *handle = AUDIO_PATCH_HANDLE_NONE;
5052 }
5053 const std::string patchSinksAsString = patchSinksToString(patch);
5054
5055 mThreadMetrics.logEndInterval();
5056 mThreadMetrics.logCreatePatch(/* inDevices */ {}, patchSinksAsString);
5057 mThreadMetrics.logBeginInterval();
5058 // also dispatch to active AudioTracks for MediaMetrics
5059 for (const auto &track : mActiveTracks) {
5060 track->logEndInterval();
5061 track->logBeginInterval(patchSinksAsString);
5062 }
5063
5064 if (configChanged) {
5065 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
5066 }
5067 // Force metadata update after a route change
5068 mActiveTracks.setHasChanged();
5069
5070 return status;
5071 }
5072
5073 status_t MixerThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
5074 {
5075 status_t status;
5076 if (property_get_bool("af.patch_park", false /* default_value */)) {
5077 // Park FastMixer to avoid potential DOS issues with writing to the HAL
5078 // or if HAL does not properly lock against access.
5079 AutoPark<FastMixer> park(mFastMixer);
5080 status = PlaybackThread::releaseAudioPatch_l(handle);
5081 } else {
5082 status = PlaybackThread::releaseAudioPatch_l(handle);
5083 }
5084 return status;
5085 }
5086
5087 status_t PlaybackThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
5088 {
5089 status_t status = NO_ERROR;
5090
5091 mPatch = audio_patch{};
5092 mOutDeviceTypeAddrs.clear();
5093
5094 if (mOutput->audioHwDev->supportsAudioPatches()) {
5095 sp<DeviceHalInterface> hwDevice = mOutput->audioHwDev->hwDevice();
5096 status = hwDevice->releaseAudioPatch(handle);
5097 } else {
5098 status = mOutput->stream->legacyReleaseAudioPatch();
5099 }
5100     // Force metadata update after a route change
5101 mActiveTracks.setHasChanged();
5102
5103 return status;
5104 }
5105
5106 void PlaybackThread::addPatchTrack(const sp<IAfPatchTrack>& track)
5107 {
5108 audio_utils::lock_guard _l(mutex());
5109 mTracks.add(track);
5110 }
5111
5112 void PlaybackThread::deletePatchTrack(const sp<IAfPatchTrack>& track)
5113 {
5114 audio_utils::lock_guard _l(mutex());
5115 destroyTrack_l(track);
5116 }
5117
5118 void PlaybackThread::toAudioPortConfig(struct audio_port_config* config)
5119 {
5120 ThreadBase::toAudioPortConfig(config);
5121 config->role = AUDIO_PORT_ROLE_SOURCE;
5122 config->ext.mix.hw_module = mOutput->audioHwDev->handle();
5123 config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
5124 if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
5125 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
5126 config->flags.output = mOutput->flags;
5127 }
5128 }
5129
5130 std::string PlaybackThread::getLocalLogHeader() const {
5131 using namespace std::literals;
5132 static constexpr auto indent = " "
5133 " "sv;
5134 return std::string{indent}.append(IAfTrack::getLogHeader());
5135 }
5136 // ----------------------------------------------------------------------------
5137
5138 /* static */
5139 sp<IAfPlaybackThread> IAfPlaybackThread::createMixerThread(
5140 const sp<IAfThreadCallback>& afThreadCallback, AudioStreamOut* output,
5141 audio_io_handle_t id, bool systemReady, type_t type, audio_config_base_t* mixerConfig) {
5142 return sp<MixerThread>::make(afThreadCallback, output, id, systemReady, type, mixerConfig);
5143 }
5144
5145 MixerThread::MixerThread(const sp<IAfThreadCallback>& afThreadCallback, AudioStreamOut* output,
5146 audio_io_handle_t id, bool systemReady, type_t type, audio_config_base_t *mixerConfig)
5147 : PlaybackThread(afThreadCallback, output, id, type, systemReady, mixerConfig),
5148 // mAudioMixer below
5149 // mFastMixer below
5150 mBluetoothLatencyModesEnabled(false),
5151 mFastMixerFutex(0),
5152 mMasterMono(false)
5153 // mOutputSink below
5154 // mPipeSink below
5155 // mNormalSink below
5156 {
5157 ALOGV("MixerThread() id=%d type=%d", id, type);
5158 ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
5159 "mFrameCount=%zu, mNormalFrameCount=%zu",
5160 mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
5161 mNormalFrameCount);
5162 mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
5163
5164 if (type == DUPLICATING) {
5165 // The Duplicating thread uses the AudioMixer and delivers data to OutputTracks
5166 // (downstream MixerThreads) in DuplicatingThread::threadLoop_write().
5167 // Do not create or use mFastMixer, mOutputSink, mPipeSink, or mNormalSink.
5168 // Balance is *not* set in the DuplicatingThread here (or from AudioFlinger),
5169 // as the downstream MixerThreads implement it.
5170 return;
5171 }
5172 // create an NBAIO sink for the HAL output stream, and negotiate
5173 mOutputSink = new AudioStreamOutSink(output->stream);
5174 size_t numCounterOffers = 0;
5175 const NBAIO_Format offers[1] = {Format_from_SR_C(
5176 mSampleRate, mChannelCount + mHapticChannelCount, mFormat)};
5177 #if !LOG_NDEBUG
5178 ssize_t index =
5179 #else
5180 (void)
5181 #endif
5182 mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
5183 ALOG_ASSERT(index == 0);
5184
5185 // initialize fast mixer depending on configuration
5186 bool initFastMixer;
5187 if (mType == SPATIALIZER || mType == BIT_PERFECT) {
5188 initFastMixer = false;
5189 } else {
5190 switch (kUseFastMixer) {
5191 case FastMixer_Never:
5192 initFastMixer = false;
5193 break;
5194 case FastMixer_Always:
5195 initFastMixer = true;
5196 break;
5197 case FastMixer_Static:
5198 case FastMixer_Dynamic:
5199 if (mType == MIXER && (output->flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER)) {
5200             /* Do not init the fast mixer on deep-buffer outputs; warn if buffers are configured too small */
5201 initFastMixer = false;
5202 ALOGW_IF(mFrameCount * 1000 / mSampleRate < kMinNormalSinkBufferSizeMs,
5203 "HAL DEEP BUFFER Buffer (%zu ms) is smaller than set minimal buffer "
5204 "(%u ms), seems like a configuration error",
5205 mFrameCount * 1000 / mSampleRate, kMinNormalSinkBufferSizeMs);
5206 } else {
5207 initFastMixer = mFrameCount < mNormalFrameCount;
5208 }
5209 break;
5210 }
5211 ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
5212 "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
5213 mFrameCount, mNormalFrameCount);
5214 }
5215 if (initFastMixer) {
5216 audio_format_t fastMixerFormat;
5217 if (mMixerBufferEnabled && mEffectBufferEnabled) {
5218 fastMixerFormat = AUDIO_FORMAT_PCM_FLOAT;
5219 } else {
5220 fastMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
5221 }
5222 if (mFormat != fastMixerFormat) {
5223 // change our Sink format to accept our intermediate precision
5224 mFormat = fastMixerFormat;
5225 free(mSinkBuffer);
5226 mFrameSize = audio_bytes_per_frame(mChannelCount + mHapticChannelCount, mFormat);
5227 const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
5228 (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
5229 }
5230
5231 // create a MonoPipe to connect our submix to FastMixer
5232 NBAIO_Format format = mOutputSink->format();
5233
5234 // adjust format to match that of the Fast Mixer
5235 ALOGV("format changed from %#x to %#x", format.mFormat, fastMixerFormat);
5236 format.mFormat = fastMixerFormat;
5237 format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount;
5238
5239 // This pipe depth compensates for scheduling latency of the normal mixer thread.
5240 // When it wakes up after a maximum latency, it runs a few cycles quickly before
5241 // finally blocking. Note the pipe implementation rounds up the request to a power of 2.
5242 MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
5243 const NBAIO_Format offersFast[1] = {format};
5244 size_t numCounterOffersFast = 0;
5245 #if !LOG_NDEBUG
5246 index =
5247 #else
5248 (void)
5249 #endif
5250 monoPipe->negotiate(offersFast, std::size(offersFast),
5251 nullptr /* counterOffers */, numCounterOffersFast);
5252 ALOG_ASSERT(index == 0);
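        // Choose the pipe's target fill level: when the low bit of mScreenState is set
        // (presumably screen off), fill closer to capacity to reduce wakeups; otherwise
        // keep roughly two normal mixer buffers for lower latency.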
5253 monoPipe->setAvgFrames((mScreenState & 1) ?
5254 (monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
5255 mPipeSink = monoPipe;
5256
5257 // create fast mixer and configure it initially with just one fast track for our submix
5258 mFastMixer = new FastMixer(mId);
5259 FastMixerStateQueue *sq = mFastMixer->sq();
5260 #ifdef STATE_QUEUE_DUMP
5261 sq->setObserverDump(&mStateQueueObserverDump);
5262 sq->setMutatorDump(&mStateQueueMutatorDump);
5263 #endif
5264 FastMixerState *state = sq->begin();
5265 FastTrack *fastTrack = &state->mFastTracks[0];
5266 // wrap the source side of the MonoPipe to make it an AudioBufferProvider
5267 fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
5268 fastTrack->mVolumeProvider = NULL;
5269 fastTrack->mChannelMask = static_cast<audio_channel_mask_t>(
5270 mChannelMask | mHapticChannelMask); // mPipeSink channel mask for
5271 // audio to FastMixer
5272 fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
5273 fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
5274 fastTrack->mHapticScale = os::HapticScale::none();
5275 fastTrack->mHapticMaxAmplitude = NAN;
5276 fastTrack->mGeneration++;
5277 snprintf(fastTrack->mTraceName, sizeof(fastTrack->mTraceName),
5278 "%s.0.0.%d", AUDIO_TRACE_PREFIX_AUDIO_TRACK_FRDY, mId);
5279 state->mFastTracksGen++;
5280 state->mTrackMask = 1;
5281 // fast mixer will use the HAL output sink
5282 state->mOutputSink = mOutputSink.get();
5283 state->mOutputSinkGen++;
5284 state->mFrameCount = mFrameCount;
5285         // specify the sink channel mask when a haptic channel mask is present, as it cannot
5286         // be calculated directly from the channel count
5287 state->mSinkChannelMask = mHapticChannelMask == AUDIO_CHANNEL_NONE
5288 ? AUDIO_CHANNEL_NONE
5289 : static_cast<audio_channel_mask_t>(mChannelMask | mHapticChannelMask);
5290 state->mCommand = FastMixerState::COLD_IDLE;
5291 // already done in constructor initialization list
5292 //mFastMixerFutex = 0;
5293 state->mColdFutexAddr = &mFastMixerFutex;
5294 state->mColdGen++;
5295 state->mDumpState = &mFastMixerDumpState;
5296 mFastMixerNBLogWriter = afThreadCallback->newWriter_l(kFastMixerLogSize, "FastMixer");
5297 state->mNBLogWriter = mFastMixerNBLogWriter.get();
5298 sq->end();
5299 {
5300 audio_utils::mutex::scoped_queue_wait_check queueWaitCheck(mFastMixer->getTid());
5301 sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
5302 }
5303
5304 NBLog::thread_info_t info;
5305 info.id = mId;
5306 info.type = NBLog::FASTMIXER;
5307 mFastMixerNBLogWriter->log<NBLog::EVENT_THREAD_INFO>(info);
5308
5309 // start the fast mixer
5310 mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
5311 pid_t tid = mFastMixer->getTid();
5312 sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
5313 stream()->setHalThreadPriority(kPriorityFastMixer);
5314
5315 #ifdef AUDIO_WATCHDOG
5316 // create and start the watchdog
5317 mAudioWatchdog = new AudioWatchdog();
5318 mAudioWatchdog->setDump(&mAudioWatchdogDump);
5319 mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
5320 tid = mAudioWatchdog->getTid();
5321 sendPrioConfigEvent(getpid(), tid, kPriorityFastMixer, false /*forApp*/);
5322 #endif
5323 } else {
5324 #ifdef TEE_SINK
5325 // Only use the MixerThread tee if there is no FastMixer.
5326 mTee.set(mOutputSink->format(), NBAIO_Tee::TEE_FLAG_OUTPUT_THREAD);
5327 mTee.setId(std::string("_") + std::to_string(mId) + "_M");
5328 #endif
5329 }
5330
5331 switch (kUseFastMixer) {
5332 case FastMixer_Never:
5333 case FastMixer_Dynamic:
5334 mNormalSink = mOutputSink;
5335 break;
5336 case FastMixer_Always:
5337 mNormalSink = mPipeSink;
5338 break;
5339 case FastMixer_Static:
5340 mNormalSink = initFastMixer ? mPipeSink : mOutputSink;
5341 break;
5342 }
5343 // setMasterBalance needs to be called after the FastMixer
5344 // (if any) is set up, in order to deliver the balance settings to it.
5345 setMasterBalance(afThreadCallback->getMasterBalance_l());
5346 }
5347
5348 MixerThread::~MixerThread()
5349 {
5350 if (mFastMixer != 0) {
5351 FastMixerStateQueue *sq = mFastMixer->sq();
5352 FastMixerState *state = sq->begin();
5353 if (state->mCommand == FastMixerState::COLD_IDLE) {
5354 int32_t old = android_atomic_inc(&mFastMixerFutex);
5355 if (old == -1) {
5356 (void) syscall(__NR_futex, &mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
5357 }
5358 }
5359 state->mCommand = FastMixerState::EXIT;
5360 sq->end();
5361 {
5362 audio_utils::mutex::scoped_join_wait_check queueWaitCheck(mFastMixer->getTid());
5363 sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
5364 mFastMixer->join();
5365 }
5366         // Though the fast mixer thread has exited, its state queue is still valid.
5367         // We'll use it to extract the final state, which contains one remaining fast track
5368         // corresponding to our sub-mix.
5369 state = sq->begin();
5370 ALOG_ASSERT(state->mTrackMask == 1);
5371 FastTrack *fastTrack = &state->mFastTracks[0];
5372 ALOG_ASSERT(fastTrack->mBufferProvider != NULL);
5373 delete fastTrack->mBufferProvider;
5374 sq->end(false /*didModify*/);
5375 mFastMixer.clear();
5376 #ifdef AUDIO_WATCHDOG
5377 if (mAudioWatchdog != 0) {
5378 mAudioWatchdog->requestExit();
5379 mAudioWatchdog->requestExitAndWait();
5380 mAudioWatchdog.clear();
5381 }
5382 #endif
5383 }
5384 mAfThreadCallback->unregisterWriter(mFastMixerNBLogWriter);
5385 delete mAudioMixer;
5386 }
5387
5388 void MixerThread::onFirstRef() {
5389 PlaybackThread::onFirstRef();
5390
5391 audio_utils::lock_guard _l(mutex());
5392 if (mOutput != nullptr && mOutput->stream != nullptr) {
5393 status_t status = mOutput->stream->setLatencyModeCallback(this);
5394 if (status != INVALID_OPERATION) {
5395 updateHalSupportedLatencyModes_l();
5396 }
5397         // Default to enabled if the HAL supports it. This can be changed by AudioFlinger after
5398         // the thread construction according to AudioFlinger::mBluetoothLatencyModesEnabled
5399 mBluetoothLatencyModesEnabled.store(
5400 mOutput->audioHwDev->supportsBluetoothVariableLatency());
5401 }
5402 }
5403
5404 uint32_t MixerThread::correctLatency_l(uint32_t latency) const
5405 {
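    // With a FastMixer in use, audio is staged in the MonoPipe before reaching the HAL,
    // so convert the pipe's average fill level from frames to milliseconds and add it
    // to the reported latency.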
5406 if (mFastMixer != 0) {
5407 MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
5408 latency += (pipe->getAvgFrames() * 1000) / mSampleRate;
5409 }
5410 return latency;
5411 }
5412
5413 ssize_t MixerThread::threadLoop_write()
5414 {
5415 // FIXME we should only do one push per cycle; confirm this is true
5416 // Start the fast mixer if it's not already running
5417 if (mFastMixer != 0) {
5418 FastMixerStateQueue *sq = mFastMixer->sq();
5419 FastMixerState *state = sq->begin();
5420 if (state->mCommand != FastMixerState::MIX_WRITE &&
5421 (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) {
5422 if (state->mCommand == FastMixerState::COLD_IDLE) {
5423
5424 // FIXME workaround for first HAL write being CPU bound on some devices
5425 ATRACE_BEGIN("write");
5426 mOutput->write((char *)mSinkBuffer, 0);
5427 ATRACE_END();
5428
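            // The FastMixer decrements this futex before blocking in COLD_IDLE, so a previous
            // value of -1 means it is asleep and needs an explicit FUTEX_WAKE to observe the
            // new command.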
5429 int32_t old = android_atomic_inc(&mFastMixerFutex);
5430 if (old == -1) {
5431 (void) syscall(__NR_futex, &mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
5432 }
5433 #ifdef AUDIO_WATCHDOG
5434 if (mAudioWatchdog != 0) {
5435 mAudioWatchdog->resume();
5436 }
5437 #endif
5438 }
5439 state->mCommand = FastMixerState::MIX_WRITE;
5440 #ifdef FAST_THREAD_STATISTICS
5441 mFastMixerDumpState.increaseSamplingN(mAfThreadCallback->isLowRamDevice() ?
5442 FastThreadDumpState::kSamplingNforLowRamDevice : FastThreadDumpState::kSamplingN);
5443 #endif
5444 sq->end();
5445 {
5446 audio_utils::mutex::scoped_queue_wait_check queueWaitCheck(mFastMixer->getTid());
5447 sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
5448 }
5449 if (kUseFastMixer == FastMixer_Dynamic) {
5450 mNormalSink = mPipeSink;
5451 }
5452 } else {
5453 sq->end(false /*didModify*/);
5454 }
5455 }
5456 return PlaybackThread::threadLoop_write();
5457 }
5458
5459 void MixerThread::threadLoop_standby()
5460 {
5461 // Idle the fast mixer if it's currently running
5462 if (mFastMixer != 0) {
5463 FastMixerStateQueue *sq = mFastMixer->sq();
5464 FastMixerState *state = sq->begin();
5465 if (!(state->mCommand & FastMixerState::IDLE)) {
5466 // Report any frames trapped in the Monopipe
5467 MonoPipe *monoPipe = (MonoPipe *)mPipeSink.get();
5468 const long long pipeFrames = monoPipe->maxFrames() - monoPipe->availableToWrite();
5469 mLocalLog.log("threadLoop_standby: framesWritten:%lld suspendedFrames:%lld "
5470 "monoPipeWritten:%lld monoPipeLeft:%lld",
5471 (long long)mFramesWritten, (long long)mSuspendedFrames,
5472 (long long)mPipeSink->framesWritten(), pipeFrames);
5473 mLocalLog.log("threadLoop_standby: %s", mTimestamp.toString().c_str());
5474
5475 state->mCommand = FastMixerState::COLD_IDLE;
5476 state->mColdFutexAddr = &mFastMixerFutex;
5477 state->mColdGen++;
5478 mFastMixerFutex = 0;
5479 sq->end();
5480 // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now
5481 {
5482 audio_utils::mutex::scoped_queue_wait_check queueWaitCheck(mFastMixer->getTid());
5483 sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
5484 }
5485 if (kUseFastMixer == FastMixer_Dynamic) {
5486 mNormalSink = mOutputSink;
5487 }
5488 #ifdef AUDIO_WATCHDOG
5489 if (mAudioWatchdog != 0) {
5490 mAudioWatchdog->pause();
5491 }
5492 #endif
5493 } else {
5494 sq->end(false /*didModify*/);
5495 }
5496 }
5497 PlaybackThread::threadLoop_standby();
5498 }
5499
5500 bool PlaybackThread::waitingAsyncCallback_l()
5501 {
5502 return false;
5503 }
5504
5505 bool PlaybackThread::shouldStandby_l()
5506 {
5507 return !mStandby;
5508 }
5509
5510 bool PlaybackThread::waitingAsyncCallback()
5511 {
5512 audio_utils::lock_guard _l(mutex());
5513 return waitingAsyncCallback_l();
5514 }
5515
5516 // shared by MIXER and DIRECT, overridden by DUPLICATING
5517 void PlaybackThread::threadLoop_standby()
5518 {
5519 ALOGV("%s: audio hardware entering standby, mixer %p, suspend count %d",
5520 __func__, this, (int32_t)mSuspended);
5521 mOutput->standby();
5522 if (mUseAsyncWrite != 0) {
5523 // discard any pending drain or write ack by incrementing sequence
5524 mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
5525 mDrainSequence = (mDrainSequence + 2) & ~1;
5526 ALOG_ASSERT(mCallbackThread != 0);
5527 mCallbackThread->setWriteBlocked(mWriteAckSequence);
5528 mCallbackThread->setDraining(mDrainSequence);
5529 }
5530 mHwPaused = false;
5531 setHalLatencyMode_l();
5532 }
5533
5534 void PlaybackThread::onAddNewTrack_l()
5535 {
5536 ALOGV("signal playback thread");
5537 broadcast_l();
5538 }
5539
5540 void PlaybackThread::onAsyncError(bool isHardError)
5541 {
5542 auto allTrackPortIds = getTrackPortIds();
5543 for (int i = AUDIO_STREAM_SYSTEM; i < (int)AUDIO_STREAM_CNT; i++) {
5544 invalidateTracks((audio_stream_type_t)i);
5545 }
5546 if (isHardError) {
5547 mAfThreadCallback->onHardError(allTrackPortIds);
5548 }
5549 }
5550
5551 void MixerThread::threadLoop_mix()
5552 {
5553 // mix buffers...
5554 mAudioMixer->process();
5555 mCurrentWriteLength = mSinkBufferSize;
5556     // increase sleep time progressively when the application underrun condition clears.
5557     // Only increase sleep time if the mixer is ready for two consecutive times, to avoid
5558     // a steady state of alternating ready/not-ready conditions keeping the sleep time
5559     // at a level where we would underrun the audio HAL.
5560 if ((mSleepTimeUs == 0) && (sleepTimeShift > 0)) {
5561 sleepTimeShift--;
5562 }
5563 mSleepTimeUs = 0;
5564 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
5565 //TODO: delay standby when effects have a tail
5566
5567 }
5568
5569 void MixerThread::threadLoop_sleepTime()
5570 {
5571 // If no tracks are ready, sleep once for the duration of an output
5572 // buffer size, then write 0s to the output
5573 if (mSleepTimeUs == 0) {
5574 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
5575 if (mPipeSink.get() != nullptr && mPipeSink == mNormalSink) {
5576 // Using the Monopipe availableToWrite, we estimate the
5577 // sleep time to retry for more data (before we underrun).
5578 MonoPipe *monoPipe = static_cast<MonoPipe *>(mPipeSink.get());
5579 const ssize_t availableToWrite = mPipeSink->availableToWrite();
5580 const size_t pipeFrames = monoPipe->maxFrames();
5581 const size_t framesLeft = pipeFrames - max(availableToWrite, 0);
5582 // HAL_framecount <= framesDelay ~ framesLeft / 2 <= Normal_Mixer_framecount
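            // i.e. retry after roughly half of the frames remaining in the pipe have drained,
            // but no sooner than one HAL buffer and no later than one normal mixer buffer.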
5583 const size_t framesDelay = std::min(
5584 mNormalFrameCount, max(framesLeft / 2, mFrameCount));
5585 ALOGV("pipeFrames:%zu framesLeft:%zu framesDelay:%zu",
5586 pipeFrames, framesLeft, framesDelay);
5587 mSleepTimeUs = framesDelay * MICROS_PER_SECOND / mSampleRate;
5588 } else {
5589 mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
5590 if (mSleepTimeUs < kMinThreadSleepTimeUs) {
5591 mSleepTimeUs = kMinThreadSleepTimeUs;
5592 }
5593 // reduce sleep time in case of consecutive application underruns to avoid
5594 // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
5595 // duration we would end up writing less data than needed by the audio HAL if
5596 // the condition persists.
5597 if (sleepTimeShift < kMaxThreadSleepTimeShift) {
5598 sleepTimeShift++;
5599 }
5600 }
5601 } else {
5602 mSleepTimeUs = mIdleSleepTimeUs;
5603 }
5604 } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
5605 // clear out mMixerBuffer or mSinkBuffer, to ensure buffers are cleared
5606 // before effects processing or output.
5607 if (mMixerBufferValid) {
5608 memset(mMixerBuffer, 0, mMixerBufferSize);
5609 if (mType == SPATIALIZER) {
5610 memset(mSinkBuffer, 0, mSinkBufferSize);
5611 }
5612 } else {
5613 memset(mSinkBuffer, 0, mSinkBufferSize);
5614 }
5615 mSleepTimeUs = 0;
5616 ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED),
5617 "anticipated start");
5618 }
5619 // TODO add standby time extension fct of effect tail
5620 }
5621
5622 // prepareTracks_l() must be called with ThreadBase::mutex() held
5623 PlaybackThread::mixer_state MixerThread::prepareTracks_l(
5624 Vector<sp<IAfTrack>>* tracksToRemove)
5625 {
5626 // clean up deleted track ids in AudioMixer before allocating new tracks
5627 (void)mTracks.processDeletedTrackIds([this](int trackId) {
5628 // for each trackId, destroy it in the AudioMixer
5629 if (mAudioMixer->exists(trackId)) {
5630 mAudioMixer->destroy(trackId);
5631 }
5632 });
5633 mTracks.clearDeletedTrackIds();
5634
5635 mixer_state mixerStatus = MIXER_IDLE;
5636 // find out which tracks need to be processed
5637 size_t count = mActiveTracks.size();
5638 size_t mixedTracks = 0;
5639 size_t tracksWithEffect = 0;
5640 // counts only _active_ fast tracks
5641 size_t fastTracks = 0;
5642 uint32_t resetMask = 0; // bit mask of fast tracks that need to be reset
5643
5644 float masterVolume = mMasterVolume;
5645 bool masterMute = mMasterMute;
5646
5647 if (masterMute) {
5648 masterVolume = 0;
5649 }
5650 // Delegate master volume control to effect in output mix effect chain if needed
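    // The volume is handed to the chain as an 8.24 fixed-point value; setVolume() leaves
    // in v whatever the chain does not apply itself, and that remainder is rounded back
    // to float for the mixer to apply.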
5651 sp<IAfEffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
5652 if (chain != 0) {
5653 uint32_t v = (uint32_t)(masterVolume * (1 << 24));
5654 chain->setVolume(&v, &v);
5655 masterVolume = (float)((v + (1 << 23)) >> 24);
5656 chain.clear();
5657 }
5658
5659 // prepare a new state to push
5660 FastMixerStateQueue *sq = NULL;
5661 FastMixerState *state = NULL;
5662 bool didModify = false;
5663 FastMixerStateQueue::block_t block = FastMixerStateQueue::BLOCK_UNTIL_PUSHED;
5664 bool coldIdle = false;
5665 if (mFastMixer != 0) {
5666 sq = mFastMixer->sq();
5667 state = sq->begin();
5668 coldIdle = state->mCommand == FastMixerState::COLD_IDLE;
5669 }
5670
5671 mMixerBufferValid = false; // mMixerBuffer has no valid data until appropriate tracks found.
5672 mEffectBufferValid = false; // mEffectBuffer has no valid data until tracks found.
5673
5674 // DeferredOperations handles statistics after setting mixerStatus.
5675 class DeferredOperations {
5676 public:
5677 DeferredOperations(mixer_state *mixerStatus, ThreadMetrics *threadMetrics)
5678 : mMixerStatus(mixerStatus)
5679 , mThreadMetrics(threadMetrics) {}
5680
5681 // when leaving scope, tally frames properly.
5682 ~DeferredOperations() {
5683 // Tally underrun frames only if we are actually mixing (MIXER_TRACKS_READY)
5684 // because that is when the underrun occurs.
5685 // We do not distinguish between FastTracks and NormalTracks here.
5686 size_t maxUnderrunFrames = 0;
5687 if (*mMixerStatus == MIXER_TRACKS_READY && mUnderrunFrames.size() > 0) {
5688 for (const auto &underrun : mUnderrunFrames) {
5689 underrun.first->tallyUnderrunFrames(underrun.second);
5690 maxUnderrunFrames = max(underrun.second, maxUnderrunFrames);
5691 }
5692 }
5693 // send the max underrun frames for this mixer period
5694 mThreadMetrics->logUnderrunFrames(maxUnderrunFrames);
5695 }
5696
5697 // tallyUnderrunFrames() is called to update the track counters
5698 // with the number of underrun frames for a particular mixer period.
5699 // We defer tallying until we know the final mixer status.
5700 void tallyUnderrunFrames(const sp<IAfTrack>& track, size_t underrunFrames) {
5701 mUnderrunFrames.emplace_back(track, underrunFrames);
5702 }
5703
5704 private:
5705 const mixer_state * const mMixerStatus;
5706 ThreadMetrics * const mThreadMetrics;
5707 std::vector<std::pair<sp<IAfTrack>, size_t>> mUnderrunFrames;
5708 } deferredOperations(&mixerStatus, &mThreadMetrics);
5709 // implicit nested scope for variable capture
5710
5711 bool noFastHapticTrack = true;
5712 for (size_t i=0 ; i<count ; i++) {
5713 const sp<IAfTrack> t = mActiveTracks[i];
5714
5715 // this const just means the local variable doesn't change
5716 IAfTrack* const track = t.get();
5717
5718 // process fast tracks
5719 if (track->isFastTrack()) {
5720 LOG_ALWAYS_FATAL_IF(mFastMixer.get() == nullptr,
5721 "%s(%d): FastTrack(%d) present without FastMixer",
5722 __func__, id(), track->id());
5723
5724 if (track->getHapticPlaybackEnabled()) {
5725 noFastHapticTrack = false;
5726 }
5727
5728 // It's theoretically possible (though unlikely) for a fast track to be created
5729 // and then removed within the same normal mix cycle. This is not a problem, as
5730             // the track never becomes active so its fast mixer slot is never touched.
5731 // The converse, of removing an (active) track and then creating a new track
5732 // at the identical fast mixer slot within the same normal mix cycle,
5733 // is impossible because the slot isn't marked available until the end of each cycle.
5734 int j = track->fastIndex();
5735 ALOG_ASSERT(0 < j && j < (int)FastMixerState::sMaxFastTracks);
5736 ALOG_ASSERT(!(mFastTrackAvailMask & (1 << j)));
5737 FastTrack *fastTrack = &state->mFastTracks[j];
5738
5739 // Determine whether the track is currently in underrun condition,
5740 // and whether it had a recent underrun.
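            // The dump-state underrun counters are free-running, so deltas are taken modulo
            // UNDERRUN_MASK against the counts last recorded on the track, yielding the
            // underruns since the previous normal mix cycle.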
5741 FastTrackDump *ftDump = &mFastMixerDumpState.mTracks[j];
5742 FastTrackUnderruns underruns = ftDump->mUnderruns;
5743 uint32_t recentFull = (underruns.mBitFields.mFull -
5744 track->fastTrackUnderruns().mBitFields.mFull) & UNDERRUN_MASK;
5745 uint32_t recentPartial = (underruns.mBitFields.mPartial -
5746 track->fastTrackUnderruns().mBitFields.mPartial) & UNDERRUN_MASK;
5747 uint32_t recentEmpty = (underruns.mBitFields.mEmpty -
5748 track->fastTrackUnderruns().mBitFields.mEmpty) & UNDERRUN_MASK;
5749 uint32_t recentUnderruns = recentPartial + recentEmpty;
5750 track->fastTrackUnderruns() = underruns;
5751             // don't count underruns that occur while stopping or pausing,
5752             // or when stopped, which can occur when flush() is called while active
5753 size_t underrunFrames = 0;
5754 if (!(track->isStopping() || track->isPausing() || track->isStopped()) &&
5755 recentUnderruns > 0) {
5756 // FIXME fast mixer will pull & mix partial buffers, but we count as a full underrun
5757 underrunFrames = recentUnderruns * mFrameCount;
5758 }
5759 // Immediately account for FastTrack underruns.
5760 track->audioTrackServerProxy()->tallyUnderrunFrames(underrunFrames);
5761
5762 // This is similar to the state machine for normal tracks,
5763 // with a few modifications for fast tracks.
5764 bool isActive = true;
5765 switch (track->state()) {
5766 case IAfTrackBase::STOPPING_1:
5767 // track stays active in STOPPING_1 state until first underrun
5768 if (recentUnderruns > 0 || track->isTerminated()) {
5769 track->setState(IAfTrackBase::STOPPING_2);
5770 }
5771 break;
5772 case IAfTrackBase::PAUSING:
5773 // ramp down is not yet implemented
5774 track->setPaused();
5775 break;
5776 case IAfTrackBase::RESUMING:
5777 // ramp up is not yet implemented
5778 track->setState(IAfTrackBase::ACTIVE);
5779 break;
5780 case IAfTrackBase::ACTIVE:
5781 if (recentFull > 0 || recentPartial > 0) {
5782 // track has provided at least some frames recently: reset retry count
5783 track->retryCount() = kMaxTrackRetries;
5784 }
5785 if (recentUnderruns == 0) {
5786 // no recent underruns: stay active
5787 break;
5788 }
5789 // there has recently been an underrun of some kind
5790 if (track->sharedBuffer() == 0) {
5791 // were any of the recent underruns "empty" (no frames available)?
5792 if (recentEmpty == 0) {
5793 // no, then ignore the partial underruns as they are allowed indefinitely
5794 break;
5795 }
5796 // there has recently been an "empty" underrun: decrement the retry counter
5797 if (--(track->retryCount()) > 0) {
5798 break;
5799 }
5800 // indicate to client process that the track was disabled because of underrun;
5801 // it will then automatically call start() when data is available
5802 track->disable();
5803 // remove from active list, but state remains ACTIVE [confusing but true]
5804 isActive = false;
5805 break;
5806 }
5807 FALLTHROUGH_INTENDED;
5808 case IAfTrackBase::STOPPING_2:
5809 case IAfTrackBase::PAUSED:
5810 case IAfTrackBase::STOPPED:
5811 case IAfTrackBase::FLUSHED: // flush() while active
5812 // Check for presentation complete if track is inactive
5813 // We have consumed all the buffers of this track.
5814 // This would be incomplete if we auto-paused on underrun
5815 {
5816 uint32_t latency = 0;
5817 status_t result = mOutput->stream->getLatency(&latency);
5818 ALOGE_IF(result != OK,
5819 "Error when retrieving output stream latency: %d", result);
5820 size_t audioHALFrames = (latency * mSampleRate) / 1000;
5821 int64_t framesWritten = mBytesWritten / mFrameSize;
5822 if (!(mStandby || track->presentationComplete(framesWritten, audioHALFrames))) {
5823 // track stays in active list until presentation is complete
5824 break;
5825 }
5826 }
5827 if (track->isStopping_2()) {
5828 track->setState(IAfTrackBase::STOPPED);
5829 }
5830 if (track->isStopped()) {
5831 // Can't reset directly, as fast mixer is still polling this track
5832 // track->reset();
5833 // So instead mark this track as needing to be reset after push with ack
5834 resetMask |= 1 << i;
5835 }
5836 isActive = false;
5837 break;
5838 case IAfTrackBase::IDLE:
5839 default:
5840 LOG_ALWAYS_FATAL("unexpected track state %d", (int)track->state());
5841 }
5842
5843 if (isActive) {
5844 // was it previously inactive?
5845 if (!(state->mTrackMask & (1 << j))) {
5846 ExtendedAudioBufferProvider *eabp = track->asExtendedAudioBufferProvider();
5847 VolumeProvider *vp = track->asVolumeProvider();
5848 fastTrack->mBufferProvider = eabp;
5849 fastTrack->mVolumeProvider = vp;
5850 fastTrack->mChannelMask = track->channelMask();
5851 fastTrack->mFormat = track->format();
5852 fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
5853 fastTrack->mHapticScale = track->getHapticScale();
5854 fastTrack->mHapticMaxAmplitude = track->getHapticMaxAmplitude();
5855 fastTrack->mGeneration++;
5856 snprintf(fastTrack->mTraceName, sizeof(fastTrack->mTraceName),
5857 "%s%s", AUDIO_TRACE_PREFIX_AUDIO_TRACK_FRDY,
5858 track->getTraceSuffix().c_str());
5859 state->mTrackMask |= 1 << j;
5860 didModify = true;
5861 // no acknowledgement required for newly active tracks
5862 }
5863 sp<AudioTrackServerProxy> proxy = track->audioTrackServerProxy();
5864 float volume;
5865 if (!audioserver_flags::portid_volume_management()) {
5866 if (track->isPlaybackRestricted() || mStreamTypes[track->streamType()].mute) {
5867 volume = 0.f;
5868 } else {
5869 volume = masterVolume * mStreamTypes[track->streamType()].volume;
5870 }
5871 } else {
5872 if (track->isPlaybackRestricted() || track->getPortMute()) {
5873 volume = 0.f;
5874 } else {
5875 volume = masterVolume * track->getPortVolume();
5876 }
5877 }
5878 handleVoipVolume_l(&volume);
5879
5880 // cache the combined master volume and stream type volume for fast mixer; this
5881 // lacks any synchronization or barrier so VolumeProvider may read a stale value
5882 const float vh = track->getVolumeHandler()->getVolume(
5883 proxy->framesReleased()).first;
5884 volume *= vh;
5885 track->setCachedVolume(volume);
5886 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
5887 float vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
5888 float vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
5889 if (!audioserver_flags::portid_volume_management()) {
5890 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
5891 /*muteState=*/{masterVolume == 0.f,
5892 mStreamTypes[track->streamType()].volume == 0.f,
5893 mStreamTypes[track->streamType()].mute,
5894 track->isPlaybackRestricted(),
5895 vlf == 0.f && vrf == 0.f,
5896 vh == 0.f,
5897 /*muteFromPortVolume=*/false});
5898 } else {
5899 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
5900 /*muteState=*/{masterVolume == 0.f,
5901 track->getPortVolume() == 0.f,
5902 /* muteFromStreamMuted= */ false,
5903 track->isPlaybackRestricted(),
5904 vlf == 0.f && vrf == 0.f,
5905 vh == 0.f,
5906 track->getPortMute()});
5907 }
5908 vlf *= volume;
5909 vrf *= volume;
5910
5911 if (track->getInternalMute()) {
5912 vlf = 0.f;
5913 vrf = 0.f;
5914 }
5915
5916 track->setFinalVolume(vlf, vrf);
5917 ++fastTracks;
5918 } else {
5919 // was it previously active?
5920 if (state->mTrackMask & (1 << j)) {
5921 fastTrack->mBufferProvider = NULL;
5922 fastTrack->mGeneration++;
5923 state->mTrackMask &= ~(1 << j);
5924 didModify = true;
5925 // If any fast tracks were removed, we must wait for acknowledgement
5926 // because we're about to decrement the last sp<> on those tracks.
5927 block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
5928 } else {
5929 // ALOGW rather than LOG_ALWAYS_FATAL because it seems there are cases where an
5930 // AudioTrack may start (which may not be with a start() but with a write()
5931 // after underrun) and immediately paused or released. In that case the
5932 // FastTrack state hasn't had time to update.
5933 // TODO Remove the ALOGW when this theory is confirmed.
5934 ALOGW("fast track %d should have been active; "
5935 "mState=%d, mTrackMask=%#x, recentUnderruns=%u, isShared=%d",
5936 j, (int)track->state(), state->mTrackMask, recentUnderruns,
5937 track->sharedBuffer() != 0);
5938 // Since the FastMixer state already has the track inactive, do nothing here.
5939 }
5940 tracksToRemove->add(track);
5941 // Avoids a misleading display in dumpsys
5942 track->fastTrackUnderruns().mBitFields.mMostRecent = UNDERRUN_FULL;
5943 }
5944 if (fastTrack->mHapticPlaybackEnabled != track->getHapticPlaybackEnabled()) {
5945 fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
5946 didModify = true;
5947 }
5948 continue;
5949 }
5950
5951 { // local variable scope to avoid goto warning
5952
5953 audio_track_cblk_t* cblk = track->cblk();
5954
5955 // The first time a track is added we wait
5956 // for all its buffers to be filled before processing it
5957 const int trackId = track->id();
5958
5959 // if an active track doesn't exist in the AudioMixer, create it.
5960 // use the trackId as the AudioMixer name.
5961 if (!mAudioMixer->exists(trackId)) {
5962 status_t status = mAudioMixer->create(
5963 trackId,
5964 track->channelMask(),
5965 track->format(),
5966 track->sessionId());
5967 if (status != OK) {
5968 ALOGW("%s(): AudioMixer cannot create track(%d)"
5969 " mask %#x, format %#x, sessionId %d",
5970 __func__, trackId,
5971 track->channelMask(), track->format(), track->sessionId());
5972 tracksToRemove->add(track);
5973 track->invalidate(); // consider it dead.
5974 continue;
5975 }
5976 }
5977
5978 // make sure that we have enough frames to mix one full buffer.
5979 // enforce this condition only once to enable draining the buffer in case the client
5980 // app does not call stop() and relies on underrun to stop:
5981 // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
5982 // during last round
5983 size_t desiredFrames;
5984 const uint32_t sampleRate = track->audioTrackServerProxy()->getSampleRate();
5985 const AudioPlaybackRate playbackRate = track->audioTrackServerProxy()->getPlaybackRate();
5986
5987 desiredFrames = sourceFramesNeededWithTimestretch(
5988 sampleRate, mNormalFrameCount, mSampleRate, playbackRate.mSpeed);
5989 // TODO: ONLY USED FOR LEGACY RESAMPLERS, remove when they are removed.
5990 // add frames already consumed but not yet released by the resampler
5991 // because mAudioTrackServerProxy->framesReady() will include these frames
5992 desiredFrames += mAudioMixer->getUnreleasedFrames(trackId);
5993
5994 uint32_t minFrames = 1;
5995 if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
5996 (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
5997 minFrames = desiredFrames;
5998 }
5999
6000 size_t framesReady = track->framesReady();
6001 if (ATRACE_ENABLED()) [[unlikely]] {
6002 ATRACE_INT(std::string(AUDIO_TRACE_PREFIX_AUDIO_TRACK_NRDY)
6003 .append(track->getTraceSuffix()).c_str(), framesReady);
6004 }
6005 if ((framesReady >= minFrames) && track->isReady() &&
6006 !track->isPaused() && !track->isTerminated())
6007 {
6008 ALOGVV("track(%d) s=%08x [OK] on thread %p", trackId, cblk->mServer, this);
6009
6010 mixedTracks++;
6011
6012                 // track->mainBuffer() being neither mSinkBuffer nor mMixerBuffer means
6013 // there is an effect chain connected to the track
6014 chain.clear();
6015 if (track->mainBuffer() != mSinkBuffer &&
6016 track->mainBuffer() != mMixerBuffer) {
6017 if (mEffectBufferEnabled) {
6018 mEffectBufferValid = true; // Later can set directly.
6019 }
6020 chain = getEffectChain_l(track->sessionId());
6021 // Delegate volume control to effect in track effect chain if needed
6022 if (chain != 0) {
6023 tracksWithEffect++;
6024 } else {
6025 ALOGW("prepareTracks_l(): track(%d) attached to effect but no chain found on "
6026 "session %d",
6027 trackId, track->sessionId());
6028 }
6029 }
6030
6031
6032 int param = AudioMixer::VOLUME;
6033 if (track->fillingStatus() == IAfTrack::FS_FILLED) {
6034 // no ramp for the first volume setting
6035 track->fillingStatus() = IAfTrack::FS_ACTIVE;
6036 if (track->state() == IAfTrackBase::RESUMING) {
6037 track->setState(IAfTrackBase::ACTIVE);
6038 // If a new track is paused immediately after start, do not ramp on resume.
6039 if (cblk->mServer != 0) {
6040 param = AudioMixer::RAMP_VOLUME;
6041 }
6042 }
6043 mAudioMixer->setParameter(trackId, AudioMixer::RESAMPLE, AudioMixer::RESET, NULL);
6044 mLeftVolFloat = -1.0;
6045 // FIXME should not make a decision based on mServer
6046 } else if (cblk->mServer != 0) {
6047 // If the track is stopped before the first frame was mixed,
6048 // do not apply ramp
6049 param = AudioMixer::RAMP_VOLUME;
6050 }
6051
6052 // compute volume for this track
6053 uint32_t vl, vr; // in U8.24 integer format
6054 float vlf, vrf, vaf; // in [0.0, 1.0] float format
6055 // read original volumes with volume control
6056 // Always fetch volumeshaper volume to ensure state is updated.
6057 const sp<AudioTrackServerProxy> proxy = track->audioTrackServerProxy();
6058 const float vh = track->getVolumeHandler()->getVolume(
6059 track->audioTrackServerProxy()->framesReleased()).first;
6060 float v;
6061 if (!audioserver_flags::portid_volume_management()) {
6062 v = masterVolume * mStreamTypes[track->streamType()].volume;
6063 if (mStreamTypes[track->streamType()].mute || track->isPlaybackRestricted()) {
6064 v = 0;
6065 }
6066 } else {
6067 v = masterVolume * track->getPortVolume();
6068 if (track->isPlaybackRestricted() || track->getPortMute()) {
6069 v = 0;
6070 }
6071 }
6072 handleVoipVolume_l(&v);
6073
6074 if (track->isPausing()) {
6075 vl = vr = 0;
6076 vlf = vrf = vaf = 0.;
6077 track->setPaused();
6078 } else {
6079 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
6080 vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
6081 vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
6082 // track volumes come from shared memory, so can't be trusted and must be clamped
6083 if (vlf > GAIN_FLOAT_UNITY) {
6084 ALOGV("Track left volume out of range: %.3g", vlf);
6085 vlf = GAIN_FLOAT_UNITY;
6086 }
6087 if (vrf > GAIN_FLOAT_UNITY) {
6088 ALOGV("Track right volume out of range: %.3g", vrf);
6089 vrf = GAIN_FLOAT_UNITY;
6090 }
6091 if (!audioserver_flags::portid_volume_management()) {
6092 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
6093 /*muteState=*/{masterVolume == 0.f,
6094 mStreamTypes[track->streamType()].volume == 0.f,
6095 mStreamTypes[track->streamType()].mute,
6096 track->isPlaybackRestricted(),
6097 vlf == 0.f && vrf == 0.f,
6098 vh == 0.f,
6099 /*muteFromPortVolume=*/false});
6100 } else {
6101 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
6102 /*muteState=*/{masterVolume == 0.f,
6103 track->getPortVolume() == 0.f,
6104 /* muteFromStreamMuted= */ false,
6105 track->isPlaybackRestricted(),
6106 vlf == 0.f && vrf == 0.f,
6107 vh == 0.f,
6108 track->getPortMute()});
6109 }
6110 // now apply the master volume and stream type volume and shaper volume
6111 vlf *= v * vh;
6112 vrf *= v * vh;
6113 // assuming master volume and stream type volume each go up to 1.0,
6114 // then derive vl and vr as U8.24 versions for the effect chain
6115 const float scaleto8_24 = MAX_GAIN_INT * MAX_GAIN_INT;
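                    // MAX_GAIN_INT is the U4.12 unity gain (compare getSendLevel_U4_12() below),
                    // so its square equals 1 << 24 and rescales a [0.0, 1.0] float into U8.24.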
6116 vl = (uint32_t) (scaleto8_24 * vlf);
6117 vr = (uint32_t) (scaleto8_24 * vrf);
6118 // vl and vr are now in U8.24 format
6119 uint16_t sendLevel = proxy->getSendLevel_U4_12();
6120 // send level comes from shared memory and so may be corrupt
6121 if (sendLevel > MAX_GAIN_INT) {
6122 ALOGV("Track send level out of range: %04X", sendLevel);
6123 sendLevel = MAX_GAIN_INT;
6124 }
6125 // vaf is represented as [0.0, 1.0] float by rescaling sendLevel
6126 vaf = v * sendLevel * (1. / MAX_GAIN_INT);
6127 }
6128
6129 if (track->getInternalMute()) {
6130 vrf = 0.f;
6131 vlf = 0.f;
6132 }
6133
6134 track->setFinalVolume(vlf, vrf);
6135
6136 // Delegate volume control to effect in track effect chain if needed
6137 if (chain != 0 && chain->setVolume(&vl, &vr)) {
6138 // Do not ramp volume if volume is controlled by effect
6139 param = AudioMixer::VOLUME;
6140 // Update remaining floating point volume levels
6141 vlf = (float)vl / (1 << 24);
6142 vrf = (float)vr / (1 << 24);
6143 track->setHasVolumeController(true);
6144 } else {
6145 // force no volume ramp when volume controller was just disabled or removed
6146 // from effect chain to avoid volume spike
6147 if (track->hasVolumeController()) {
6148 param = AudioMixer::VOLUME;
6149 }
6150 track->setHasVolumeController(false);
6151 }
6152
6153 // XXX: these things DON'T need to be done each time
6154 mAudioMixer->setBufferProvider(trackId, track->asExtendedAudioBufferProvider());
6155 mAudioMixer->enable(trackId);
6156
6157 mAudioMixer->setParameter(trackId, param, AudioMixer::VOLUME0, &vlf);
6158 mAudioMixer->setParameter(trackId, param, AudioMixer::VOLUME1, &vrf);
6159 mAudioMixer->setParameter(trackId, param, AudioMixer::AUXLEVEL, &vaf);
6160 mAudioMixer->setParameter(
6161 trackId,
6162 AudioMixer::TRACK,
6163 AudioMixer::FORMAT, (void *)track->format());
6164 mAudioMixer->setParameter(
6165 trackId,
6166 AudioMixer::TRACK,
6167 AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
6168
6169 if (mType == SPATIALIZER && !track->isSpatialized()) {
6170 mAudioMixer->setParameter(
6171 trackId,
6172 AudioMixer::TRACK,
6173 AudioMixer::MIXER_CHANNEL_MASK,
6174 (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
6175 } else {
6176 mAudioMixer->setParameter(
6177 trackId,
6178 AudioMixer::TRACK,
6179 AudioMixer::MIXER_CHANNEL_MASK,
6180 (void *)(uintptr_t)(mMixerChannelMask | mHapticChannelMask));
6181 }
6182
6183 // limit track sample rate to 2 x output sample rate, which changes at re-configuration
6184 uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
6185 uint32_t reqSampleRate = proxy->getSampleRate();
6186 if (reqSampleRate == 0) {
6187 reqSampleRate = mSampleRate;
6188 } else if (reqSampleRate > maxSampleRate) {
6189 reqSampleRate = maxSampleRate;
6190 }
6191 mAudioMixer->setParameter(
6192 trackId,
6193 AudioMixer::RESAMPLE,
6194 AudioMixer::SAMPLE_RATE,
6195 (void *)(uintptr_t)reqSampleRate);
6196
6197 mAudioMixer->setParameter(
6198 trackId,
6199 AudioMixer::TIMESTRETCH,
6200 AudioMixer::PLAYBACK_RATE,
6201 // cast away constness for this generic API.
6202 const_cast<void *>(reinterpret_cast<const void *>(&playbackRate)));
6203
6204 /*
6205 * Select the appropriate output buffer for the track.
6206 *
6207 * Tracks with effects go into their own effects chain buffer
6208 * and from there into either mEffectBuffer or mSinkBuffer.
6209 *
6210 * Other tracks can use mMixerBuffer for higher precision
6211 * channel accumulation. If this buffer is enabled
6212 * (mMixerBufferEnabled true), then selected tracks will accumulate
6213 * into it.
6214 *
6215 */
6216 if (mMixerBufferEnabled
6217 && (track->mainBuffer() == mSinkBuffer
6218 || track->mainBuffer() == mMixerBuffer)) {
6219 if (mType == SPATIALIZER && !track->isSpatialized()) {
6220 mAudioMixer->setParameter(
6221 trackId,
6222 AudioMixer::TRACK,
6223 AudioMixer::MIXER_FORMAT, (void *)mEffectBufferFormat);
6224 mAudioMixer->setParameter(
6225 trackId,
6226 AudioMixer::TRACK,
6227 AudioMixer::MAIN_BUFFER, (void *)mPostSpatializerBuffer);
6228 } else {
6229 mAudioMixer->setParameter(
6230 trackId,
6231 AudioMixer::TRACK,
6232 AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
6233 mAudioMixer->setParameter(
6234 trackId,
6235 AudioMixer::TRACK,
6236 AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
6237 // TODO: override track->mainBuffer()?
6238 mMixerBufferValid = true;
6239 }
6240 } else {
6241 mAudioMixer->setParameter(
6242 trackId,
6243 AudioMixer::TRACK,
6244 AudioMixer::MIXER_FORMAT, (void *)AUDIO_FORMAT_PCM_FLOAT);
6245 mAudioMixer->setParameter(
6246 trackId,
6247 AudioMixer::TRACK,
6248 AudioMixer::MAIN_BUFFER, (void *)track->mainBuffer());
6249 }
6250 mAudioMixer->setParameter(
6251 trackId,
6252 AudioMixer::TRACK,
6253 AudioMixer::AUX_BUFFER, (void *)track->auxBuffer());
6254 mAudioMixer->setParameter(
6255 trackId,
6256 AudioMixer::TRACK,
6257 AudioMixer::HAPTIC_ENABLED, (void *)(uintptr_t)track->getHapticPlaybackEnabled());
6258 const os::HapticScale hapticScale = track->getHapticScale();
6259 mAudioMixer->setParameter(
6260 trackId,
6261 AudioMixer::TRACK,
6262 AudioMixer::HAPTIC_SCALE, (void *)&hapticScale);
6263 const float hapticMaxAmplitude = track->getHapticMaxAmplitude();
6264 mAudioMixer->setParameter(
6265 trackId,
6266 AudioMixer::TRACK,
6267 AudioMixer::HAPTIC_MAX_AMPLITUDE, (void *)&hapticMaxAmplitude);
6268
6269 // reset retry count
6270 track->retryCount() = kMaxTrackRetries;
6271
6272 // If one track is ready, set the mixer ready if:
6273 // - the mixer was not ready during previous round OR
6274 // - no other track is not ready
6275 if (mMixerStatusIgnoringFastTracks != MIXER_TRACKS_READY ||
6276 mixerStatus != MIXER_TRACKS_ENABLED) {
6277 mixerStatus = MIXER_TRACKS_READY;
6278 }
6279
6280 // Enable the next few lines to instrument a test for underrun log handling.
6281 // TODO: Remove when we have a better way of testing the underrun log.
6282 #if 0
6283 static int i;
6284 if ((++i & 0xf) == 0) {
6285 deferredOperations.tallyUnderrunFrames(track, 10 /* underrunFrames */);
6286 }
6287 #endif
6288 } else {
6289 size_t underrunFrames = 0;
6290 if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
6291                     ALOGV("track(%d) underrun, track state %s framesReady(%zu) < framesDesired(%zu)",
6292 trackId, track->getTrackStateAsString(), framesReady, desiredFrames);
6293 underrunFrames = desiredFrames;
6294 }
6295 deferredOperations.tallyUnderrunFrames(track, underrunFrames);
6296
6297 // clear effect chain input buffer if an active track underruns to avoid sending
6298 // previous audio buffer again to effects
6299 chain = getEffectChain_l(track->sessionId());
6300 if (chain != 0) {
6301 chain->clearInputBuffer();
6302 }
6303
6304 ALOGVV("track(%d) s=%08x [NOT READY] on thread %p", trackId, cblk->mServer, this);
6305 if ((track->sharedBuffer() != 0) || track->isTerminated() ||
6306 track->isStopped() || track->isPaused()) {
6307 // We have consumed all the buffers of this track.
6308 // Remove it from the list of active tracks.
6309 // TODO: use actual buffer filling status instead of latency when available from
6310 // audio HAL
6311 size_t audioHALFrames = (latency_l() * mSampleRate) / 1000;
6312 int64_t framesWritten = mBytesWritten / mFrameSize;
6313 if (mStandby || track->presentationComplete(framesWritten, audioHALFrames)) {
6314 if (track->isStopped()) {
6315 track->reset();
6316 }
6317 tracksToRemove->add(track);
6318 }
6319 } else {
6320 // No buffers for this track. Give it a few chances to
6321 // fill a buffer, then remove it from active list.
6322 if (--(track->retryCount()) <= 0) {
6323 ALOGI("%s BUFFER TIMEOUT: remove track(%d) from active list due to underrun"
6324 " on thread %d", __func__, trackId, mId);
6325 tracksToRemove->add(track);
6326 // indicate to client process that the track was disabled because of underrun;
6327 // it will then automatically call start() when data is available
6328 track->disable();
6329 // If one track is not ready, mark the mixer also not ready if:
6330 // - the mixer was ready during previous round OR
6331 // - no other track is ready
6332 } else if (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY ||
6333 mixerStatus != MIXER_TRACKS_READY) {
6334 mixerStatus = MIXER_TRACKS_ENABLED;
6335 }
6336 }
6337 mAudioMixer->disable(trackId);
6338 }
6339
6340 } // local variable scope to avoid goto warning
6341
6342 }
6343
6344 if (mHapticChannelMask != AUDIO_CHANNEL_NONE && sq != NULL) {
6345         // When no fast track is playing haptics and a FastMixer exists, enable haptic
6346         // playback on the first FastTrack, which provides the mixed data from the normal
6347         // tracks, so that haptic data is still rendered.
6348 FastTrack *fastTrack = &state->mFastTracks[0];
6349 if (fastTrack->mHapticPlaybackEnabled != noFastHapticTrack) {
6350 fastTrack->mHapticPlaybackEnabled = noFastHapticTrack;
6351 didModify = true;
6352 }
6353 }
6354
6355 // Push the new FastMixer state if necessary
6356 [[maybe_unused]] bool pauseAudioWatchdog = false;
6357 if (didModify) {
6358 state->mFastTracksGen++;
6359 // if the fast mixer was active, but now there are no fast tracks, then put it in cold idle
6360 if (kUseFastMixer == FastMixer_Dynamic &&
6361 state->mCommand == FastMixerState::MIX_WRITE && state->mTrackMask <= 1) {
6362 state->mCommand = FastMixerState::COLD_IDLE;
6363 state->mColdFutexAddr = &mFastMixerFutex;
6364 state->mColdGen++;
6365 mFastMixerFutex = 0;
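            // zero the futex word so that the FastMixer, once it observes COLD_IDLE, will block
            // on mFastMixerFutex until it is explicitly woken again.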
6366 if (kUseFastMixer == FastMixer_Dynamic) {
6367 mNormalSink = mOutputSink;
6368 }
6369 // If we go into cold idle, need to wait for acknowledgement
6370 // so that fast mixer stops doing I/O.
6371 block = FastMixerStateQueue::BLOCK_UNTIL_ACKED;
6372 pauseAudioWatchdog = true;
6373 }
6374 }
6375 if (sq != NULL) {
6376 sq->end(didModify);
6377 // No need to block if the FastMixer is in COLD_IDLE as the FastThread
6378 // is not active. (We BLOCK_UNTIL_ACKED when entering COLD_IDLE
6379 // when bringing the output sink into standby.)
6380 //
6381 // We will get the latest FastMixer state when we come out of COLD_IDLE.
6382 //
6383 // This occurs with BT suspend when we idle the FastMixer with
6384 // active tracks, which may be added or removed.
6385 {
6386 audio_utils::mutex::scoped_queue_wait_check queueWaitCheck(mFastMixer->getTid());
6387 sq->push(coldIdle ? FastMixerStateQueue::BLOCK_NEVER : block);
6388 }
6389 }
6390 #ifdef AUDIO_WATCHDOG
6391 if (pauseAudioWatchdog && mAudioWatchdog != 0) {
6392 mAudioWatchdog->pause();
6393 }
6394 #endif
6395
6396 // Now perform the deferred reset on fast tracks that have stopped
6397 while (resetMask != 0) {
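        // __builtin_ctz(resetMask) returns the index of the lowest set bit, i.e. the next
        // active-track slot that was flagged above for a deferred reset.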
6398 size_t i = __builtin_ctz(resetMask);
6399 ALOG_ASSERT(i < count);
6400 resetMask &= ~(1 << i);
6401 sp<IAfTrack> track = mActiveTracks[i];
6402 ALOG_ASSERT(track->isFastTrack() && track->isStopped());
6403 track->reset();
6404 }
6405
6406 // Track destruction may occur outside of threadLoop once it is removed from active tracks.
6407 // Ensure the AudioMixer doesn't have a raw "buffer provider" pointer to the track if
6408 // it ceases to be active, to allow safe removal from the AudioMixer at the start
6409 // of prepareTracks_l(); this releases any outstanding buffer back to the track.
6410 // See also the implementation of destroyTrack_l().
6411 for (const auto &track : *tracksToRemove) {
6412 const int trackId = track->id();
6413 if (mAudioMixer->exists(trackId)) { // Normal tracks here, fast tracks in FastMixer.
6414 mAudioMixer->setBufferProvider(trackId, nullptr /* bufferProvider */);
6415 }
6416 }
6417
6418 // remove all the tracks that need to be...
6419 removeTracks_l(*tracksToRemove);
6420
6421 if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0 ||
6422 getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE) != 0) {
6423 mEffectBufferValid = true;
6424 }
6425
6426 if (mEffectBufferValid) {
6427 // as long as there are effects we should clear the effects buffer, to avoid
6428 // passing a non-clean buffer to the effect chain
6429 memset(mEffectBuffer, 0, mEffectBufferSize);
6430 if (mType == SPATIALIZER) {
6431 memset(mPostSpatializerBuffer, 0, mPostSpatializerBufferSize);
6432 }
6433 }
6434 // sink or mix buffer must be cleared if all tracks are connected to an
6435 // effect chain as in this case the mixer will not write to the sink or mix buffer
6436 // and track effects will accumulate into it
6437 // always clear sink buffer for spatializer output as the output of the spatializer
6438 // effect will be accumulated into it
6439 if ((mBytesRemaining == 0) && (((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
6440 (mixedTracks == 0 && fastTracks > 0)) || (mType == SPATIALIZER))) {
6441 // FIXME as a performance optimization, should remember previous zero status
6442 if (mMixerBufferValid) {
6443 memset(mMixerBuffer, 0, mMixerBufferSize);
6444 // TODO: In testing, mSinkBuffer below need not be cleared because
6445 // the PlaybackThread::threadLoop() copies mMixerBuffer into mSinkBuffer
6446 // after mixing.
6447 //
6448 // To enforce this guarantee:
6449 // ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
6450 // (mixedTracks == 0 && fastTracks > 0))
6451 // must imply MIXER_TRACKS_READY.
6452 // Later, we may clear buffers regardless, and skip much of this logic.
6453 }
6454 // FIXME as a performance optimization, should remember previous zero status
6455 memset(mSinkBuffer, 0, mNormalFrameCount * mFrameSize);
6456 }
6457
6458 // if any fast tracks, then status is ready
6459 mMixerStatusIgnoringFastTracks = mixerStatus;
6460 if (fastTracks > 0) {
6461 mixerStatus = MIXER_TRACKS_READY;
6462 }
6463 return mixerStatus;
6464 }
6465
6466 // trackCountForUid_l() must be called with ThreadBase::mutex() held
6467 uint32_t PlaybackThread::trackCountForUid_l(uid_t uid) const
6468 {
6469 uint32_t trackCount = 0;
6470 for (size_t i = 0; i < mTracks.size() ; i++) {
6471 if (mTracks[i]->uid() == uid) {
6472 trackCount++;
6473 }
6474 }
6475 return trackCount;
6476 }
6477
6478 bool PlaybackThread::IsTimestampAdvancing::check(AudioStreamOut* output)
6479 {
6480 // Check the timestamp to see if it's advancing once every 150ms. If we check too frequently, we
6481 // could falsely detect that the frame position has stalled due to underrun because we haven't
6482 // given the Audio HAL enough time to update.
6483 const nsecs_t nowNs = systemTime();
6484 if (nowNs - mPreviousNs < mMinimumTimeBetweenChecksNs) {
6485 return mLatchedValue;
6486 }
6487 mPreviousNs = nowNs;
6488 mLatchedValue = false;
6489 // Determine if the presentation position is still advancing.
6490 uint64_t position = 0;
6491 struct timespec unused;
6492 const status_t ret = output->getPresentationPosition(&position, &unused);
6493 if (ret == NO_ERROR) {
6494 if (position != mPreviousPosition) {
6495 mPreviousPosition = position;
6496 mLatchedValue = true;
6497 }
6498 }
6499 return mLatchedValue;
6500 }
6501
6502 void PlaybackThread::IsTimestampAdvancing::clear()
6503 {
6504 mLatchedValue = true;
6505 mPreviousPosition = 0;
6506 mPreviousNs = 0;
6507 }
6508
6509 // isTrackAllowed_l() must be called with ThreadBase::mutex() held
6510 bool MixerThread::isTrackAllowed_l(
6511 audio_channel_mask_t channelMask, audio_format_t format,
6512 audio_session_t sessionId, uid_t uid) const
6513 {
6514 if (!PlaybackThread::isTrackAllowed_l(channelMask, format, sessionId, uid)) {
6515 return false;
6516 }
6517 // Check validity as we don't call AudioMixer::create() here.
6518 if (!mAudioMixer->isValidFormat(format)) {
6519 ALOGW("%s: invalid format: %#x", __func__, format);
6520 return false;
6521 }
6522 if (!mAudioMixer->isValidChannelMask(channelMask)) {
6523 ALOGW("%s: invalid channelMask: %#x", __func__, channelMask);
6524 return false;
6525 }
6526 return true;
6527 }
6528
6529 // checkForNewParameter_l() must be called with ThreadBase::mutex() held
6530 bool MixerThread::checkForNewParameter_l(const String8& keyValuePair,
6531 status_t& status)
6532 {
6533 bool reconfig = false;
6534 status = NO_ERROR;
6535
6536 AutoPark<FastMixer> park(mFastMixer);
6537
6538 AudioParameter param = AudioParameter(keyValuePair);
6539 int value;
6540 if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
6541 reconfig = true;
6542 }
6543 if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
6544 if (!isValidPcmSinkFormat(static_cast<audio_format_t>(value))) {
6545 status = BAD_VALUE;
6546 } else {
6547 // no need to save value, since it's constant
6548 reconfig = true;
6549 }
6550 }
6551 if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
6552 if (!isValidPcmSinkChannelMask(static_cast<audio_channel_mask_t>(value))) {
6553 status = BAD_VALUE;
6554 } else {
6555 // no need to save value, since it's constant
6556 reconfig = true;
6557 }
6558 }
6559 if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
6560 // do not accept frame count changes if tracks are open as the track buffer
6561 // size depends on frame count and correct behavior would not be guaranteed
6562 // if frame count is changed after track creation
6563 if (!mTracks.isEmpty()) {
6564 status = INVALID_OPERATION;
6565 } else {
6566 reconfig = true;
6567 }
6568 }
6569 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
6570 LOG_FATAL("Should not set routing device in MixerThread");
6571 }
6572
6573 if (status == NO_ERROR) {
6574 status = mOutput->stream->setParameters(keyValuePair);
6575 if (!mStandby && status == INVALID_OPERATION) {
6576 ALOGW("%s: setParameters failed with keyValuePair %s, entering standby",
6577 __func__, keyValuePair.c_str());
6578 mOutput->standby();
6579 mThreadMetrics.logEndInterval();
6580 mThreadSnapshot.onEnd();
6581 setStandby_l();
6582 mBytesWritten = 0;
6583 status = mOutput->stream->setParameters(keyValuePair);
6584 }
6585 if (status == NO_ERROR && reconfig) {
6586 readOutputParameters_l();
6587 delete mAudioMixer;
6588 mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
6589 for (const auto &track : mTracks) {
6590 const int trackId = track->id();
6591 const status_t createStatus = mAudioMixer->create(
6592 trackId,
6593 track->channelMask(),
6594 track->format(),
6595 track->sessionId());
6596 ALOGW_IF(createStatus != NO_ERROR,
6597 "%s(): AudioMixer cannot create track(%d)"
6598 " mask %#x, format %#x, sessionId %d",
6599 __func__,
6600 trackId, track->channelMask(), track->format(), track->sessionId());
6601 }
6602 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
6603 }
6604 }
6605
6606 return reconfig;
6607 }
6608
6609
6610 void MixerThread::dumpInternals_l(int fd, const Vector<String16>& args)
6611 {
6612 PlaybackThread::dumpInternals_l(fd, args);
6613 dprintf(fd, " Thread throttle time (msecs): %u\n", (uint32_t)mThreadThrottleTimeMs);
6614 dprintf(fd, " AudioMixer tracks: %s\n", mAudioMixer->trackNames().c_str());
6615 dprintf(fd, " Master mono: %s\n", mMasterMono ? "on" : "off");
6616 dprintf(fd, " Master balance: %f (%s)\n", mMasterBalance.load(),
6617 (hasFastMixer() ? std::to_string(mFastMixer->getMasterBalance())
6618 : mBalance.toString()).c_str());
6619 if (hasFastMixer()) {
6620 dprintf(fd, " FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
6621
6622 // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
6623 // while we are dumping it. It may be inconsistent, but it won't mutate!
6624 // This is a large object so we place it on the heap.
6625 // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
6626 const std::unique_ptr<FastMixerDumpState> copy =
6627 std::make_unique<FastMixerDumpState>(mFastMixerDumpState);
6628 copy->dump(fd);
6629
6630 #ifdef STATE_QUEUE_DUMP
6631 // Similar for state queue
6632 StateQueueObserverDump observerCopy = mStateQueueObserverDump;
6633 observerCopy.dump(fd);
6634 StateQueueMutatorDump mutatorCopy = mStateQueueMutatorDump;
6635 mutatorCopy.dump(fd);
6636 #endif
6637
6638 #ifdef AUDIO_WATCHDOG
6639 if (mAudioWatchdog != 0) {
6640 // Make a non-atomic copy of audio watchdog dump so it won't change underneath us
6641 AudioWatchdogDump wdCopy = mAudioWatchdogDump;
6642 wdCopy.dump(fd);
6643 }
6644 #endif
6645
6646 } else {
6647 dprintf(fd, " No FastMixer\n");
6648 }
6649
6650 dprintf(fd, "Bluetooth latency modes are %senabled\n",
6651 mBluetoothLatencyModesEnabled ? "" : "not ");
6652 dprintf(fd, "HAL does %ssupport Bluetooth latency modes\n", mOutput != nullptr &&
6653 mOutput->audioHwDev->supportsBluetoothVariableLatency() ? "" : "not ");
6654 dprintf(fd, "Supported latency modes: %s\n", toString(mSupportedLatencyModes).c_str());
6655 }
6656
6657 uint32_t MixerThread::idleSleepTimeUs() const
6658 {
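    // half of one normal mix buffer duration, expressed in microseconds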
6659 return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000) / 2;
6660 }
6661
6662 uint32_t MixerThread::suspendSleepTimeUs() const
6663 {
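    // one full normal mix buffer duration, expressed in microseconds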
6664 return (uint32_t)(((mNormalFrameCount * 1000) / mSampleRate) * 1000);
6665 }
6666
6667 void MixerThread::cacheParameters_l()
6668 {
6669 PlaybackThread::cacheParameters_l();
6670
6671 // FIXME: Relaxed timing because of a certain device that can't meet latency
6672 // Should be reduced to 2x after the vendor fixes the driver issue
6673 // increase threshold again due to low power audio mode. The way this warning
6674 // threshold is calculated and its usefulness should be reconsidered anyway.
6675 maxPeriod = seconds(mNormalFrameCount) / mSampleRate * 15;
6676 }
6677
6678 void MixerThread::onHalLatencyModesChanged_l() {
6679 mAfThreadCallback->onSupportedLatencyModesChanged(mId, mSupportedLatencyModes);
6680 }
6681
6682 void MixerThread::setHalLatencyMode_l() {
6683 // Only handle latency mode if:
6684 // - mBluetoothLatencyModesEnabled is true
6685 // - the HAL supports latency modes
6686 // - the selected device is Bluetooth LE or A2DP
6687 if (!mBluetoothLatencyModesEnabled.load() || mSupportedLatencyModes.empty()) {
6688 return;
6689 }
6690 if (mOutDeviceTypeAddrs.size() != 1
6691 || !(audio_is_a2dp_out_device(mOutDeviceTypeAddrs[0].mType)
6692 || audio_is_ble_out_device(mOutDeviceTypeAddrs[0].mType))) {
6693 return;
6694 }
6695
6696 audio_latency_mode_t latencyMode = AUDIO_LATENCY_MODE_FREE;
6697 if (mSupportedLatencyModes.size() == 1) {
6698         // If the HAL only supports one latency mode currently, confirm the choice
6699 latencyMode = mSupportedLatencyModes[0];
6700 } else if (mSupportedLatencyModes.size() > 1) {
6701 // Request low latency if:
6702 // - At least one active track is either:
6703 // - a fast track with gaming usage or
6704         //    - a track with accessibility usage
6705 for (const auto& track : mActiveTracks) {
6706 if ((track->isFastTrack() && track->attributes().usage == AUDIO_USAGE_GAME)
6707 || track->attributes().usage == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY) {
6708 latencyMode = AUDIO_LATENCY_MODE_LOW;
6709 break;
6710 }
6711 }
6712 }
6713
6714 if (latencyMode != mSetLatencyMode) {
6715 status_t status = mOutput->stream->setLatencyMode(latencyMode);
6716 ALOGD("%s: thread(%d) setLatencyMode(%s) returned %d",
6717 __func__, mId, toString(latencyMode).c_str(), status);
6718 if (status == NO_ERROR) {
6719 mSetLatencyMode = latencyMode;
6720 }
6721 }
6722 }
6723
6724 void MixerThread::updateHalSupportedLatencyModes_l() {
6725
6726 if (mOutput == nullptr || mOutput->stream == nullptr) {
6727 return;
6728 }
6729 std::vector<audio_latency_mode_t> latencyModes;
6730 const status_t status = mOutput->stream->getRecommendedLatencyModes(&latencyModes);
6731 if (status != NO_ERROR) {
6732 latencyModes.clear();
6733 }
6734 if (latencyModes != mSupportedLatencyModes) {
6735 ALOGD("%s: thread(%d) status %d supported latency modes: %s",
6736 __func__, mId, status, toString(latencyModes).c_str());
6737 mSupportedLatencyModes.swap(latencyModes);
6738 sendHalLatencyModesChangedEvent_l();
6739 }
6740 }
6741
6742 status_t MixerThread::getSupportedLatencyModes(
6743 std::vector<audio_latency_mode_t>* modes) {
6744 if (modes == nullptr) {
6745 return BAD_VALUE;
6746 }
6747 audio_utils::lock_guard _l(mutex());
6748 *modes = mSupportedLatencyModes;
6749 return NO_ERROR;
6750 }
6751
6752 void MixerThread::onRecommendedLatencyModeChanged(
6753 std::vector<audio_latency_mode_t> modes) {
6754 audio_utils::lock_guard _l(mutex());
6755 if (modes != mSupportedLatencyModes) {
6756 ALOGD("%s: thread(%d) supported latency modes: %s",
6757 __func__, mId, toString(modes).c_str());
6758 mSupportedLatencyModes.swap(modes);
6759 sendHalLatencyModesChangedEvent_l();
6760 }
6761 }
6762
6763 status_t MixerThread::setBluetoothVariableLatencyEnabled(bool enabled) {
6764 if (mOutput == nullptr || mOutput->audioHwDev == nullptr
6765 || !mOutput->audioHwDev->supportsBluetoothVariableLatency()) {
6766 return INVALID_OPERATION;
6767 }
6768 mBluetoothLatencyModesEnabled.store(enabled);
6769 return NO_ERROR;
6770 }
6771
6772 // ----------------------------------------------------------------------------
6773
6774 /* static */
6775 sp<IAfPlaybackThread> IAfPlaybackThread::createDirectOutputThread(
6776 const sp<IAfThreadCallback>& afThreadCallback,
6777 AudioStreamOut* output, audio_io_handle_t id, bool systemReady,
6778 const audio_offload_info_t& offloadInfo) {
6779 return sp<DirectOutputThread>::make(
6780 afThreadCallback, output, id, systemReady, offloadInfo);
6781 }
6782
6783 DirectOutputThread::DirectOutputThread(const sp<IAfThreadCallback>& afThreadCallback,
6784 AudioStreamOut* output, audio_io_handle_t id, ThreadBase::type_t type, bool systemReady,
6785 const audio_offload_info_t& offloadInfo)
6786 : PlaybackThread(afThreadCallback, output, id, type, systemReady)
6787 , mOffloadInfo(offloadInfo)
6788 {
6789 setMasterBalance(afThreadCallback->getMasterBalance_l());
6790 }
6791
6792 DirectOutputThread::~DirectOutputThread()
6793 {
6794 }
6795
6796 void DirectOutputThread::dumpInternals_l(int fd, const Vector<String16>& args)
6797 {
6798 PlaybackThread::dumpInternals_l(fd, args);
6799 dprintf(fd, " Master balance: %f Left: %f Right: %f\n",
6800 mMasterBalance.load(), mMasterBalanceLeft, mMasterBalanceRight);
6801 }
6802
6803 void DirectOutputThread::setMasterBalance(float balance)
6804 {
6805 audio_utils::lock_guard _l(mutex());
6806 if (mMasterBalance != balance) {
6807 mMasterBalance.store(balance);
6808 mBalance.computeStereoBalance(balance, &mMasterBalanceLeft, &mMasterBalanceRight);
6809 broadcast_l();
6810 }
6811 }
6812
6813 void DirectOutputThread::processVolume_l(IAfTrack* track, bool lastTrack)
6814 {
6815 float left, right;
6816
6817 // Ensure volumeshaper state always advances even when muted.
6818 const sp<AudioTrackServerProxy> proxy = track->audioTrackServerProxy();
6819
6820 const int64_t frames = mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
6821 const int64_t time = mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL];
6822
6823 ALOGVV("%s: Direct/Offload bufferConsumed:%zu timestamp frames:%lld time:%lld",
6824 __func__, proxy->framesReleased(), (long long)frames, (long long)time);
6825
6826 const int64_t volumeShaperFrames =
6827 mMonotonicFrameCounter.updateAndGetMonotonicFrameCount(frames, time);
6828 const auto [shaperVolume, shaperActive] =
6829 track->getVolumeHandler()->getVolume(volumeShaperFrames);
6830 mVolumeShaperActive = shaperActive;
6831
6832 gain_minifloat_packed_t vlr = proxy->getVolumeLR();
6833 left = float_from_gain(gain_minifloat_unpack_left(vlr));
6834 right = float_from_gain(gain_minifloat_unpack_right(vlr));
6835
6836 const bool clientVolumeMute = (left == 0.f && right == 0.f);
6837
6838 if (!audioserver_flags::portid_volume_management()) {
6839 if (mMasterMute || mStreamTypes[track->streamType()].mute ||
6840 track->isPlaybackRestricted()) {
6841 left = right = 0;
6842 } else {
6843 float typeVolume = mStreamTypes[track->streamType()].volume;
6844 const float v = mMasterVolume * typeVolume * shaperVolume;
6845
6846 if (left > GAIN_FLOAT_UNITY) {
6847 left = GAIN_FLOAT_UNITY;
6848 }
6849 if (right > GAIN_FLOAT_UNITY) {
6850 right = GAIN_FLOAT_UNITY;
6851 }
6852 left *= v;
6853 right *= v;
6854 if (mAfThreadCallback->getMode() != AUDIO_MODE_IN_COMMUNICATION
6855 || audio_channel_count_from_out_mask(mChannelMask) > 1) {
6856 left *= mMasterBalanceLeft; // DirectOutputThread balance applied as track volume
6857 right *= mMasterBalanceRight;
6858 }
6859 }
6860 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
6861 /*muteState=*/{mMasterMute,
6862 mStreamTypes[track->streamType()].volume == 0.f,
6863 mStreamTypes[track->streamType()].mute,
6864 track->isPlaybackRestricted(),
6865 clientVolumeMute,
6866 shaperVolume == 0.f,
6867 /*muteFromPortVolume=*/false});
6868 } else {
6869 if (mMasterMute || track->isPlaybackRestricted()) {
6870 left = right = 0;
6871 } else {
6872 float typeVolume = track->getPortVolume();
6873 const float v = mMasterVolume * typeVolume * shaperVolume;
6874
6875 if (left > GAIN_FLOAT_UNITY) {
6876 left = GAIN_FLOAT_UNITY;
6877 }
6878 if (right > GAIN_FLOAT_UNITY) {
6879 right = GAIN_FLOAT_UNITY;
6880 }
6881 left *= v;
6882 right *= v;
6883 if (mAfThreadCallback->getMode() != AUDIO_MODE_IN_COMMUNICATION
6884 || audio_channel_count_from_out_mask(mChannelMask) > 1) {
6885 left *= mMasterBalanceLeft; // DirectOutputThread balance applied as track volume
6886 right *= mMasterBalanceRight;
6887 }
6888 }
6889 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
6890 /*muteState=*/{mMasterMute,
6891 track->getPortVolume() == 0.f,
6892 /* muteFromStreamMuted= */ false,
6893 track->isPlaybackRestricted(),
6894 clientVolumeMute,
6895 shaperVolume == 0.f,
6896 track->getPortMute()});
6897 }
6898
6899 if (lastTrack) {
6900 track->setFinalVolume(left, right);
6901 if (left != mLeftVolFloat || right != mRightVolFloat) {
6902 mLeftVolFloat = left;
6903 mRightVolFloat = right;
6904
6905 // Delegate volume control to effect in track effect chain if needed
6906 // only one effect chain can be present on DirectOutputThread, so if
6907 // there is one, the track is connected to it
6908 if (!mEffectChains.isEmpty()) {
6909 // if effect chain exists, volume is handled by it.
6910 // Convert volumes from float to 8.24
6911 uint32_t vl = (uint32_t)(left * (1 << 24));
6912 uint32_t vr = (uint32_t)(right * (1 << 24));
6913 // Direct/Offload effect chains set output volume in setVolume().
6914 (void)mEffectChains[0]->setVolume(&vl, &vr);
6915 } else {
6916 // otherwise we directly set the volume.
6917 setVolumeForOutput_l(left, right);
6918 }
6919 }
6920 }
6921 }
6922
6923 void DirectOutputThread::onAddNewTrack_l()
6924 {
6925 sp<IAfTrack> previousTrack = mPreviousTrack.promote();
6926 sp<IAfTrack> latestTrack = mActiveTracks.getLatest();
6927
6928 if (previousTrack != 0 && latestTrack != 0) {
6929 if (mType == DIRECT) {
6930 if (previousTrack.get() != latestTrack.get()) {
6931 mFlushPending = true;
6932 }
6933 } else /* mType == OFFLOAD */ {
6934 if (previousTrack->sessionId() != latestTrack->sessionId() ||
6935 previousTrack->isFlushPending()) {
6936 mFlushPending = true;
6937 }
6938 }
6939 } else if (previousTrack == 0) {
6940         // there could be an old track added back during a track transition for direct
6941         // output, so always issue a flush to discard data of the previous track if it
6942         // was already destroyed with the HAL paused; the flush can then resume playback
6943 mFlushPending = true;
6944 }
6945 PlaybackThread::onAddNewTrack_l();
6946 }
6947
6948 PlaybackThread::mixer_state DirectOutputThread::prepareTracks_l(
6949 Vector<sp<IAfTrack>>* tracksToRemove
6950 )
6951 {
6952 size_t count = mActiveTracks.size();
6953 mixer_state mixerStatus = MIXER_IDLE;
6954 bool doHwPause = false;
6955 bool doHwResume = false;
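    // doHwPause / doHwResume accumulate hardware pause/resume decisions made while scanning the
    // active tracks; they are applied to the output stream after the loop, together with any
    // pending flush (see the pause/flush/resume sequencing below).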
6956
6957 // find out which tracks need to be processed
6958 for (const sp<IAfTrack>& t : mActiveTracks) {
6959 if (t->isInvalid()) {
6960 ALOGW("An invalidated track shouldn't be in active list");
6961 tracksToRemove->add(t);
6962 continue;
6963 }
6964
6965 IAfTrack* const track = t.get();
6966 #ifdef VERY_VERY_VERBOSE_LOGGING
6967 audio_track_cblk_t* cblk = track->cblk();
6968 #endif
6969 // Only consider last track started for volume and mixer state control.
6970 // In theory an older track could underrun and restart after the new one starts
6971 // but as we only care about the transition phase between two tracks on a
6972 // direct output, it is not a problem to ignore the underrun case.
6973 sp<IAfTrack> l = mActiveTracks.getLatest();
6974 bool last = l.get() == track;
6975
6976 if (track->isPausePending()) {
6977 track->pauseAck();
6978 // It is possible a track might have been flushed or stopped.
6979 // Other operations such as flush pending might occur on the next prepare.
6980 if (track->isPausing()) {
6981 track->setPaused();
6982 }
6983 // Always perform pause, as an immediate flush will change
6984 // the pause state to be no longer isPausing().
6985 if (mHwSupportsPause && last && !mHwPaused) {
6986 doHwPause = true;
6987 mHwPaused = true;
6988 }
6989 } else if (track->isFlushPending()) {
6990 track->flushAck();
6991 if (last) {
6992 mFlushPending = true;
6993 }
6994 } else if (track->isResumePending()) {
6995 track->resumeAck();
6996 if (last) {
6997 mLeftVolFloat = mRightVolFloat = -1.0;
6998 if (mHwPaused) {
6999 doHwResume = true;
7000 mHwPaused = false;
7001 }
7002 }
7003 }
7004
7005 // The first time a track is added we wait
7006 // for all its buffers to be filled before processing it.
7007 // Allow draining the buffer in case the client
7008 // app does not call stop() and relies on underrun to stop:
7009 // hence the test on (track->retryCount() > 1).
7010 // If track->retryCount() <= 1 then track is about to be disabled, paused, removed,
7011 // so we accept any nonzero amount of data delivered by the AudioTrack (which will
7012 // reset the retry counter).
7013 // Do not use a high threshold for compressed audio.
7014
7015 // target retry count that we will use is based on the time we wait for retries.
7016 const int32_t targetRetryCount = kMaxTrackRetriesDirectMs * 1000 / mActiveSleepTimeUs;
7017 // the retry threshold is when we accept any size for PCM data. This is slightly
7018 // smaller than the retry count so we can push small bits of data without a glitch.
7019 const int32_t retryThreshold = targetRetryCount > 2 ? targetRetryCount - 1 : 1;
7020 uint32_t minFrames;
7021 if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()
7022 && (track->retryCount() > retryThreshold) && audio_has_proportional_frames(mFormat)) {
7023 minFrames = mNormalFrameCount;
7024 } else {
7025 minFrames = 1;
7026 }
7027
7028 const size_t framesReady = track->framesReady();
7029 const int trackId = track->id();
7030 if (ATRACE_ENABLED()) [[unlikely]] {
7031 ATRACE_INT(std::string(AUDIO_TRACE_PREFIX_AUDIO_TRACK_NRDY)
7032 .append(track->getTraceSuffix()).c_str(), framesReady);
7033 }
7034 if ((framesReady >= minFrames) && track->isReady() && !track->isPaused() &&
7035 !track->isStopping_2() && !track->isStopped())
7036 {
7037 ALOGVV("track(%d) s=%08x [OK]", trackId, cblk->mServer);
7038
7039 if (track->fillingStatus() == IAfTrack::FS_FILLED) {
7040 track->fillingStatus() = IAfTrack::FS_ACTIVE;
7041 if (last) {
7042 // make sure processVolume_l() will apply new volume even if 0
7043 mLeftVolFloat = mRightVolFloat = -1.0;
7044 }
7045 if (!mHwSupportsPause) {
7046 track->resumeAck();
7047 }
7048 }
7049
7050 // compute volume for this track
7051 processVolume_l(track, last);
7052 if (last) {
7053 sp<IAfTrack> previousTrack = mPreviousTrack.promote();
7054 if (previousTrack != 0) {
7055 if (track != previousTrack.get()) {
7056 // Flush any data still being written from last track
7057 mBytesRemaining = 0;
7058 // Invalidate previous track to force a seek when resuming.
7059 previousTrack->invalidate();
7060 }
7061 }
7062 mPreviousTrack = track;
7063
7064 // reset retry count
7065 track->retryCount() = targetRetryCount;
7066 mActiveTrack = t;
7067 mixerStatus = MIXER_TRACKS_READY;
7068 if (mHwPaused) {
7069 doHwResume = true;
7070 mHwPaused = false;
7071 }
7072 }
7073 } else {
7074 // clear effect chain input buffer if the last active track started underruns
7075 // to avoid sending previous audio buffer again to effects
7076 if (!mEffectChains.isEmpty() && last) {
7077 mEffectChains[0]->clearInputBuffer();
7078 }
7079 if (track->isStopping_1()) {
7080 track->setState(IAfTrackBase::STOPPING_2);
7081 if (last && mHwPaused) {
7082 doHwResume = true;
7083 mHwPaused = false;
7084 }
7085 }
7086 if ((track->sharedBuffer() != 0) || track->isStopped() ||
7087 track->isStopping_2() || track->isPaused()) {
7088 // We have consumed all the buffers of this track.
7089 // Remove it from the list of active tracks.
7090 bool presComplete = false;
7091 if (mStandby || !last ||
7092 (presComplete = track->presentationComplete(latency_l())) ||
7093 track->isPaused() || mHwPaused) {
7094 if (presComplete) {
7095 mOutput->presentationComplete();
7096 }
7097 if (track->isStopping_2()) {
7098 track->setState(IAfTrackBase::STOPPED);
7099 }
7100 if (track->isStopped()) {
7101 track->reset();
7102 }
7103 tracksToRemove->add(track);
7104 }
7105 } else {
7106 // No buffers for this track. Give it a few chances to
7107 // fill a buffer, then remove it from active list.
7108 // Only consider last track started for mixer state control
7109 bool isTimestampAdvancing = mIsTimestampAdvancing.check(mOutput);
7110 if (!isTunerStream() // tuner streams remain active in underrun
7111 && --(track->retryCount()) <= 0) {
7112 if (isTimestampAdvancing) { // HAL is still playing audio, give us more time.
7113 track->retryCount() = kMaxTrackRetriesOffload;
7114 } else {
7115 ALOGI("%s BUFFER TIMEOUT: remove track(%d) from active list due to"
7116 " underrun on thread %d", __func__, trackId, mId);
7117 tracksToRemove->add(track);
7118 // indicate to client process that the track was disabled because of
7119 // underrun; it will then automatically call start() when data is available
7120 track->disable();
7121 // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
7122 // unlike mixerthread, HAL can be paused for direct output
7123                         ALOGW("pause because of UNDERRUN, framesReady = %zu, "
7124                                 "minFrames = %u, mFormat = %#x",
7125 framesReady, minFrames, mFormat);
7126 if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
7127 doHwPause = true;
7128 mHwPaused = true;
7129 }
7130 }
7131 } else if (last) {
7132 mixerStatus = MIXER_TRACKS_ENABLED;
7133 }
7134 }
7135 }
7136 }
7137
7138 // if an active track did not command a flush, check for pending flush on stopped tracks
7139 if (!mFlushPending) {
7140 for (size_t i = 0; i < mTracks.size(); i++) {
7141 if (mTracks[i]->isFlushPending()) {
7142 mTracks[i]->flushAck();
7143 mFlushPending = true;
7144 }
7145 }
7146 }
7147
7148 // make sure the pause/flush/resume sequence is executed in the right order.
7149 // If a flush is pending and a track is active but the HW is not paused, force a HW pause
7150 // before flush and then resume HW. This can happen in case of pause/flush/resume
7151 // if resume is received before pause is executed.
7152 if (mHwSupportsPause && !mStandby &&
7153 (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
7154 status_t result = mOutput->stream->pause();
7155 ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
7156 doHwResume = !doHwPause; // resume if pause is due to flush.
7157 }
7158 if (mFlushPending) {
7159 flushHw_l();
7160 }
7161 if (mHwSupportsPause && !mStandby && doHwResume) {
7162 status_t result = mOutput->stream->resume();
7163 ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
7164 }
7165 // remove all the tracks that need to be...
7166 removeTracks_l(*tracksToRemove);
7167
7168 return mixerStatus;
7169 }
7170
7171 void DirectOutputThread::threadLoop_mix()
7172 {
7173 size_t frameCount = mFrameCount;
7174 int8_t *curBuf = (int8_t *)mSinkBuffer;
7175 // output audio to hardware
7176 while (frameCount) {
7177 AudioBufferProvider::Buffer buffer;
7178 buffer.frameCount = frameCount;
7179 status_t status = mActiveTrack->getNextBuffer(&buffer);
7180 if (status != NO_ERROR || buffer.raw == NULL) {
7181 // no need to pad with 0 for compressed audio
7182 if (audio_has_proportional_frames(mFormat)) {
7183 memset(curBuf, 0, frameCount * mFrameSize);
7184 }
7185 break;
7186 }
7187 memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
7188 frameCount -= buffer.frameCount;
7189 curBuf += buffer.frameCount * mFrameSize;
7190 mActiveTrack->releaseBuffer(&buffer);
7191 }
7192 mCurrentWriteLength = curBuf - (int8_t *)mSinkBuffer;
7193 mSleepTimeUs = 0;
7194 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
7195 mActiveTrack.clear();
7196 }
7197
7198 void DirectOutputThread::threadLoop_sleepTime()
7199 {
7200 // do not write to HAL when paused
7201 if (mHwPaused || (usesHwAvSync() && mStandby)) {
7202 mSleepTimeUs = mIdleSleepTimeUs;
7203 return;
7204 }
7205 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
7206 mSleepTimeUs = mActiveSleepTimeUs;
7207 } else {
7208 mSleepTimeUs = mIdleSleepTimeUs;
7209 }
7210 // Note: In S or later, we do not write zeroes for
7211 // linear or proportional PCM direct tracks in underrun.
7212 }
7213
7214 void DirectOutputThread::threadLoop_exit()
7215 {
7216 {
7217 audio_utils::lock_guard _l(mutex());
7218 for (size_t i = 0; i < mTracks.size(); i++) {
7219 if (mTracks[i]->isFlushPending()) {
7220 mTracks[i]->flushAck();
7221 mFlushPending = true;
7222 }
7223 }
7224 if (mFlushPending) {
7225 flushHw_l();
7226 }
7227 }
7228 PlaybackThread::threadLoop_exit();
7229 }
7230
7231 // must be called with thread mutex locked
7232 bool DirectOutputThread::shouldStandby_l()
7233 {
7234 bool trackPaused = false;
7235 bool trackStopped = false;
7236 bool trackDisabled = false;
7237
7238     // do not put the HAL in standby when paused. NuPlayer clears the offloaded AudioTrack
7239 // after a timeout and we will enter standby then.
7240 // On offload threads, do not enter standby if the main track is still underrunning.
7241 if (mTracks.size() > 0) {
7242 const auto& mainTrack = mTracks[mTracks.size() - 1];
7243
7244 trackPaused = mainTrack->isPaused();
7245 trackStopped = mainTrack->isStopped() || mainTrack->state() == IAfTrackBase::IDLE;
7246 trackDisabled = (mType == OFFLOAD) && mainTrack->isDisabled();
7247 }
7248
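// Enter standby only if we are not already in standby and the main track is neither
// paused, HW-paused while still active, nor disabled.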
7249 return !mStandby && !(trackPaused || (mHwPaused && !trackStopped) || trackDisabled);
7250 }
7251
7252 // checkForNewParameter_l() must be called with ThreadBase::mutex() held
7253 bool DirectOutputThread::checkForNewParameter_l(const String8& keyValuePair,
7254 status_t& status)
7255 {
7256 bool reconfig = false;
7257 status = NO_ERROR;
7258
7259 AudioParameter param = AudioParameter(keyValuePair);
7260 int value;
7261 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
7262 LOG_FATAL("Should not set routing device in DirectOutputThread");
7263 }
7264 if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
7265 // do not accept frame count changes if tracks are open as the track buffer
7266 // size depends on frame count and correct behavior would not be guaranteed
7267 // if frame count is changed after track creation
7268 if (!mTracks.isEmpty()) {
7269 status = INVALID_OPERATION;
7270 } else {
7271 reconfig = true;
7272 }
7273 }
7274 if (status == NO_ERROR) {
7275 status = mOutput->stream->setParameters(keyValuePair);
7276 if (!mStandby && status == INVALID_OPERATION) {
7277 mOutput->standby();
7278 if (!mStandby) {
7279 mThreadMetrics.logEndInterval();
7280 mThreadSnapshot.onEnd();
7281 setStandby_l();
7282 }
7283 mBytesWritten = 0;
7284 status = mOutput->stream->setParameters(keyValuePair);
7285 }
7286 if (status == NO_ERROR && reconfig) {
7287 readOutputParameters_l();
7288 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
7289 }
7290 }
7291
7292 return reconfig;
7293 }
7294
7295 uint32_t DirectOutputThread::activeSleepTimeUs() const
7296 {
7297 uint32_t time;
7298 if (audio_has_proportional_frames(mFormat) && mType != OFFLOAD) {
7299 time = PlaybackThread::activeSleepTimeUs();
7300 } else {
7301 time = kDirectMinSleepTimeUs;
7302 }
7303 return time;
7304 }
7305
7306 uint32_t DirectOutputThread::idleSleepTimeUs() const
7307 {
7308 uint32_t time;
7309 if (audio_has_proportional_frames(mFormat) && mType != OFFLOAD) {
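// Half of the buffer duration, converted to microseconds.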
7310 time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2;
7311 } else {
7312 time = kDirectMinSleepTimeUs;
7313 }
7314 return time;
7315 }
7316
7317 uint32_t DirectOutputThread::suspendSleepTimeUs() const
7318 {
7319 uint32_t time;
7320 if (audio_has_proportional_frames(mFormat) && mType != OFFLOAD) {
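// One full buffer duration, converted to microseconds.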
7321 time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
7322 } else {
7323 time = kDirectMinSleepTimeUs;
7324 }
7325 return time;
7326 }
7327
7328 void DirectOutputThread::cacheParameters_l()
7329 {
7330 PlaybackThread::cacheParameters_l();
7331
7332 // use shorter standby delay as on normal output to release
7333 // hardware resources as soon as possible
7334 // no delay on outputs with HW A/V sync
7335 if (usesHwAvSync()) {
7336 mStandbyDelayNs = 0;
7337 } else if (mType == OFFLOAD) {
7338 mStandbyDelayNs = kOffloadStandbyDelayNs;
7339 } else {
7340 mStandbyDelayNs = microseconds(mActiveSleepTimeUs*2);
7341 }
7342 }
7343
7344 void DirectOutputThread::flushHw_l()
7345 {
7346 PlaybackThread::flushHw_l();
7347 mOutput->flush();
7348 mFlushPending = false;
7349 mTimestampVerifier.discontinuity(discontinuityForStandbyOrFlush());
7350 mTimestamp.clear();
7351 mMonotonicFrameCounter.onFlush();
7352 // We do not reset mHwPaused which is hidden from the Track client.
7353 // Note: the client track in Tracks.cpp and AudioTrack.cpp
7354 // has a FLUSHED state but the DirectOutputThread does not;
7355 // those tracks will continue to show isStopped().
7356 }
7357
7358 int64_t DirectOutputThread::computeWaitTimeNs_l() const {
7359 // If a VolumeShaper is active, we must wake up periodically to update volume.
7360 const int64_t NS_PER_MS = 1000000;
7361 return mVolumeShaperActive ?
7362 kMinNormalSinkBufferSizeMs * NS_PER_MS : PlaybackThread::computeWaitTimeNs_l();
7363 }
7364
7365 // ----------------------------------------------------------------------------
7366
7367 AsyncCallbackThread::AsyncCallbackThread(
7368 const wp<PlaybackThread>& playbackThread)
7369 : Thread(false /*canCallJava*/),
7370 mPlaybackThread(playbackThread),
7371 mWriteAckSequence(0),
7372 mDrainSequence(0),
7373 mAsyncError(ASYNC_ERROR_NONE)
7374 {
7375 }
7376
7377 void AsyncCallbackThread::onFirstRef()
7378 {
7379 run("Offload Cbk", ANDROID_PRIORITY_URGENT_AUDIO);
7380 }
7381
7382 bool AsyncCallbackThread::threadLoop()
7383 {
7384 while (!exitPending()) {
7385 uint32_t writeAckSequence;
7386 uint32_t drainSequence;
7387 AsyncError asyncError;
7388
7389 {
7390 audio_utils::unique_lock _l(mutex());
7391 while (!((mWriteAckSequence & 1) ||
7392 (mDrainSequence & 1) ||
7393 mAsyncError ||
7394 exitPending())) {
7395 mWaitWorkCV.wait(_l);
7396 }
7397
7398 if (exitPending()) {
7399 break;
7400 }
7401 ALOGV("AsyncCallbackThread mWriteAckSequence %d mDrainSequence %d",
7402 mWriteAckSequence, mDrainSequence);
7403 writeAckSequence = mWriteAckSequence;
7404 mWriteAckSequence &= ~1;
7405 drainSequence = mDrainSequence;
7406 mDrainSequence &= ~1;
7407 asyncError = mAsyncError;
7408 mAsyncError = ASYNC_ERROR_NONE;
7409 }
7410 {
7411 const sp<PlaybackThread> playbackThread = mPlaybackThread.promote();
7412 if (playbackThread != 0) {
7413 if (writeAckSequence & 1) {
7414 playbackThread->resetWriteBlocked(writeAckSequence >> 1);
7415 }
7416 if (drainSequence & 1) {
7417 playbackThread->resetDraining(drainSequence >> 1);
7418 }
7419 if (asyncError != ASYNC_ERROR_NONE) {
7420 playbackThread->onAsyncError(asyncError == ASYNC_ERROR_HARD);
7421 }
7422 }
7423 }
7424 }
7425 return false;
7426 }
7427
7428 void AsyncCallbackThread::exit()
7429 {
7430 ALOGV("AsyncCallbackThread::exit");
7431 audio_utils::lock_guard _l(mutex());
7432 requestExit();
7433 mWaitWorkCV.notify_all();
7434 }
7435
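// Sequence handshake with the playback thread: setWriteBlocked()/setDraining() store the
// sequence shifted left by one with bit 0 clear; the matching reset*() callback sets bit 0
// and signals, which lets threadLoop() forward the acknowledgement (sequence >> 1) to the
// PlaybackThread.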
7436 void AsyncCallbackThread::setWriteBlocked(uint32_t sequence)
7437 {
7438 audio_utils::lock_guard _l(mutex());
7439 // bit 0 is cleared
7440 mWriteAckSequence = sequence << 1;
7441 }
7442
7443 void AsyncCallbackThread::resetWriteBlocked()
7444 {
7445 audio_utils::lock_guard _l(mutex());
7446 // ignore unexpected callbacks
7447 if (mWriteAckSequence & 2) {
7448 mWriteAckSequence |= 1;
7449 mWaitWorkCV.notify_one();
7450 }
7451 }
7452
7453 void AsyncCallbackThread::setDraining(uint32_t sequence)
7454 {
7455 audio_utils::lock_guard _l(mutex());
7456 // bit 0 is cleared
7457 mDrainSequence = sequence << 1;
7458 }
7459
7460 void AsyncCallbackThread::resetDraining()
7461 {
7462 audio_utils::lock_guard _l(mutex());
7463 // ignore unexpected callbacks
7464 if (mDrainSequence & 2) {
7465 mDrainSequence |= 1;
7466 mWaitWorkCV.notify_one();
7467 }
7468 }
7469
7470 void AsyncCallbackThread::setAsyncError(bool isHardError)
7471 {
7472 audio_utils::lock_guard _l(mutex());
7473 mAsyncError = isHardError ? ASYNC_ERROR_HARD : ASYNC_ERROR_SOFT;
7474 mWaitWorkCV.notify_one();
7475 }
7476
7477
7478 // ----------------------------------------------------------------------------
7479
7480 /* static */
7481 sp<IAfPlaybackThread> IAfPlaybackThread::createOffloadThread(
7482 const sp<IAfThreadCallback>& afThreadCallback,
7483 AudioStreamOut* output, audio_io_handle_t id, bool systemReady,
7484 const audio_offload_info_t& offloadInfo) {
7485 return sp<OffloadThread>::make(afThreadCallback, output, id, systemReady, offloadInfo);
7486 }
7487
7488 OffloadThread::OffloadThread(const sp<IAfThreadCallback>& afThreadCallback,
7489 AudioStreamOut* output, audio_io_handle_t id, bool systemReady,
7490 const audio_offload_info_t& offloadInfo)
7491 : DirectOutputThread(afThreadCallback, output, id, OFFLOAD, systemReady, offloadInfo),
7492 mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
7493 {
7494 //FIXME: mStandby should be set to true by ThreadBase constructor
7495 mStandby = true;
7496 mKeepWakeLock = property_get_bool("ro.audio.offload_wakelock", true /* default_value */);
7497 }
7498
7499 void OffloadThread::threadLoop_exit()
7500 {
7501 if (mFlushPending || mHwPaused) {
7502 // If a flush is pending or track was paused, just discard buffered data
7503 audio_utils::lock_guard l(mutex());
7504 flushHw_l();
7505 } else {
7506 mMixerStatus = MIXER_DRAIN_ALL;
7507 threadLoop_drain();
7508 }
7509 if (mUseAsyncWrite) {
7510 ALOG_ASSERT(mCallbackThread != 0);
7511 mCallbackThread->exit();
7512 }
7513 PlaybackThread::threadLoop_exit();
7514 }
7515
7516 PlaybackThread::mixer_state OffloadThread::prepareTracks_l(
7517 Vector<sp<IAfTrack>>* tracksToRemove
7518 )
7519 {
7520 size_t count = mActiveTracks.size();
7521
7522 mixer_state mixerStatus = MIXER_IDLE;
7523 bool doHwPause = false;
7524 bool doHwResume = false;
7525
7526 ALOGV("OffloadThread::prepareTracks_l active tracks %zu", count);
7527
7528 // find out which tracks need to be processed
7529 for (const sp<IAfTrack>& t : mActiveTracks) {
7530 IAfTrack* const track = t.get();
7531 #ifdef VERY_VERY_VERBOSE_LOGGING
7532 audio_track_cblk_t* cblk = track->cblk();
7533 #endif
7534 // Only consider last track started for volume and mixer state control.
7535 // In theory an older track could underrun and restart after the new one starts
7536 // but as we only care about the transition phase between two tracks on a
7537 // direct output, it is not a problem to ignore the underrun case.
7538 sp<IAfTrack> l = mActiveTracks.getLatest();
7539 bool last = l.get() == track;
7540
7541 if (track->isInvalid()) {
7542 ALOGW("An invalidated track shouldn't be in active list");
7543 tracksToRemove->add(track);
7544 continue;
7545 }
7546
7547 if (track->state() == IAfTrackBase::IDLE) {
7548 ALOGW("An idle track shouldn't be in active list");
7549 continue;
7550 }
7551
7552 const size_t framesReady = track->framesReady();
7553 if (ATRACE_ENABLED()) [[unlikely]] {
7554 ATRACE_INT(std::string(AUDIO_TRACE_PREFIX_AUDIO_TRACK_NRDY)
7555 .append(track->getTraceSuffix()).c_str(), framesReady);
7556 }
7557 if (track->isPausePending()) {
7558 track->pauseAck();
7559 // It is possible a track might have been flushed or stopped.
7560 // Other operations such as flush pending might occur on the next prepare.
7561 if (track->isPausing()) {
7562 track->setPaused();
7563 }
7564 // Always perform pause if last, as an immediate flush will change
7565 // the pause state to be no longer isPausing().
7566 if (last) {
7567 if (mHwSupportsPause && !mHwPaused) {
7568 doHwPause = true;
7569 mHwPaused = true;
7570 }
7571 // If we were part way through writing the mixbuffer to
7572 // the HAL we must save this until we resume
7573 // BUG - this will be wrong if a different track is made active,
7574 // in that case we want to discard the pending data in the
7575 // mixbuffer and tell the client to present it again when the
7576 // track is resumed
7577 mPausedWriteLength = mCurrentWriteLength;
7578 mPausedBytesRemaining = mBytesRemaining;
7579 mBytesRemaining = 0; // stop writing
7580 }
7581 tracksToRemove->add(track);
7582 } else if (track->isFlushPending()) {
7583 if (track->isStopping_1()) {
7584 track->retryCount() = kMaxTrackStopRetriesOffload;
7585 } else {
7586 track->retryCount() = kMaxTrackRetriesOffload;
7587 }
7588 track->flushAck();
7589 if (last) {
7590 mFlushPending = true;
7591 }
7592 } else if (track->isResumePending()){
7593 track->resumeAck();
7594 if (last) {
7595 if (mPausedBytesRemaining) {
7596 // Need to continue write that was interrupted
7597 mCurrentWriteLength = mPausedWriteLength;
7598 mBytesRemaining = mPausedBytesRemaining;
7599 mPausedBytesRemaining = 0;
7600 }
7601 if (mHwPaused) {
7602 doHwResume = true;
7603 mHwPaused = false;
7604 // threadLoop_mix() will handle the case that we need to
7605 // resume an interrupted write
7606 }
7607 // enable write to audio HAL
7608 mSleepTimeUs = 0;
7609
7610 mLeftVolFloat = mRightVolFloat = -1.0;
7611
7612 // Do not handle new data in this iteration even if track->framesReady()
7613 mixerStatus = MIXER_TRACKS_ENABLED;
7614 }
7615 } else if (framesReady && track->isReady() &&
7616 !track->isPaused() && !track->isTerminated() && !track->isStopping_2()) {
7617 ALOGVV("OffloadThread: track(%d) s=%08x [OK]", track->id(), cblk->mServer);
7618 if (track->fillingStatus() == IAfTrack::FS_FILLED) {
7619 track->fillingStatus() = IAfTrack::FS_ACTIVE;
7620 if (last) {
7621 // make sure processVolume_l() will apply new volume even if 0
7622 mLeftVolFloat = mRightVolFloat = -1.0;
7623 }
7624 }
7625
7626 if (last) {
7627 sp<IAfTrack> previousTrack = mPreviousTrack.promote();
7628 if (previousTrack != 0) {
7629 if (track != previousTrack.get()) {
7630 // Flush any data still being written from last track
7631 mBytesRemaining = 0;
7632 if (mPausedBytesRemaining) {
7633 // Last track was paused so we also need to flush saved
7634 // mixbuffer state and invalidate track so that it will
7635 // re-submit that unwritten data when it is next resumed
7636 mPausedBytesRemaining = 0;
7637 // Invalidate is a bit drastic - would be more efficient
7638 // to have a flag to tell client that some of the
7639 // previously written data was lost
7640 previousTrack->invalidate();
7641 }
7642 // flush data already sent to the DSP if changing audio session as audio
7643 // comes from a different source. Also invalidate previous track to force a
7644 // seek when resuming.
7645 if (previousTrack->sessionId() != track->sessionId()) {
7646 previousTrack->invalidate();
7647 }
7648 }
7649 }
7650 mPreviousTrack = track;
7651 // reset retry count
7652 if (track->isStopping_1()) {
7653 track->retryCount() = kMaxTrackStopRetriesOffload;
7654 } else {
7655 track->retryCount() = kMaxTrackRetriesOffload;
7656 }
7657 mActiveTrack = t;
7658 mixerStatus = MIXER_TRACKS_READY;
7659 }
7660 } else {
7661 ALOGVV("OffloadThread: track(%d) s=%08x [NOT READY]", track->id(), cblk->mServer);
7662 if (track->isStopping_1()) {
7663 if (--(track->retryCount()) <= 0) {
7664 // Hardware buffer can hold a large amount of audio so we must
7665 // wait for all current track's data to drain before we say
7666 // that the track is stopped.
7667 if (mBytesRemaining == 0) {
7668 // Only start draining when all data in mixbuffer
7669 // has been written
7670 ALOGV("OffloadThread: underrun and STOPPING_1 -> draining, STOPPING_2");
7671 track->setState(IAfTrackBase::STOPPING_2);
7672 // so presentation completes after drain.
7673 // Do not drain if no data was ever sent to HAL (mStandby == true).
7674 if (last && !mStandby) {
7675 // do not modify drain sequence if we are already draining. This happens
7676 // when resuming from pause after drain.
7677 if ((mDrainSequence & 1) == 0) {
7678 mSleepTimeUs = 0;
7679 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
7680 mixerStatus = MIXER_DRAIN_TRACK;
7681 mDrainSequence += 2;
7682 }
7683 if (mHwPaused) {
7684 // It is possible to move from PAUSED to STOPPING_1 without
7685 // a resume so we must ensure hardware is running
7686 doHwResume = true;
7687 mHwPaused = false;
7688 }
7689 }
7690 }
7691 } else if (last) {
7692 ALOGV("stopping1 underrun retries left %d", track->retryCount());
7693 mixerStatus = MIXER_TRACKS_ENABLED;
7694 }
7695 } else if (track->isStopping_2()) {
7696 // Drain has completed or we are in standby, signal presentation complete
7697 if (!(mDrainSequence & 1) || !last || mStandby) {
7698 track->setState(IAfTrackBase::STOPPED);
7699 mOutput->presentationComplete();
7700 track->presentationComplete(latency_l()); // always returns true
7701 track->reset();
7702 tracksToRemove->add(track);
7703 // OFFLOADED stop resets frame counts.
7704 if (!mUseAsyncWrite) {
7705 // If we don't get explicit drain notification we must
7706 // register discontinuity regardless of whether this is
7707 // the previous (!last) or the upcoming (last) track
7708 // to avoid skipping the discontinuity.
7709 mTimestampVerifier.discontinuity(
7710 mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
7711 }
7712 }
7713 } else {
7714 // No buffers for this track. Give it a few chances to
7715 // fill a buffer, then remove it from active list.
7716 bool isTimestampAdvancing = mIsTimestampAdvancing.check(mOutput);
7717 if (!isTunerStream() // tuner streams remain active in underrun
7718 && --(track->retryCount()) <= 0) {
7719 if (isTimestampAdvancing) { // HAL is still playing audio, give us more time.
7720 track->retryCount() = kMaxTrackRetriesOffload;
7721 } else {
7722 ALOGI("%s BUFFER TIMEOUT: remove track(%d) from active list due to"
7723 " underrun on thread %d", __func__, track->id(), mId);
7724 tracksToRemove->add(track);
7725 // tell client process that the track was disabled because of underrun;
7726 // it will then automatically call start() when data is available
7727 track->disable();
7728 }
7729 } else if (last){
7730 mixerStatus = MIXER_TRACKS_ENABLED;
7731 }
7732 }
7733 }
7734 // compute volume for this track
7735 if (track->isReady()) { // check ready to prevent premature start.
7736 processVolume_l(track, last);
7737 }
7738 }
7739
7740 // make sure the pause/flush/resume sequence is executed in the right order.
7741 // If a flush is pending and a track is active but the HW is not paused, force a HW pause
7742 // before flush and then resume HW. This can happen in case of pause/flush/resume
7743 // if resume is received before pause is executed.
7744 if (!mStandby && (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
7745 status_t result = mOutput->stream->pause();
7746 ALOGE_IF(result != OK, "Error when pausing output stream: %d", result);
7747 doHwResume = !doHwPause; // resume if pause is due to flush.
7748 }
7749 if (mFlushPending) {
7750 flushHw_l();
7751 }
7752 if (!mStandby && doHwResume) {
7753 status_t result = mOutput->stream->resume();
7754 ALOGE_IF(result != OK, "Error when resuming output stream: %d", result);
7755 }
7756
7757 // remove all the tracks that need to be...
7758 removeTracks_l(*tracksToRemove);
7759
7760 return mixerStatus;
7761 }
7762
7763 // must be called with thread mutex locked
7764 bool OffloadThread::waitingAsyncCallback_l()
7765 {
7766 ALOGVV("waitingAsyncCallback_l mWriteAckSequence %d mDrainSequence %d",
7767 mWriteAckSequence, mDrainSequence);
7768 if (mUseAsyncWrite && ((mWriteAckSequence & 1) || (mDrainSequence & 1))) {
7769 return true;
7770 }
7771 return false;
7772 }
7773
7774 bool OffloadThread::waitingAsyncCallback()
7775 {
7776 audio_utils::lock_guard _l(mutex());
7777 return waitingAsyncCallback_l();
7778 }
7779
7780 void OffloadThread::flushHw_l()
7781 {
7782 DirectOutputThread::flushHw_l();
7783 // Flush anything still waiting in the mixbuffer
7784 mCurrentWriteLength = 0;
7785 mBytesRemaining = 0;
7786 mPausedWriteLength = 0;
7787 mPausedBytesRemaining = 0;
7788 // reset bytes written count to reflect that DSP buffers are empty after flush.
7789 mBytesWritten = 0;
7790
7791 if (mUseAsyncWrite) {
7792 // discard any pending drain or write ack by incrementing sequence
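// (adding 2 advances the sequence number; the ~1 keeps bit 0, the callback-received flag, cleared)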
7793 mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
7794 mDrainSequence = (mDrainSequence + 2) & ~1;
7795 ALOG_ASSERT(mCallbackThread != 0);
7796 mCallbackThread->setWriteBlocked(mWriteAckSequence);
7797 mCallbackThread->setDraining(mDrainSequence);
7798 }
7799 }
7800
7801 void OffloadThread::invalidateTracks(audio_stream_type_t streamType)
7802 {
7803 audio_utils::lock_guard _l(mutex());
7804 if (PlaybackThread::invalidateTracks_l(streamType)) {
7805 mFlushPending = true;
7806 }
7807 }
7808
7809 void OffloadThread::invalidateTracks(std::set<audio_port_handle_t>& portIds) {
7810 audio_utils::lock_guard _l(mutex());
7811 if (PlaybackThread::invalidateTracks_l(portIds)) {
7812 mFlushPending = true;
7813 }
7814 }
7815
7816 // ----------------------------------------------------------------------------
7817
7818 /* static */
7819 sp<IAfDuplicatingThread> IAfDuplicatingThread::create(
7820 const sp<IAfThreadCallback>& afThreadCallback,
7821 IAfPlaybackThread* mainThread, audio_io_handle_t id, bool systemReady) {
7822 return sp<DuplicatingThread>::make(afThreadCallback, mainThread, id, systemReady);
7823 }
7824
7825 DuplicatingThread::DuplicatingThread(const sp<IAfThreadCallback>& afThreadCallback,
7826 IAfPlaybackThread* mainThread, audio_io_handle_t id, bool systemReady)
7827 : MixerThread(afThreadCallback, mainThread->getOutput(), id,
7828 systemReady, DUPLICATING),
7829 mWaitTimeMs(UINT_MAX)
7830 {
7831 addOutputTrack(mainThread);
7832 }
7833
7834 DuplicatingThread::~DuplicatingThread()
7835 {
7836 for (size_t i = 0; i < mOutputTracks.size(); i++) {
7837 mOutputTracks[i]->destroy();
7838 }
7839 }
7840
7841 void DuplicatingThread::threadLoop_mix()
7842 {
7843 // mix buffers...
7844 if (outputsReady()) {
7845 mAudioMixer->process();
7846 } else {
7847 if (mMixerBufferValid) {
7848 memset(mMixerBuffer, 0, mMixerBufferSize);
7849 } else {
7850 memset(mSinkBuffer, 0, mSinkBufferSize);
7851 }
7852 }
7853 mSleepTimeUs = 0;
7854 writeFrames = mNormalFrameCount;
7855 mCurrentWriteLength = mSinkBufferSize;
7856 mStandbyTimeNs = systemTime() + mStandbyDelayNs;
7857 }
7858
7859 void DuplicatingThread::threadLoop_sleepTime()
7860 {
7861 if (mSleepTimeUs == 0) {
7862 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
7863 mSleepTimeUs = mActiveSleepTimeUs;
7864 } else {
7865 mSleepTimeUs = mIdleSleepTimeUs;
7866 }
7867 } else if (mBytesWritten != 0) {
7868 if (mMixerStatus == MIXER_TRACKS_ENABLED) {
7869 writeFrames = mNormalFrameCount;
7870 memset(mSinkBuffer, 0, mSinkBufferSize);
7871 } else {
7872 // flush remaining overflow buffers in output tracks
7873 writeFrames = 0;
7874 }
7875 mSleepTimeUs = 0;
7876 }
7877 }
7878
7879 ssize_t DuplicatingThread::threadLoop_write()
7880 {
7881 ATRACE_BEGIN("write");
7882 for (size_t i = 0; i < outputTracks.size(); i++) {
7883 const ssize_t actualWritten = outputTracks[i]->write(mSinkBuffer, writeFrames);
7884
7885 // Consider the first OutputTrack for timestamp and frame counting.
7886
7887 // The threadLoop() generally assumes writing a full sink buffer size at a time.
7888 // Here, we correct for writeFrames of 0 (a stop) or underruns because
7889 // we always claim success.
7890 if (i == 0) {
7891 const ssize_t correction = mSinkBufferSize / mFrameSize - actualWritten;
7892 ALOGD_IF(correction != 0 && writeFrames != 0,
7893 "%s: writeFrames:%u actualWritten:%zd correction:%zd mFramesWritten:%lld",
7894 __func__, writeFrames, actualWritten, correction, (long long)mFramesWritten);
7895 mFramesWritten -= correction;
7896 }
7897
7898 // TODO: Report correction for the other output tracks and show in the dump.
7899 }
7900 ATRACE_END();
7901 if (mStandby) {
7902 mThreadMetrics.logBeginInterval();
7903 mThreadSnapshot.onBegin();
7904 mStandby = false;
7905 }
7906 return (ssize_t)mSinkBufferSize;
7907 }
7908
7909 void DuplicatingThread::threadLoop_standby()
7910 {
7911 // DuplicatingThread implements standby by stopping all tracks
7912 for (size_t i = 0; i < outputTracks.size(); i++) {
7913 outputTracks[i]->stop();
7914 }
7915 }
7916
7917 void DuplicatingThread::threadLoop_exit()
7918 {
7919 // Prevent calling the OutputTrack dtor in the DuplicatingThread dtor
7920 // where other mutexes (i.e. AudioPolicyService_Mutex) may be held.
7921 // Do so here in the threadLoop_exit().
7922
7923 SortedVector <sp<IAfOutputTrack>> localTracks;
7924 {
7925 audio_utils::lock_guard l(mutex());
7926 localTracks = std::move(mOutputTracks);
7927 mOutputTracks.clear();
7928 for (size_t i = 0; i < localTracks.size(); ++i) {
7929 localTracks[i]->destroy();
7930 }
7931 }
7932 localTracks.clear();
7933 outputTracks.clear();
7934 PlaybackThread::threadLoop_exit();
7935 }
7936
7937 void DuplicatingThread::dumpInternals_l(int fd, const Vector<String16>& args)
7938 {
7939 MixerThread::dumpInternals_l(fd, args);
7940
7941 std::stringstream ss;
7942 const size_t numTracks = mOutputTracks.size();
7943 ss << " " << numTracks << " OutputTracks";
7944 if (numTracks > 0) {
7945 ss << ":";
7946 for (const auto &track : mOutputTracks) {
7947 const auto thread = track->thread().promote();
7948 ss << " (" << track->id() << " : ";
7949 if (thread.get() != nullptr) {
7950 ss << thread.get() << ", " << thread->id();
7951 } else {
7952 ss << "null";
7953 }
7954 ss << ")";
7955 }
7956 }
7957 ss << "\n";
7958 std::string result = ss.str();
7959 write(fd, result.c_str(), result.size());
7960 }
7961
7962 void DuplicatingThread::saveOutputTracks()
7963 {
7964 outputTracks = mOutputTracks;
7965 }
7966
7967 void DuplicatingThread::clearOutputTracks()
7968 {
7969 outputTracks.clear();
7970 }
7971
7972 void DuplicatingThread::addOutputTrack(IAfPlaybackThread* thread)
7973 {
7974 audio_utils::lock_guard _l(mutex());
7975 // The downstream MixerThread consumes thread->frameCount() amount of frames per mix pass.
7976 // Adjust for thread->sampleRate() to determine minimum buffer frame count.
7977 // Then triple buffer because Threads do not run synchronously and may not be clock locked.
7978 const size_t frameCount =
7979 3 * sourceFramesNeeded(mSampleRate, thread->frameCount(), thread->sampleRate());
7980 // TODO: Consider asynchronous sample rate conversion to handle clock disparity
7981 // from different OutputTracks and their associated MixerThreads (e.g. one may be
7982 // nearly empty and the other may be dropping data).
7983
7984 // TODO b/182392769: use attribution source util, move to server edge
7985 AttributionSourceState attributionSource = AttributionSourceState();
7986 attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(
7987 IPCThreadState::self()->getCallingUid()));
7988 attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(
7989 IPCThreadState::self()->getCallingPid()));
7990 attributionSource.token = sp<BBinder>::make();
7991 sp<IAfOutputTrack> outputTrack = IAfOutputTrack::create(thread,
7992 this,
7993 mSampleRate,
7994 mFormat,
7995 mChannelMask,
7996 frameCount,
7997 attributionSource);
7998 status_t status = outputTrack != 0 ? outputTrack->initCheck() : (status_t) NO_MEMORY;
7999 if (status != NO_ERROR) {
8000 ALOGE("addOutputTrack() initCheck failed %d", status);
8001 return;
8002 }
8003 if (!audioserver_flags::portid_volume_management()) {
8004 thread->setStreamVolume(AUDIO_STREAM_PATCH, /*volume=*/1.0f, /*muted=*/false);
8005 }
8006
8007 mOutputTracks.add(outputTrack);
8008 ALOGV("addOutputTrack() track %p, on thread %p", outputTrack.get(), thread);
8009 updateWaitTime_l();
8010 }
8011
8012 void DuplicatingThread::removeOutputTrack(IAfPlaybackThread* thread)
8013 {
8014 audio_utils::lock_guard _l(mutex());
8015 for (size_t i = 0; i < mOutputTracks.size(); i++) {
8016 if (mOutputTracks[i]->thread() == thread) {
8017 mOutputTracks[i]->destroy();
8018 mOutputTracks.removeAt(i);
8019 updateWaitTime_l();
8020 // NO_THREAD_SAFETY_ANALYSIS
8021 // Lambda workaround: as thread != this
8022 // we can safely call the remote thread getOutput.
8023 const bool equalOutput =
8024 [&](){ return thread->getOutput() == mOutput; }();
8025 if (equalOutput) {
8026 mOutput = nullptr;
8027 }
8028 return;
8029 }
8030 }
8031 ALOGV("removeOutputTrack(): unknown thread: %p", thread);
8032 }
8033
8034 // caller must hold mutex()
8035 void DuplicatingThread::updateWaitTime_l()
8036 {
8037 // Initialize mWaitTimeMs according to the mixer buffer size.
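// Start from two mixer buffers worth of time in milliseconds, then reduce below to the
// smallest equivalent value among the output tracks' threads.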
8038 mWaitTimeMs = mNormalFrameCount * 2 * 1000 / mSampleRate;
8039 for (size_t i = 0; i < mOutputTracks.size(); i++) {
8040 const auto strong = mOutputTracks[i]->thread().promote();
8041 if (strong != 0) {
8042 uint32_t waitTimeMs = (strong->frameCount() * 2 * 1000) / strong->sampleRate();
8043 if (waitTimeMs < mWaitTimeMs) {
8044 mWaitTimeMs = waitTimeMs;
8045 }
8046 }
8047 }
8048 }
8049
8050 bool DuplicatingThread::outputsReady()
8051 {
8052 for (size_t i = 0; i < outputTracks.size(); i++) {
8053 const auto thread = outputTracks[i]->thread().promote();
8054 if (thread == 0) {
8055 ALOGW("DuplicatingThread::outputsReady() could not promote thread on output track %p",
8056 outputTracks[i].get());
8057 return false;
8058 }
8059 IAfPlaybackThread* const playbackThread = thread->asIAfPlaybackThread().get();
8060 // see note at standby() declaration
8061 if (playbackThread->inStandby() && !playbackThread->isSuspended()) {
8062 ALOGV("DuplicatingThread output track %p on thread %p Not Ready", outputTracks[i].get(),
8063 thread.get());
8064 return false;
8065 }
8066 }
8067 return true;
8068 }
8069
8070 void DuplicatingThread::sendMetadataToBackend_l(
8071 const StreamOutHalInterface::SourceMetadata& metadata)
8072 {
8073 for (auto& outputTrack : outputTracks) { // not mOutputTracks
8074 outputTrack->setMetadatas(metadata.tracks);
8075 }
8076 }
8077
8078 uint32_t DuplicatingThread::activeSleepTimeUs() const
8079 {
8080 // return half the wait time in microseconds.
8081 return std::min(mWaitTimeMs * 500ULL, (unsigned long long)UINT32_MAX); // prevent overflow.
8082 }
8083
8084 void DuplicatingThread::cacheParameters_l()
8085 {
8086 // updateWaitTime_l() sets mWaitTimeMs, which affects activeSleepTimeUs(), so call it first
8087 updateWaitTime_l();
8088
8089 MixerThread::cacheParameters_l();
8090 }
8091
8092 // ----------------------------------------------------------------------------
8093
8094 /* static */
8095 sp<IAfPlaybackThread> IAfPlaybackThread::createSpatializerThread(
8096 const sp<IAfThreadCallback>& afThreadCallback,
8097 AudioStreamOut* output,
8098 audio_io_handle_t id,
8099 bool systemReady,
8100 audio_config_base_t* mixerConfig) {
8101 return sp<SpatializerThread>::make(afThreadCallback, output, id, systemReady, mixerConfig);
8102 }
8103
8104 SpatializerThread::SpatializerThread(const sp<IAfThreadCallback>& afThreadCallback,
8105 AudioStreamOut* output,
8106 audio_io_handle_t id,
8107 bool systemReady,
8108 audio_config_base_t *mixerConfig)
8109 : MixerThread(afThreadCallback, output, id, systemReady, SPATIALIZER, mixerConfig)
8110 {
8111 }
8112
8113 void SpatializerThread::setHalLatencyMode_l() {
8114 // if mSupportedLatencyModes is empty, the HAL stream does not support
8115 // latency mode control and we can exit.
8116 if (mSupportedLatencyModes.empty()) {
8117 return;
8118 }
8119 // Do not update the HAL latency mode if no track is active
8120 if (mActiveTracks.isEmpty()) {
8121 return;
8122 }
8123
8124 audio_latency_mode_t latencyMode = AUDIO_LATENCY_MODE_FREE;
8125 if (mSupportedLatencyModes.size() == 1) {
8126 // If the HAL only support one latency mode currently, confirm the choice
8127 latencyMode = mSupportedLatencyModes[0];
8128 } else if (mSupportedLatencyModes.size() > 1) {
8129 // Request low latency if:
8130 // - The low latency mode is requested by the spatializer controller
8131 // (mRequestedLatencyMode = AUDIO_LATENCY_MODE_LOW)
8132 // AND
8133 // - At least one active track is spatialized
8134 for (const auto& track : mActiveTracks) {
8135 if (track->isSpatialized()) {
8136 latencyMode = mRequestedLatencyMode;
8137 break;
8138 }
8139 }
8140 }
8141
8142 if (latencyMode != mSetLatencyMode) {
8143 status_t status = mOutput->stream->setLatencyMode(latencyMode);
8144 ALOGD("%s: thread(%d) setLatencyMode(%s) returned %d",
8145 __func__, mId, toString(latencyMode).c_str(), status);
8146 if (status == NO_ERROR) {
8147 mSetLatencyMode = latencyMode;
8148 }
8149 }
8150 }
8151
8152 status_t SpatializerThread::setRequestedLatencyMode(audio_latency_mode_t mode) {
8153 if (mode < 0 || mode >= AUDIO_LATENCY_MODE_CNT) {
8154 return BAD_VALUE;
8155 }
8156 audio_utils::lock_guard _l(mutex());
8157 mRequestedLatencyMode = mode;
8158 return NO_ERROR;
8159 }
8160
8161 void SpatializerThread::checkOutputStageEffects()
8162 NO_THREAD_SAFETY_ANALYSIS
8163 // 'createEffect_l' requires holding mutex 'AudioFlinger_Mutex' exclusively
8164 {
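// If the output-stage effect chain already contains a spatializer, any final down-mixer
// previously created here is disabled and released; if the chain has neither a spatializer
// nor a down-mixer, a downmix effect is created on AUDIO_SESSION_OUTPUT_STAGE and enabled.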
8165 bool hasVirtualizer = false;
8166 bool hasDownMixer = false;
8167 sp<IAfEffectHandle> finalDownMixer;
8168 {
8169 audio_utils::lock_guard _l(mutex());
8170 sp<IAfEffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
8171 if (chain != 0) {
8172 hasVirtualizer = chain->getEffectFromType_l(FX_IID_SPATIALIZER) != nullptr;
8173 hasDownMixer = chain->getEffectFromType_l(EFFECT_UIID_DOWNMIX) != nullptr;
8174 }
8175
8176 finalDownMixer = mFinalDownMixer;
8177 mFinalDownMixer.clear();
8178 }
8179
8180 if (hasVirtualizer) {
8181 if (finalDownMixer != nullptr) {
8182 int32_t ret;
8183 finalDownMixer->asIEffect()->disable(&ret);
8184 }
8185 finalDownMixer.clear();
8186 } else if (!hasDownMixer) {
8187 std::vector<effect_descriptor_t> descriptors;
8188 status_t status = mAfThreadCallback->getEffectsFactoryHal()->getDescriptors(
8189 EFFECT_UIID_DOWNMIX, &descriptors);
8190 if (status != NO_ERROR) {
8191 return;
8192 }
8193 ALOG_ASSERT(!descriptors.empty(),
8194 "%s getDescriptors() returned no error but empty list", __func__);
8195
8196 finalDownMixer = createEffect_l(nullptr /*client*/, nullptr /*effectClient*/,
8197 0 /*priority*/, AUDIO_SESSION_OUTPUT_STAGE, &descriptors[0], nullptr /*enabled*/,
8198 &status, false /*pinned*/, false /*probe*/, false /*notifyFramesProcessed*/);
8199
8200 if (finalDownMixer == nullptr || (status != NO_ERROR && status != ALREADY_EXISTS)) {
8201 ALOGW("%s error creating downmixer %d", __func__, status);
8202 finalDownMixer.clear();
8203 } else {
8204 int32_t ret;
8205 finalDownMixer->asIEffect()->enable(&ret);
8206 }
8207 }
8208
8209 {
8210 audio_utils::lock_guard _l(mutex());
8211 mFinalDownMixer = finalDownMixer;
8212 }
8213 }
8214
8215 void SpatializerThread::threadLoop_exit()
8216 {
8217 // The Spatializer EffectHandle must be released on the PlaybackThread
8218 // threadLoop() to prevent lock inversion in the SpatializerThread dtor.
8219 mFinalDownMixer.clear();
8220
8221 PlaybackThread::threadLoop_exit();
8222 }
8223
8224 // ----------------------------------------------------------------------------
8225 // Record
8226 // ----------------------------------------------------------------------------
8227
8228 sp<IAfRecordThread> IAfRecordThread::create(const sp<IAfThreadCallback>& afThreadCallback,
8229 AudioStreamIn* input,
8230 audio_io_handle_t id,
8231 bool systemReady) {
8232 return sp<RecordThread>::make(afThreadCallback, input, id, systemReady);
8233 }
8234
8235 RecordThread::RecordThread(const sp<IAfThreadCallback>& afThreadCallback,
8236 AudioStreamIn *input,
8237 audio_io_handle_t id,
8238 bool systemReady
8239 ) :
8240 ThreadBase(afThreadCallback, id, RECORD, systemReady, false /* isOut */),
8241 mInput(input),
8242 mSource(mInput),
8243 mActiveTracks(&this->mLocalLog),
8244 mRsmpInBuffer(NULL),
8245 // mRsmpInFrames, mRsmpInFramesP2, and mRsmpInFramesOA are set by readInputParameters_l()
8246 mRsmpInRear(0)
8247 , mReadOnlyHeap(new MemoryDealer(kRecordThreadReadOnlyHeapSize,
8248 "RecordThreadRO", MemoryHeapBase::READ_ONLY))
8249 // mFastCapture below
8250 , mFastCaptureFutex(0)
8251 // mInputSource
8252 // mPipeSink
8253 // mPipeSource
8254 , mPipeFramesP2(0)
8255 // mPipeMemory
8256 // mFastCaptureNBLogWriter
8257 , mFastTrackAvail(false)
8258 , mBtNrecSuspended(false)
8259 {
8260 snprintf(mThreadName, kThreadNameLength, "AudioIn_%X", id);
8261 mFlagsAsString = toString(input->flags);
8262 mNBLogWriter = afThreadCallback->newWriter_l(kLogSize, mThreadName);
8263
8264 if (mInput->audioHwDev != nullptr) {
8265 mIsMsdDevice = strcmp(
8266 mInput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
8267 }
8268
8269 readInputParameters_l();
8270
8271 // TODO: We may also match on address as well as device type for
8272 // AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_IN_REMOTE_SUBMIX
8273 // TODO: This property should ensure that it contains only a single device type.
8274 mTimestampCorrectedDevice = (audio_devices_t)property_get_int64(
8275 "audio.timestamp.corrected_input_device",
8276 (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_IN_BUS // turn on by default for MSD
8277 : AUDIO_DEVICE_NONE));
8278
8279 // create an NBAIO source for the HAL input stream, and negotiate
8280 mInputSource = new AudioStreamInSource(input->stream);
8281 size_t numCounterOffers = 0;
8282 const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
8283 #if !LOG_NDEBUG
8284 [[maybe_unused]] ssize_t index =
8285 #else
8286 (void)
8287 #endif
8288 mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
8289 ALOG_ASSERT(index == 0);
8290
8291 // initialize fast capture depending on configuration
8292 bool initFastCapture;
8293 switch (kUseFastCapture) {
8294 case FastCapture_Never:
8295 initFastCapture = false;
8296 ALOGV("%p kUseFastCapture = Never, initFastCapture = false", this);
8297 break;
8298 case FastCapture_Always:
8299 initFastCapture = true;
8300 ALOGV("%p kUseFastCapture = Always, initFastCapture = true", this);
8301 break;
8302 case FastCapture_Static:
8303 initFastCapture = !mIsMsdDevice // Disable fast capture for MSD BUS devices.
8304 && audio_is_linear_pcm(mFormat)
8305 && (mFrameCount * 1000) / mSampleRate < kMinNormalCaptureBufferSizeMs;
8306 ALOGV("%p kUseFastCapture = Static, format = 0x%x, (%lld * 1000) / %u vs %u, "
8307 "initFastCapture = %d, mIsMsdDevice = %d", this, mFormat, (long long)mFrameCount,
8308 mSampleRate, kMinNormalCaptureBufferSizeMs, initFastCapture, mIsMsdDevice);
8309 break;
8310 // case FastCapture_Dynamic:
8311 }
8312
8313 if (initFastCapture) {
8314 // create a Pipe for FastCapture to write to, and for us and fast tracks to read from
8315 NBAIO_Format format = mInputSource->format();
8316 // quadruple-buffering of 20 ms each; this ensures we can sleep for 20ms in RecordThread
8317 size_t pipeFramesP2 = roundup(4 * FMS_20 * mSampleRate / 1000);
8318 size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
8319 void *pipeBuffer = nullptr;
8320 const sp<MemoryDealer> roHeap(readOnlyHeap());
8321 sp<IMemory> pipeMemory;
8322 if ((roHeap == 0) ||
8323 (pipeMemory = roHeap->allocate(pipeSize)) == 0 ||
8324 (pipeBuffer = pipeMemory->unsecurePointer()) == nullptr) {
8325 ALOGE("not enough memory for pipe buffer size=%zu; "
8326 "roHeap=%p, pipeMemory=%p, pipeBuffer=%p; roHeapSize: %lld",
8327 pipeSize, roHeap.get(), pipeMemory.get(), pipeBuffer,
8328 (long long)kRecordThreadReadOnlyHeapSize);
8329 goto failed;
8330 }
8331 // pipe will be shared directly with fast clients, so clear to avoid leaking old information
8332 memset(pipeBuffer, 0, pipeSize);
8333 Pipe *pipe = new Pipe(pipeFramesP2, format, pipeBuffer);
8334 const NBAIO_Format offersFast[1] = {format};
8335 size_t numCounterOffersFast = 0;
8336 [[maybe_unused]] ssize_t index2 = pipe->negotiate(offersFast, std::size(offersFast),
8337 nullptr /* counterOffers */, numCounterOffersFast);
8338 ALOG_ASSERT(index2 == 0);
8339 mPipeSink = pipe;
8340 PipeReader *pipeReader = new PipeReader(*pipe);
8341 numCounterOffersFast = 0;
8342 index2 = pipeReader->negotiate(offersFast, std::size(offersFast),
8343 nullptr /* counterOffers */, numCounterOffersFast);
8344 ALOG_ASSERT(index2 == 0);
8345 mPipeSource = pipeReader;
8346 mPipeFramesP2 = pipeFramesP2;
8347 mPipeMemory = pipeMemory;
8348
8349 // create fast capture
8350 mFastCapture = new FastCapture();
8351 FastCaptureStateQueue *sq = mFastCapture->sq();
8352 #ifdef STATE_QUEUE_DUMP
8353 // FIXME
8354 #endif
8355 FastCaptureState *state = sq->begin();
8356 state->mCblk = NULL;
8357 state->mInputSource = mInputSource.get();
8358 state->mInputSourceGen++;
8359 state->mPipeSink = pipe;
8360 state->mPipeSinkGen++;
8361 state->mFrameCount = mFrameCount;
8362 state->mCommand = FastCaptureState::COLD_IDLE;
8363 // already done in constructor initialization list
8364 //mFastCaptureFutex = 0;
8365 state->mColdFutexAddr = &mFastCaptureFutex;
8366 state->mColdGen++;
8367 state->mDumpState = &mFastCaptureDumpState;
8368 #ifdef TEE_SINK
8369 // FIXME
8370 #endif
8371 mFastCaptureNBLogWriter =
8372 afThreadCallback->newWriter_l(kFastCaptureLogSize, "FastCapture");
8373 state->mNBLogWriter = mFastCaptureNBLogWriter.get();
8374 sq->end();
8375 {
8376 audio_utils::mutex::scoped_queue_wait_check queueWaitCheck(mFastCapture->getTid());
8377 sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED);
8378 }
8379 // start the fast capture
8380 mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
8381 pid_t tid = mFastCapture->getTid();
8382 sendPrioConfigEvent(getpid(), tid, kPriorityFastCapture, false /*forApp*/);
8383 stream()->setHalThreadPriority(kPriorityFastCapture);
8384 #ifdef AUDIO_WATCHDOG
8385 // FIXME
8386 #endif
8387
8388 mFastTrackAvail = true;
8389 }
8390 #ifdef TEE_SINK
8391 mTee.set(mInputSource->format(), NBAIO_Tee::TEE_FLAG_INPUT_THREAD);
8392 mTee.setId(std::string("_") + std::to_string(mId) + "_C");
8393 #endif
8394 failed: ;
8395
8396 // FIXME mNormalSource
8397 }
8398
8399 RecordThread::~RecordThread()
8400 {
8401 if (mFastCapture != 0) {
8402 FastCaptureStateQueue *sq = mFastCapture->sq();
8403 FastCaptureState *state = sq->begin();
8404 if (state->mCommand == FastCaptureState::COLD_IDLE) {
8405 int32_t old = android_atomic_inc(&mFastCaptureFutex);
8406 if (old == -1) {
8407 (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1);
8408 }
8409 }
8410 state->mCommand = FastCaptureState::EXIT;
8411 sq->end();
8412 {
8413 audio_utils::mutex::scoped_join_wait_check queueWaitCheck(mFastCapture->getTid());
8414 sq->push(FastCaptureStateQueue::BLOCK_UNTIL_PUSHED);
8415 mFastCapture->join();
8416 }
8417 mFastCapture.clear();
8418 }
8419 mAfThreadCallback->unregisterWriter(mFastCaptureNBLogWriter);
8420 mAfThreadCallback->unregisterWriter(mNBLogWriter);
8421 free(mRsmpInBuffer);
8422 }
8423
8424 void RecordThread::onFirstRef()
8425 {
8426 run(mThreadName, PRIORITY_URGENT_AUDIO);
8427 }
8428
8429 void RecordThread::preExit()
8430 {
8431 ALOGV(" preExit()");
8432 audio_utils::lock_guard _l(mutex());
8433 for (size_t i = 0; i < mTracks.size(); i++) {
8434 sp<IAfRecordTrack> track = mTracks[i];
8435 track->invalidate();
8436 }
8437 mActiveTracks.clear();
8438 mStartStopCV.notify_all();
8439 }
8440
8441 bool RecordThread::threadLoop()
8442 {
8443 nsecs_t lastWarning = 0;
8444
8445 inputStandBy();
8446
8447 reacquire_wakelock:
8448 {
8449 audio_utils::lock_guard _l(mutex());
8450 acquireWakeLock_l();
8451 }
8452
8453 // used to request a deferred sleep, to be executed later while mutex is unlocked
8454 uint32_t sleepUs = 0;
8455
8456 // timestamp correction enable is determined under lock, used in processing step.
8457 bool timestampCorrectionEnabled = false;
8458
8459 int64_t lastLoopCountRead = -2; // never matches "previous" loop, when loopCount = 0.
8460
8461 // loop while there is work to do
8462 for (int64_t loopCount = 0;; ++loopCount) { // loopCount used for statistics tracking
8463 // Note: these sp<> are released at the end of the for loop outside of the mutex() lock.
8464 sp<IAfRecordTrack> activeTrack;
8465 std::vector<sp<IAfRecordTrack>> oldActiveTracks;
8466 Vector<sp<IAfEffectChain>> effectChains;
8467
8468 // activeTracks accumulates a copy of a subset of mActiveTracks
8469 Vector<sp<IAfRecordTrack>> activeTracks;
8470
8471 // reference to the (first and only) active fast track
8472 sp<IAfRecordTrack> fastTrack;
8473
8474 // reference to a fast track which is about to be removed
8475 sp<IAfRecordTrack> fastTrackToRemove;
8476
8477 bool silenceFastCapture = false;
8478
8479 { // scope for mutex()
8480 audio_utils::unique_lock _l(mutex());
8481
8482 processConfigEvents_l();
8483
8484 // check exitPending here because processConfigEvents_l() and
8485 // checkForNewParameter_l() can temporarily release mutex()
8486 if (exitPending()) {
8487 break;
8488 }
8489
8490 // sleep with mutex unlocked
8491 if (sleepUs > 0) {
8492 ATRACE_BEGIN("sleepC");
8493 (void)mWaitWorkCV.wait_for(_l, std::chrono::microseconds(sleepUs));
8494 ATRACE_END();
8495 sleepUs = 0;
8496 continue;
8497 }
8498
8499 // if no active track(s), then standby and release wakelock
8500 size_t size = mActiveTracks.size();
8501 if (size == 0) {
8502 standbyIfNotAlreadyInStandby();
8503 // exitPending() can't become true here
8504 releaseWakeLock_l();
8505 ALOGV("RecordThread: loop stopping");
8506 // go to sleep
8507 mWaitWorkCV.wait(_l);
8508 ALOGV("RecordThread: loop starting");
8509 goto reacquire_wakelock;
8510 }
8511
8512 bool doBroadcast = false;
8513 bool allStopped = true;
8514 for (size_t i = 0; i < size; ) {
8515 if (activeTrack) { // ensure track release is outside lock.
8516 oldActiveTracks.emplace_back(std::move(activeTrack));
8517 }
8518 activeTrack = mActiveTracks[i];
8519 if (activeTrack->isTerminated()) {
8520 if (activeTrack->isFastTrack()) {
8521 ALOG_ASSERT(fastTrackToRemove == 0);
8522 fastTrackToRemove = activeTrack;
8523 }
8524 removeTrack_l(activeTrack);
8525 mActiveTracks.remove(activeTrack);
8526 size--;
8527 continue;
8528 }
8529
8530 IAfTrackBase::track_state activeTrackState = activeTrack->state();
8531 switch (activeTrackState) {
8532
8533 case IAfTrackBase::PAUSING:
8534 mActiveTracks.remove(activeTrack);
8535 activeTrack->setState(IAfTrackBase::PAUSED);
8536 if (activeTrack->isFastTrack()) {
8537 ALOGV("%s fast track is paused, thus removed from active list", __func__);
8538 // Keep a ref on fast track to wait for FastCapture thread to get updated
8539 // state before potential track removal
8540 fastTrackToRemove = activeTrack;
8541 }
8542 doBroadcast = true;
8543 size--;
8544 continue;
8545
8546 case IAfTrackBase::STARTING_1:
8547 sleepUs = 10000;
8548 i++;
8549 allStopped = false;
8550 continue;
8551
8552 case IAfTrackBase::STARTING_2:
8553 doBroadcast = true;
8554 if (mStandby) {
8555 mThreadMetrics.logBeginInterval();
8556 mThreadSnapshot.onBegin();
8557 mStandby = false;
8558 }
8559 activeTrack->setState(IAfTrackBase::ACTIVE);
8560 allStopped = false;
8561 break;
8562
8563 case IAfTrackBase::ACTIVE:
8564 allStopped = false;
8565 break;
8566
8567 case IAfTrackBase::IDLE: // cannot be on ActiveTracks if idle
8568 case IAfTrackBase::PAUSED: // cannot be on ActiveTracks if paused
8569 case IAfTrackBase::STOPPED: // cannot be on ActiveTracks if destroyed/terminated
8570 default:
8571 LOG_ALWAYS_FATAL("%s: Unexpected active track state:%d, id:%d, tracks:%zu",
8572 __func__, activeTrackState, activeTrack->id(), size);
8573 }
8574
8575 if (activeTrack->isFastTrack()) {
8576 ALOG_ASSERT(!mFastTrackAvail);
8577 ALOG_ASSERT(fastTrack == 0);
8578 // if the active fast track is silenced either:
8579 // 1) silence the whole capture from fast capture buffer if this is
8580 // the only active track
8581 // 2) invalidate this track: this will cause the client to reconnect and possibly
8582 // be invalidated again until unsilenced
8583 bool invalidate = false;
8584 if (activeTrack->isSilenced()) {
8585 if (size > 1) {
8586 invalidate = true;
8587 } else {
8588 silenceFastCapture = true;
8589 }
8590 }
8591 // Invalidate fast tracks if access to audio history is required as this is not
8592 // possible with fast tracks. Once the fast track has been invalidated, no new
8593 // fast track will be created until mMaxSharedAudioHistoryMs is cleared.
8594 if (mMaxSharedAudioHistoryMs != 0) {
8595 invalidate = true;
8596 }
8597 if (invalidate) {
8598 activeTrack->invalidate();
8599 fastTrackToRemove = activeTrack;
8600 removeTrack_l(activeTrack);
8601 mActiveTracks.remove(activeTrack);
8602 size--;
8603 continue;
8604 }
8605 fastTrack = activeTrack;
8606 }
8607
8608 activeTracks.add(activeTrack);
8609 i++;
8610
8611 }
8612
8613 mActiveTracks.updatePowerState_l(this);
8614
8615 // check if traces have been enabled.
8616 bool atraceEnabled = ATRACE_ENABLED();
8617 if (atraceEnabled != mAtraceEnabled) [[unlikely]] {
8618 mAtraceEnabled = atraceEnabled;
8619 if (atraceEnabled) {
8620 const auto devices = patchSourcesToString(&mPatch);
8621 for (const auto& track : activeTracks) {
8622 track->logRefreshInterval(devices);
8623 }
8624 }
8625 }
8626
8627 updateMetadata_l();
8628
8629 if (allStopped) {
8630 standbyIfNotAlreadyInStandby();
8631 }
8632 if (doBroadcast) {
8633 mStartStopCV.notify_all();
8634 }
8635
8636 // sleep if there are no active tracks to process
8637 if (activeTracks.isEmpty()) {
8638 if (sleepUs == 0) {
8639 sleepUs = kRecordThreadSleepUs;
8640 }
8641 continue;
8642 }
8643 sleepUs = 0;
8644
8645 timestampCorrectionEnabled = isTimestampCorrectionEnabled_l();
8646 lockEffectChains_l(effectChains);
8647 // We're exiting locked scope with non-empty activeTracks, make sure
8648 // that we're not in standby mode which we could have entered if some
8649 // tracks were muted/unmuted.
8650 mStandby = false;
8651 }
8652
8653 // thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0
8654
8655 size_t size = effectChains.size();
8656 for (size_t i = 0; i < size; i++) {
8657 // thread mutex is not locked, but effect chain is locked
8658 effectChains[i]->process_l();
8659 }
8660
8661 // Push a new fast capture state if fast capture is not already running, or cblk change
8662 if (mFastCapture != 0) {
8663 FastCaptureStateQueue *sq = mFastCapture->sq();
8664 FastCaptureState *state = sq->begin();
8665 bool didModify = false;
8666 FastCaptureStateQueue::block_t block = FastCaptureStateQueue::BLOCK_UNTIL_PUSHED;
8667 if (state->mCommand != FastCaptureState::READ_WRITE /* FIXME &&
8668 (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)*/) {
8669 if (state->mCommand == FastCaptureState::COLD_IDLE) {
8670 int32_t old = android_atomic_inc(&mFastCaptureFutex);
8671 if (old == -1) {
8672 (void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1);
8673 }
8674 }
8675 state->mCommand = FastCaptureState::READ_WRITE;
8676 #if 0 // FIXME
8677 mFastCaptureDumpState.increaseSamplingN(mAfThreadCallback->isLowRamDevice() ?
8678 FastThreadDumpState::kSamplingNforLowRamDevice :
8679 FastThreadDumpState::kSamplingN);
8680 #endif
8681 didModify = true;
8682 }
8683 audio_track_cblk_t *cblkOld = state->mCblk;
8684 audio_track_cblk_t *cblkNew = fastTrack != 0 ? fastTrack->cblk() : NULL;
8685 if (cblkNew != cblkOld) {
8686 state->mCblk = cblkNew;
8687 // block until acked if removing a fast track
8688 if (cblkOld != NULL) {
8689 block = FastCaptureStateQueue::BLOCK_UNTIL_ACKED;
8690 }
8691 didModify = true;
8692 }
8693 AudioBufferProvider* abp = (fastTrack != 0 && fastTrack->isPatchTrack()) ?
8694 reinterpret_cast<AudioBufferProvider*>(fastTrack.get()) : nullptr;
8695 if (state->mFastPatchRecordBufferProvider != abp) {
8696 state->mFastPatchRecordBufferProvider = abp;
8697 state->mFastPatchRecordFormat = fastTrack == 0 ?
8698 AUDIO_FORMAT_INVALID : fastTrack->format();
8699 didModify = true;
8700 }
8701 if (state->mSilenceCapture != silenceFastCapture) {
8702 state->mSilenceCapture = silenceFastCapture;
8703 didModify = true;
8704 }
8705 sq->end(didModify);
8706 if (didModify) {
8707 sq->push(block);
8708 #if 0
8709 if (kUseFastCapture == FastCapture_Dynamic) {
8710 mNormalSource = mPipeSource;
8711 }
8712 #endif
8713 }
8714 }
8715
8716 // now run the fast track destructor with thread mutex unlocked
8717 fastTrackToRemove.clear();
8718
8719 // Read from HAL to keep up with fastest client if multiple active tracks, not slowest one.
8720 // Only the client(s) that are too slow will overrun. But if even the fastest client is too
8721 // slow, then this RecordThread will overrun by not calling HAL read often enough.
8722 // If destination is non-contiguous, first read past the nominal end of buffer, then
8723 // copy to the right place. Permitted because mRsmpInBuffer was over-allocated.
8724
8725 int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
8726 ssize_t framesRead = 0; // not needed, remove clang-tidy warning.
8727 const int64_t lastIoBeginNs = systemTime(); // start IO timing
8728
8729 // If an NBAIO source is present, use it to read the normal capture's data
8730 if (mPipeSource != 0) {
8731 size_t framesToRead = min(mRsmpInFramesOA - rear, mRsmpInFramesP2 / 2);
8732
8733 // The audio fifo read() returns OVERRUN on overflow, and advances the read pointer
8734 // to the full buffer point (clearing the overflow condition). Upon OVERRUN error,
8735 // we immediately retry the read() to get data and prevent another overflow.
8736 for (int retries = 0; retries <= 2; ++retries) {
8737 ALOGW_IF(retries > 0, "overrun on read from pipe, retry #%d", retries);
8738 framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
8739 framesToRead);
8740 if (framesRead != OVERRUN) break;
8741 }
8742
8743 const ssize_t availableToRead = mPipeSource->availableToRead();
8744 if (availableToRead >= 0) {
8745 mMonopipePipeDepthStats.add(availableToRead);
8746 // PipeSource is the primary clock. It is up to the AudioRecord client to keep up.
8747 LOG_ALWAYS_FATAL_IF((size_t)availableToRead > mPipeFramesP2,
8748 "more frames to read than fifo size, %zd > %zu",
8749 availableToRead, mPipeFramesP2);
8750 const size_t pipeFramesFree = mPipeFramesP2 - availableToRead;
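// Sleep for the time needed to capture half of the smaller of the free pipe space and
// our resampler input buffer, so we should wake again before the pipe can overflow.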
8751 const size_t sleepFrames = min(pipeFramesFree, mRsmpInFramesP2) / 2;
8752 ALOGVV("mPipeFramesP2:%zu mRsmpInFramesP2:%zu sleepFrames:%zu availableToRead:%zd",
8753 mPipeFramesP2, mRsmpInFramesP2, sleepFrames, availableToRead);
8754 sleepUs = (sleepFrames * 1000000LL) / mSampleRate;
8755 }
8756 if (framesRead < 0) {
8757 status_t status = (status_t) framesRead;
8758 switch (status) {
8759 case OVERRUN:
8760 ALOGW("overrun on read from pipe");
8761 framesRead = 0;
8762 break;
8763 case NEGOTIATE:
8764 ALOGE("re-negotiation is needed");
8765 framesRead = -1; // Will cause an attempt to recover.
8766 break;
8767 default:
8768 ALOGE("unknown error %d on read from pipe", status);
8769 break;
8770 }
8771 }
8772 // otherwise use the HAL / AudioStreamIn directly
8773 } else {
8774 ATRACE_BEGIN("read");
8775 size_t bytesRead;
8776 status_t result = mSource->read(
8777 (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize, &bytesRead);
8778 ATRACE_END();
8779 if (result < 0) {
8780 framesRead = result;
8781 } else {
8782 framesRead = bytesRead / mFrameSize;
8783 }
8784 }
8785
8786 const int64_t lastIoEndNs = systemTime(); // end IO timing
8787
8788 // Update server timestamp with server stats
8789 // systemTime() is optional if the hardware supports timestamps.
8790 if (framesRead >= 0) {
8791 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
8792 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = lastIoEndNs;
8793 }
8794
8795 // Update server timestamp with kernel stats
8796 if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
8797 int64_t position, time;
8798 if (mStandby) {
8799 mTimestampVerifier.discontinuity(audio_is_linear_pcm(mFormat) ?
8800 mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS :
8801 mTimestampVerifier.DISCONTINUITY_MODE_ZERO);
8802 } else if (mSource->getCapturePosition(&position, &time) == NO_ERROR
8803 && time > mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]) {
8804
8805 mTimestampVerifier.add(position, time, mSampleRate);
8806 if (timestampCorrectionEnabled) {
8807 ALOGVV("TS_BEFORE: %d %lld %lld",
8808 id(), (long long)time, (long long)position);
8809 auto correctedTimestamp = mTimestampVerifier.getLastCorrectedTimestamp();
8810 position = correctedTimestamp.mFrames;
8811 time = correctedTimestamp.mTimeNs;
8812 ALOGVV("TS_AFTER: %d %lld %lld",
8813 id(), (long long)time, (long long)position);
8814 }
8815
8816 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL] = position;
8817 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] = time;
8818 // Note: In general record buffers should tend to be empty in
8819 // a properly running pipeline.
8820 //
8821 // Also, it is not advantageous to call get_presentation_position during the read
8822 // as the read obtains a lock, preventing the timestamp call from executing.
8823 } else {
8824 mTimestampVerifier.error();
8825 }
8826 }
8827
8828 // From the timestamp, input read latency is the negative of the output write latency.
8829 const audio_input_flags_t flags = mInput != NULL ? mInput->flags : AUDIO_INPUT_FLAG_NONE;
8830 const double latencyMs = IAfRecordTrack::checkServerLatencySupported(mFormat, flags)
8831 ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
8832 if (latencyMs != 0.) { // note 0. means timestamp is empty.
8833 mLatencyMs.add(latencyMs);
8834 }
8835
8836 // Use this to track timestamp information
8837 // ALOGD("%s", mTimestamp.toString().c_str());
8838
8839 if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) {
8840 ALOGE("read failed: framesRead=%zd", framesRead);
8841 // Force input into standby so that it tries to recover at next read attempt
8842 inputStandBy();
8843 sleepUs = kRecordThreadSleepUs;
8844 }
8845 if (framesRead <= 0) {
8846 goto unlock;
8847 }
8848 ALOG_ASSERT(framesRead > 0);
8849 mFramesRead += framesRead;
8850
8851 #ifdef TEE_SINK
8852 (void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
8853 #endif
8854 // If destination is non-contiguous, we now correct for reading past end of buffer.
8855 {
8856 size_t part1 = mRsmpInFramesP2 - rear;
8857 if ((size_t) framesRead > part1) {
8858 memcpy(mRsmpInBuffer, (uint8_t*)mRsmpInBuffer + mRsmpInFramesP2 * mFrameSize,
8859 (framesRead - part1) * mFrameSize);
8860 }
8861 }
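// Worked example of the fixup above (hypothetical numbers): with mRsmpInFramesP2 = 2048,
// rear = 2000 and framesRead = 100, part1 = 48, so the HAL read spilled 52 frames into
// the over-allocated region starting at index 2048. The memcpy moves those 52 frames to
// the start of mRsmpInBuffer, restoring the circular layout before mRsmpInRear advances.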
8862 mRsmpInRear = audio_utils::safe_add_overflow(mRsmpInRear, (int32_t)framesRead);
8863
8864 size = activeTracks.size();
8865
8866 // loop over each active track
8867 for (size_t i = 0; i < size; i++) {
8868 if (activeTrack) { // ensure track release is outside lock.
8869 oldActiveTracks.emplace_back(std::move(activeTrack));
8870 }
8871 activeTrack = activeTracks[i];
8872
8873 // skip fast tracks, as those are handled directly by FastCapture
8874 if (activeTrack->isFastTrack()) {
8875 continue;
8876 }
8877
8878 // TODO: This code probably should be moved to RecordTrack.
8879 // TODO: Update the activeTrack buffer converter in case of reconfigure.
8880
8881 enum {
8882 OVERRUN_UNKNOWN,
8883 OVERRUN_TRUE,
8884 OVERRUN_FALSE
8885 } overrun = OVERRUN_UNKNOWN;
8886
8887 // loop over getNextBuffer to handle circular sink
8888 for (;;) {
8889
8890 activeTrack->sinkBuffer().frameCount = ~0;
8891 status_t status = activeTrack->getNextBuffer(&activeTrack->sinkBuffer());
8892 size_t framesOut = activeTrack->sinkBuffer().frameCount;
8893 LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0));
8894
8895 // check available frames and handle overrun conditions
8896 // if the record track isn't draining fast enough.
8897 bool hasOverrun;
8898 size_t framesIn;
8899 activeTrack->resamplerBufferProvider()->sync(&framesIn, &hasOverrun);
8900 if (hasOverrun) {
8901 overrun = OVERRUN_TRUE;
8902 }
8903 if (framesOut == 0 || framesIn == 0) {
8904 break;
8905 }
8906
8907 // Don't allow framesOut to be larger than what is possible with resampling
8908 // from framesIn.
8909 // This isn't strictly necessary but helps limit buffer resizing in
8910 // RecordBufferConverter. TODO: remove when no longer needed.
8911 if (audio_is_linear_pcm(activeTrack->format())) {
8912 framesOut = min(framesOut,
8913 destinationFramesPossible(
8914 framesIn, mSampleRate, activeTrack->sampleRate()));
8915 }
8916
8917 if (activeTrack->isDirect()) {
8918 // No RecordBufferConverter used for direct streams. Pass
8919 // straight from RecordThread buffer to RecordTrack buffer.
8920 AudioBufferProvider::Buffer buffer;
8921 buffer.frameCount = framesOut;
8922 const status_t getNextBufferStatus =
8923 activeTrack->resamplerBufferProvider()->getNextBuffer(&buffer);
8924 if (getNextBufferStatus == OK && buffer.frameCount != 0) {
8925 ALOGV_IF(buffer.frameCount != framesOut,
8926 "%s() read less than expected (%zu vs %zu)",
8927 __func__, buffer.frameCount, framesOut);
8928 framesOut = buffer.frameCount;
8929 memcpy(activeTrack->sinkBuffer().raw,
8930 buffer.raw, buffer.frameCount * mFrameSize);
8931 activeTrack->resamplerBufferProvider()->releaseBuffer(&buffer);
8932 } else {
8933 framesOut = 0;
8934 ALOGE("%s() cannot fill request, status: %d, frameCount: %zu",
8935 __func__, getNextBufferStatus, buffer.frameCount);
8936 }
8937 } else {
8938 // process frames from the RecordThread buffer provider to the RecordTrack
8939 // buffer
8940 framesOut = activeTrack->recordBufferConverter()->convert(
8941 activeTrack->sinkBuffer().raw,
8942 activeTrack->resamplerBufferProvider(),
8943 framesOut);
8944 }
8945
8946 if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
8947 overrun = OVERRUN_FALSE;
8948 }
8949
8950 // MediaSyncEvent handling: Synchronize AudioRecord to AudioTrack completion.
8951 const ssize_t framesToDrop =
8952 activeTrack->synchronizedRecordState().updateRecordFrames(framesOut);
8953 if (framesToDrop == 0) {
8954 // no sync event, process normally, otherwise ignore.
8955 if (framesOut > 0) {
8956 activeTrack->sinkBuffer().frameCount = framesOut;
8957 // Sanitize before releasing if the track has no access to the source data
8958 // An idle UID receives silence from non virtual devices until active
8959 if (activeTrack->isSilenced()) {
8960 memset(activeTrack->sinkBuffer().raw,
8961 0, framesOut * activeTrack->frameSize());
8962 }
8963 activeTrack->releaseBuffer(&activeTrack->sinkBuffer());
8964 }
8965 }
8966 if (framesOut == 0) {
8967 break;
8968 }
8969 }
8970
8971 switch (overrun) {
8972 case OVERRUN_TRUE:
8973 // client isn't retrieving buffers fast enough
8974 if (!activeTrack->setOverflow()) {
8975 nsecs_t now = systemTime();
8976 // FIXME should lastWarning be maintained per track?
8977 if ((now - lastWarning) > kWarningThrottleNs) {
8978 ALOGW("RecordThread: buffer overflow");
8979 lastWarning = now;
8980 }
8981 }
8982 break;
8983 case OVERRUN_FALSE:
8984 activeTrack->clearOverflow();
8985 break;
8986 case OVERRUN_UNKNOWN:
8987 break;
8988 }
8989
8990 // update frame information and push timestamp out
8991 activeTrack->updateTrackFrameInfo(
8992 activeTrack->serverProxy()->framesReleased(),
8993 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER],
8994 mSampleRate, mTimestamp);
8995 }
8996
8997 unlock:
8998 // enable changes in effect chain
8999 unlockEffectChains(effectChains);
9000 // effectChains doesn't need to be cleared, since it is cleared by destructor at scope end
9001 if (audio_has_proportional_frames(mFormat)
9002 && loopCount == lastLoopCountRead + 1) {
9003 const int64_t readPeriodNs = lastIoEndNs - mLastIoEndNs;
9004 const double jitterMs =
9005 TimestampVerifier<int64_t, int64_t>::computeJitterMs(
9006 {framesRead, readPeriodNs},
9007 {0, 0} /* lastTimestamp */, mSampleRate);
9008 const double processMs = (lastIoBeginNs - mLastIoEndNs) * 1e-6;
9009
9010 audio_utils::lock_guard _l(mutex());
9011 mIoJitterMs.add(jitterMs);
9012 mProcessTimeMs.add(processMs);
9013 }
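// Rough numeric sketch of the jitter metric above, assuming computeJitterMs reports the
// measured read period minus the period implied by the frame count: framesRead = 960 at
// mSampleRate = 48000 implies a 20 ms period; if lastIoEndNs - mLastIoEndNs measures
// 21.5 ms, the recorded jitter is about +1.5 ms.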
9014 mThreadloopExecutor.process();
9015 // update timing info.
9016 mLastIoBeginNs = lastIoBeginNs;
9017 mLastIoEndNs = lastIoEndNs;
9018 lastLoopCountRead = loopCount;
9019 }
9020 mThreadloopExecutor.process(); // process any remaining deferred actions.
9021 // deferred actions after this point are ignored.
9022
9023 standbyIfNotAlreadyInStandby();
9024
9025 {
9026 audio_utils::lock_guard _l(mutex());
9027 for (size_t i = 0; i < mTracks.size(); i++) {
9028 sp<IAfRecordTrack> track = mTracks[i];
9029 track->invalidate();
9030 }
9031 mActiveTracks.clear();
9032 mStartStopCV.notify_all();
9033 }
9034
9035 releaseWakeLock();
9036
9037 ALOGV("RecordThread %p exiting", this);
9038 return false;
9039 }
9040
9041 void RecordThread::standbyIfNotAlreadyInStandby()
9042 {
9043 if (!mStandby) {
9044 inputStandBy();
9045 mThreadMetrics.logEndInterval();
9046 mThreadSnapshot.onEnd();
9047 mStandby = true;
9048 }
9049 }
9050
9051 void RecordThread::inputStandBy()
9052 {
9053 // Idle the fast capture if it's currently running
9054 if (mFastCapture != 0) {
9055 FastCaptureStateQueue *sq = mFastCapture->sq();
9056 FastCaptureState *state = sq->begin();
9057 if (!(state->mCommand & FastCaptureState::IDLE)) {
9058 state->mCommand = FastCaptureState::COLD_IDLE;
9059 state->mColdFutexAddr = &mFastCaptureFutex;
9060 state->mColdGen++;
9061 mFastCaptureFutex = 0;
9062 sq->end();
9063 // BLOCK_UNTIL_PUSHED would be insufficient, as we need it to stop doing I/O now
9064 {
9065 audio_utils::mutex::scoped_queue_wait_check queueWaitCheck(mFastCapture->getTid());
9066 sq->push(FastCaptureStateQueue::BLOCK_UNTIL_ACKED);
9067 }
9068
9069 #if 0
9070 if (kUseFastCapture == FastCapture_Dynamic) {
9071 // FIXME
9072 }
9073 #endif
9074 #ifdef AUDIO_WATCHDOG
9075 // FIXME
9076 #endif
9077 } else {
9078 sq->end(false /*didModify*/);
9079 }
9080 }
9081 status_t result = mSource->standby();
9082 ALOGE_IF(result != OK, "Error when putting input stream into standby: %d", result);
9083
9084 // If going into standby, flush the pipe source.
9085 if (mPipeSource.get() != nullptr) {
9086 const ssize_t flushed = mPipeSource->flush();
9087 if (flushed > 0) {
9088 ALOGV("Input standby flushed PipeSource %zd frames", flushed);
9089 mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += flushed;
9090 mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
9091 }
9092 }
9093 }
9094
9095 // RecordThread::createRecordTrack_l() must be called with AudioFlinger::mutex() held
9096 sp<IAfRecordTrack> RecordThread::createRecordTrack_l(
9097 const sp<Client>& client,
9098 const audio_attributes_t& attr,
9099 uint32_t *pSampleRate,
9100 audio_format_t format,
9101 audio_channel_mask_t channelMask,
9102 size_t *pFrameCount,
9103 audio_session_t sessionId,
9104 size_t *pNotificationFrameCount,
9105 pid_t creatorPid,
9106 const AttributionSourceState& attributionSource,
9107 audio_input_flags_t *flags,
9108 pid_t tid,
9109 status_t *status,
9110 audio_port_handle_t portId,
9111 int32_t maxSharedAudioHistoryMs)
9112 {
9113 size_t frameCount = *pFrameCount;
9114 size_t notificationFrameCount = *pNotificationFrameCount;
9115 sp<IAfRecordTrack> track;
9116 status_t lStatus;
9117 audio_input_flags_t inputFlags = mInput->flags;
9118 audio_input_flags_t requestedFlags = *flags;
9119 uint32_t sampleRate;
9120
9121 lStatus = initCheck();
9122 if (lStatus != NO_ERROR) {
9123 ALOGE("createRecordTrack_l() audio driver not initialized");
9124 goto Exit;
9125 }
9126
9127 if (!audio_is_linear_pcm(mFormat) && (*flags & AUDIO_INPUT_FLAG_DIRECT) == 0) {
9128 ALOGE("createRecordTrack_l() on an encoded stream requires AUDIO_INPUT_FLAG_DIRECT");
9129 lStatus = BAD_VALUE;
9130 goto Exit;
9131 }
9132
9133 if (maxSharedAudioHistoryMs != 0) {
9134 if (!captureHotwordAllowed(attributionSource)) {
9135 lStatus = PERMISSION_DENIED;
9136 goto Exit;
9137 }
9138 if (maxSharedAudioHistoryMs < 0
9139 || maxSharedAudioHistoryMs > kMaxSharedAudioHistoryMs) {
9140 lStatus = BAD_VALUE;
9141 goto Exit;
9142 }
9143 }
9144 if (*pSampleRate == 0) {
9145 *pSampleRate = mSampleRate;
9146 }
9147 sampleRate = *pSampleRate;
9148
9149 // special case: the FAST flag is considered OK if fast capture is present and access to
9150 // audio history is not required
9151 if (hasFastCapture() && mMaxSharedAudioHistoryMs == 0) {
9152 inputFlags = (audio_input_flags_t)(inputFlags | AUDIO_INPUT_FLAG_FAST);
9153 }
9154
9155 // Check if requested flags are compatible with input stream flags
9156 if ((*flags & inputFlags) != *flags) {
9157 ALOGW("createRecordTrack_l(): mismatch between requested flags (%08x) and"
9158 " input flags (%08x)",
9159 *flags, inputFlags);
9160 *flags = (audio_input_flags_t)(*flags & inputFlags);
9161 }
9162
9163 // client expresses a preference for FAST and no access to audio history,
9164 // but we get the final say
9165 if (*flags & AUDIO_INPUT_FLAG_FAST && maxSharedAudioHistoryMs == 0) {
9166 if (
9167 // we formerly checked for a callback handler (non-0 tid),
9168 // but that is no longer required for TRANSFER_OBTAIN mode
9169 // No need to match hardware format, format conversion will be done on the client side.
9170 //
9171 // Frame count is not specified (0), or is less than or equal to the pipe depth.
9172 // It is OK to provide a higher capacity than requested.
9173 // We will force it to mPipeFramesP2 below.
9174 (frameCount <= mPipeFramesP2) &&
9175 // PCM data
9176 audio_is_linear_pcm(format) &&
9177 // hardware channel mask
9178 (channelMask == mChannelMask) &&
9179 // hardware sample rate
9180 (sampleRate == mSampleRate) &&
9181 // record thread has an associated fast capture
9182 hasFastCapture() &&
9183 // there are sufficient fast track slots available
9184 mFastTrackAvail
9185 ) {
9186 // check compatibility with audio effects.
9187 audio_utils::lock_guard _l(mutex());
9188 // Do not accept FAST flag if the session has software effects
9189 sp<IAfEffectChain> chain = getEffectChain_l(sessionId);
9190 if (chain != 0) {
9191 audio_input_flags_t old = *flags;
9192 chain->checkInputFlagCompatibility(flags);
9193 if (old != *flags) {
9194 ALOGV("%p AUDIO_INPUT_FLAGS denied by effect old=%#x new=%#x",
9195 this, (int)old, (int)*flags);
9196 }
9197 }
9198 ALOGV_IF((*flags & AUDIO_INPUT_FLAG_FAST) != 0,
9199 "%p AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
9200 this, frameCount, mFrameCount);
9201 } else {
9202 ALOGV("%p AUDIO_INPUT_FLAG_FAST denied: frameCount=%zu mFrameCount=%zu mPipeFramesP2=%zu "
9203 "format=%#x isLinear=%d mFormat=%#x channelMask=%#x sampleRate=%u mSampleRate=%u "
9204 "hasFastCapture=%d tid=%d mFastTrackAvail=%d",
9205 this, frameCount, mFrameCount, mPipeFramesP2,
9206 format, audio_is_linear_pcm(format), mFormat, channelMask, sampleRate, mSampleRate,
9207 hasFastCapture(), tid, mFastTrackAvail);
9208 *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_FAST);
9209 }
9210 }
9211
9212 // If FAST or RAW flags were corrected, ask caller to request new input from audio policy
9213 if ((*flags & AUDIO_INPUT_FLAG_FAST) !=
9214 (requestedFlags & AUDIO_INPUT_FLAG_FAST)) {
9215 *flags = (audio_input_flags_t) (*flags & ~(AUDIO_INPUT_FLAG_FAST | AUDIO_INPUT_FLAG_RAW));
9216 lStatus = BAD_TYPE;
9217 goto Exit;
9218 }
9219
9220 // compute track buffer size in frames, and suggest the notification frame count
9221 if (*flags & AUDIO_INPUT_FLAG_FAST) {
9222 // fast track: frame count is exactly the pipe depth
9223 frameCount = mPipeFramesP2;
9224 // ignore requested notificationFrames, and always notify exactly once every HAL buffer
9225 notificationFrameCount = mFrameCount;
9226 } else {
9227 // not fast track: max notification period is resampled equivalent of one HAL buffer time
9228 // or 20 ms if there is a fast capture
9229 // TODO This could be a roundupRatio inline, and const
9230 size_t maxNotificationFrames = ((int64_t) (hasFastCapture() ? mSampleRate/50 : mFrameCount)
9231 * sampleRate + mSampleRate - 1) / mSampleRate;
9232 // minimum number of notification periods is at least kMinNotifications,
9233 // and at least kMinMs rounded up to a whole notification period (minNotificationsByMs)
9234 static const size_t kMinNotifications = 3;
9235 static const uint32_t kMinMs = 30;
9236 // TODO This could be a roundupRatio inline
9237 const size_t minFramesByMs = (sampleRate * kMinMs + 1000 - 1) / 1000;
9238 // TODO This could be a roundupRatio inline
9239 const size_t minNotificationsByMs = (minFramesByMs + maxNotificationFrames - 1) /
9240 maxNotificationFrames;
9241 const size_t minFrameCount = maxNotificationFrames *
9242 max(kMinNotifications, minNotificationsByMs);
9243 frameCount = max(frameCount, minFrameCount);
9244 if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
9245 notificationFrameCount = maxNotificationFrames;
9246 }
9247 }
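// Sketch of the roundupRatio helper the TODOs above refer to (hypothetical; no such
// helper exists in this file):
//
//     static inline size_t roundupRatio(uint64_t numerator, uint64_t divisor) {
//         return (numerator + divisor - 1) / divisor; // ceil(numerator / divisor)
//     }
//
// e.g. maxNotificationFrames would become
//     roundupRatio((int64_t)(hasFastCapture() ? mSampleRate / 50 : mFrameCount) * sampleRate,
//             mSampleRate);
// and minFramesByMs would become roundupRatio(sampleRate * kMinMs, 1000).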
9248 *pFrameCount = frameCount;
9249 *pNotificationFrameCount = notificationFrameCount;
9250
9251 { // scope for mutex()
9252 audio_utils::lock_guard _l(mutex());
9253 int32_t startFrames = -1;
9254 if (!mSharedAudioPackageName.empty()
9255 && mSharedAudioPackageName == attributionSource.packageName
9256 && mSharedAudioSessionId == sessionId
9257 && captureHotwordAllowed(attributionSource)) {
9258 startFrames = mSharedAudioStartFrames;
9259 }
9260
9261 track = IAfRecordTrack::create(this, client, attr, sampleRate,
9262 format, channelMask, frameCount,
9263 nullptr /* buffer */, (size_t)0 /* bufferSize */, sessionId, creatorPid,
9264 attributionSource, *flags, IAfTrackBase::TYPE_DEFAULT, portId,
9265 startFrames);
9266
9267 lStatus = track->initCheck();
9268 if (lStatus != NO_ERROR) {
9269 ALOGE("createRecordTrack_l() initCheck failed %d; no control block?", lStatus);
9270 // track must be cleared from the caller as the caller has the AF lock
9271 goto Exit;
9272 }
9273 mTracks.add(track);
9274
9275 if ((*flags & AUDIO_INPUT_FLAG_FAST) && (tid != -1)) {
9276 pid_t callingPid = IPCThreadState::self()->getCallingPid();
9277 // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
9278 // so ask activity manager to do this on our behalf
9279 sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
9280 }
9281
9282 if (maxSharedAudioHistoryMs != 0) {
9283 sendResizeBufferConfigEvent_l(maxSharedAudioHistoryMs);
9284 }
9285 }
9286
9287 lStatus = NO_ERROR;
9288
9289 Exit:
9290 *status = lStatus;
9291 return track;
9292 }
9293
9294 status_t RecordThread::start(IAfRecordTrack* recordTrack,
9295 AudioSystem::sync_event_t event,
9296 audio_session_t triggerSession)
9297 {
9298 ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
9299 sp<ThreadBase> strongMe = this;
9300 status_t status = NO_ERROR;
9301
9302 if (event == AudioSystem::SYNC_EVENT_NONE) {
9303 recordTrack->clearSyncStartEvent();
9304 } else if (event != AudioSystem::SYNC_EVENT_SAME) {
9305 recordTrack->synchronizedRecordState().startRecording(
9306 mAfThreadCallback->createSyncEvent(
9307 event, triggerSession,
9308 recordTrack->sessionId(), syncStartEventCallback, recordTrack));
9309 }
9310
9311 {
9312 // This section is a rendezvous between binder thread executing start() and RecordThread
9313 audio_utils::lock_guard lock(mutex());
9314 if (recordTrack->isInvalid()) {
9315 recordTrack->clearSyncStartEvent();
9316 ALOGW("%s track %d: invalidated before startInput", __func__, recordTrack->portId());
9317 return DEAD_OBJECT;
9318 }
9319 if (mActiveTracks.indexOf(recordTrack) >= 0) {
9320 if (recordTrack->state() == IAfTrackBase::PAUSING) {
9321 // We haven't stopped yet (moved to PAUSED and not in mActiveTracks)
9322 // so no need to startInput().
9323 ALOGV("active record track PAUSING -> ACTIVE");
9324 recordTrack->setState(IAfTrackBase::ACTIVE);
9325 } else {
9326 ALOGV("active record track state %d", (int)recordTrack->state());
9327 }
9328 return status;
9329 }
9330
9331 // TODO consider other ways of handling this, such as changing the state to STARTING and
9332 // adding the track to mActiveTracks after returning from AudioSystem::startInput(),
9333 // or using a separate command thread
9334 recordTrack->setState(IAfTrackBase::STARTING_1);
9335 mActiveTracks.add(recordTrack);
9336 if (recordTrack->isExternalTrack()) {
9337 mutex().unlock();
9338 status = AudioSystem::startInput(recordTrack->portId());
9339 mutex().lock();
9340 if (recordTrack->isInvalid()) {
9341 recordTrack->clearSyncStartEvent();
9342 if (status == NO_ERROR && recordTrack->state() == IAfTrackBase::STARTING_1) {
9343 recordTrack->setState(IAfTrackBase::STARTING_2);
9344 // STARTING_2 forces destroy to call stopInput.
9345 }
9346 ALOGW("%s track %d: invalidated after startInput", __func__, recordTrack->portId());
9347 return DEAD_OBJECT;
9348 }
9349 if (recordTrack->state() != IAfTrackBase::STARTING_1) {
9350 ALOGW("%s(%d): unsynchronized mState:%d change",
9351 __func__, recordTrack->id(), (int)recordTrack->state());
9352 // Someone else has changed state, let them take over,
9353 // leave mState in the new state.
9354 recordTrack->clearSyncStartEvent();
9355 return INVALID_OPERATION;
9356 }
9357 // we're ok, but perhaps startInput has failed
9358 if (status != NO_ERROR) {
9359 ALOGW("%s(%d): startInput failed, status %d",
9360 __func__, recordTrack->id(), status);
9361 // We are in ActiveTracks if STARTING_1 and valid, so remove from ActiveTracks,
9362 // leave in STARTING_1, so destroy() will not call stopInput.
9363 mActiveTracks.remove(recordTrack);
9364 recordTrack->clearSyncStartEvent();
9365 return status;
9366 }
9367 sendIoConfigEvent_l(
9368 AUDIO_CLIENT_STARTED, recordTrack->creatorPid(), recordTrack->portId());
9369 }
9370
9371 recordTrack->logBeginInterval(patchSourcesToString(&mPatch)); // log to MediaMetrics
9372
9373 // Catch up with current buffer indices if thread is already running.
9374 // This is what makes a new client discard all buffered data. If the track's mRsmpInFront
9375 // was initialized to some value closer to the thread's mRsmpInFront, then the track could
9376 // see previously buffered data before it called start(), but with greater risk of overrun.
9377
9378 recordTrack->resamplerBufferProvider()->reset();
9379 if (!recordTrack->isDirect()) {
9380 // clear any converter state as new data will be discontinuous
9381 recordTrack->recordBufferConverter()->reset();
9382 }
9383 recordTrack->setState(IAfTrackBase::STARTING_2);
9384 // signal thread to start
9385 mWaitWorkCV.notify_all();
9386 return status;
9387 }
9388 }
9389
9390 void RecordThread::syncStartEventCallback(const wp<SyncEvent>& event)
9391 {
9392 const sp<SyncEvent> strongEvent = event.promote();
9393
9394 if (strongEvent != 0) {
9395 sp<IAfTrackBase> ptr =
9396 std::any_cast<const wp<IAfTrackBase>>(strongEvent->cookie()).promote();
9397 if (ptr != nullptr) {
9398 // TODO(b/291317898) handleSyncStartEvent is in IAfTrackBase not IAfRecordTrack.
9399 ptr->handleSyncStartEvent(strongEvent);
9400 }
9401 }
9402 }
9403
9404 bool RecordThread::stop(IAfRecordTrack* recordTrack) {
9405 ALOGV("RecordThread::stop");
9406 audio_utils::unique_lock _l(mutex());
9407 // if we're invalid, we can't be on the ActiveTracks.
9408 if (mActiveTracks.indexOf(recordTrack) < 0 || recordTrack->state() == IAfTrackBase::PAUSING) {
9409 return false;
9410 }
9411 // note that threadLoop may still be processing the track at this point [without lock]
9412 recordTrack->setState(IAfTrackBase::PAUSING);
9413
9414 // NOTE: Waiting here is important to keep stop synchronous.
9415 // This is needed for proper patchRecord peer release.
9416 while (recordTrack->state() == IAfTrackBase::PAUSING && !recordTrack->isInvalid()) {
9417 mWaitWorkCV.notify_all(); // signal thread to stop
9418 mStartStopCV.wait(_l, getTid());
9419 }
9420
9421 if (recordTrack->state() == IAfTrackBase::PAUSED) { // successful stop
9422 ALOGV("Record stopped OK");
9423 return true;
9424 }
9425
9426 // don't handle anything - we've been invalidated or restarted and in a different state
9427 ALOGW("%s(%d): unsynchronized stop, state: %d",
9428 __func__, recordTrack->id(), recordTrack->state());
9429 return false;
9430 }
9431
9432 bool RecordThread::isValidSyncEvent(const sp<SyncEvent>& /* event */) const
9433 {
9434 return false;
9435 }
9436
9437 status_t RecordThread::setSyncEvent(const sp<SyncEvent>& /* event */)
9438 {
9439 #if 0 // This branch is currently dead code, but is preserved in case it will be needed in future
9440 if (!isValidSyncEvent(event)) {
9441 return BAD_VALUE;
9442 }
9443
9444 audio_session_t eventSession = event->triggerSession();
9445 status_t ret = NAME_NOT_FOUND;
9446
9447 audio_utils::lock_guard _l(mutex());
9448
9449 for (size_t i = 0; i < mTracks.size(); i++) {
9450 sp<IAfRecordTrack> track = mTracks[i];
9451 if (eventSession == track->sessionId()) {
9452 (void) track->setSyncEvent(event);
9453 ret = NO_ERROR;
9454 }
9455 }
9456 return ret;
9457 #else
9458 return BAD_VALUE;
9459 #endif
9460 }
9461
9462 status_t RecordThread::getActiveMicrophones(
9463 std::vector<media::MicrophoneInfoFw>* activeMicrophones) const
9464 {
9465 ALOGV("RecordThread::getActiveMicrophones");
9466 audio_utils::lock_guard _l(mutex());
9467 if (!isStreamInitialized()) {
9468 return NO_INIT;
9469 }
9470 status_t status = mInput->stream->getActiveMicrophones(activeMicrophones);
9471 return status;
9472 }
9473
9474 status_t RecordThread::setPreferredMicrophoneDirection(
9475 audio_microphone_direction_t direction)
9476 {
9477 ALOGV("setPreferredMicrophoneDirection(%d)", direction);
9478 audio_utils::lock_guard _l(mutex());
9479 if (!isStreamInitialized()) {
9480 return NO_INIT;
9481 }
9482 return mInput->stream->setPreferredMicrophoneDirection(direction);
9483 }
9484
9485 status_t RecordThread::setPreferredMicrophoneFieldDimension(float zoom)
9486 {
9487 ALOGV("setPreferredMicrophoneFieldDimension(%f)", zoom);
9488 audio_utils::lock_guard _l(mutex());
9489 if (!isStreamInitialized()) {
9490 return NO_INIT;
9491 }
9492 return mInput->stream->setPreferredMicrophoneFieldDimension(zoom);
9493 }
9494
9495 status_t RecordThread::shareAudioHistory(
9496 const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
9497 int64_t sharedAudioStartMs) {
9498 audio_utils::lock_guard _l(mutex());
9499 return shareAudioHistory_l(sharedAudioPackageName, sharedSessionId, sharedAudioStartMs);
9500 }
9501
9502 status_t RecordThread::shareAudioHistory_l(
9503 const std::string& sharedAudioPackageName, audio_session_t sharedSessionId,
9504 int64_t sharedAudioStartMs) {
9505
9506 if ((hasAudioSession_l(sharedSessionId) & ThreadBase::TRACK_SESSION) == 0) {
9507 return BAD_VALUE;
9508 }
9509
9510 if (sharedAudioStartMs < 0
9511 || sharedAudioStartMs > INT64_MAX / mSampleRate) {
9512 return BAD_VALUE;
9513 }
9514
9515 // Current implementation of the input resampling buffer wraps around indexes at 32 bit.
9516 // As we cannot detect more than one wraparound, only accept values up to the current write
9517 // position after one wraparound.
9518 // We only assume recent wraparounds of mRsmpInRear, given it is unlikely that the requesting
9519 // app waits several hours after the start time was computed.
9520 int64_t sharedAudioStartFrames = sharedAudioStartMs * mSampleRate / 1000;
9521 const int32_t sharedOffset = audio_utils::safe_sub_overflow(mRsmpInRear,
9522 (int32_t)sharedAudioStartFrames);
9523 // Bring the start frame position within the input buffer to match the documented
9524 // "best effort" behavior of the API.
9525 if (sharedOffset < 0) {
9526 sharedAudioStartFrames = mRsmpInRear;
9527 } else if (sharedOffset > static_cast<signed>(mRsmpInFrames)) {
9528 sharedAudioStartFrames =
9529 audio_utils::safe_sub_overflow(mRsmpInRear, (int32_t)mRsmpInFrames);
9530 }
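// Wraparound sketch for the offset math above (hypothetical values): safe_sub_overflow
// works in two's complement, so if mRsmpInRear has just wrapped to INT32_MIN + 5 while
// the casted sharedAudioStartFrames is INT32_MAX - 10 (computed before the wrap), the
// subtraction still yields a small positive sharedOffset of 16 frames and the requested
// start position is accepted unchanged.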
9531
9532 mSharedAudioPackageName = sharedAudioPackageName;
9533 if (mSharedAudioPackageName.empty()) {
9534 resetAudioHistory_l();
9535 } else {
9536 mSharedAudioSessionId = sharedSessionId;
9537 mSharedAudioStartFrames = (int32_t)sharedAudioStartFrames;
9538 }
9539 return NO_ERROR;
9540 }
9541
9542 void RecordThread::resetAudioHistory_l() {
9543 mSharedAudioSessionId = AUDIO_SESSION_NONE;
9544 mSharedAudioStartFrames = -1;
9545 mSharedAudioPackageName = "";
9546 }
9547
9548 ThreadBase::MetadataUpdate RecordThread::updateMetadata_l()
9549 {
9550 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
9551 return {}; // nothing to do
9552 }
9553 StreamInHalInterface::SinkMetadata metadata;
9554 auto backInserter = std::back_inserter(metadata.tracks);
9555 for (const sp<IAfRecordTrack>& track : mActiveTracks) {
9556 track->copyMetadataTo(backInserter);
9557 }
9558 mInput->stream->updateSinkMetadata(metadata);
9559 MetadataUpdate change;
9560 change.recordMetadataUpdate = metadata.tracks;
9561 return change;
9562 }
9563
9564 // destroyTrack_l() must be called with ThreadBase::mutex() held
9565 void RecordThread::destroyTrack_l(const sp<IAfRecordTrack>& track)
9566 {
9567 track->terminate();
9568 track->setState(IAfTrackBase::STOPPED);
9569
9570 // active tracks are removed by threadLoop()
9571 if (mActiveTracks.indexOf(track) < 0) {
9572 removeTrack_l(track);
9573 }
9574 }
9575
9576 void RecordThread::removeTrack_l(const sp<IAfRecordTrack>& track)
9577 {
9578 String8 result;
9579 track->appendDump(result, false /* active */);
9580 mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.c_str());
9581
9582 mTracks.remove(track);
9583 // need anything related to effects here?
9584 if (track->isFastTrack()) {
9585 ALOG_ASSERT(!mFastTrackAvail);
9586 mFastTrackAvail = true;
9587 }
9588 }
9589
9590 void RecordThread::dumpInternals_l(int fd, const Vector<String16>& /* args */)
9591 {
9592 AudioStreamIn *input = mInput;
9593 audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
9594 dprintf(fd, " AudioStreamIn: %p flags %#x (%s)\n",
9595 input, flags, toString(flags).c_str());
9596 dprintf(fd, " Frames read: %lld\n", (long long)mFramesRead);
9597 if (mActiveTracks.isEmpty()) {
9598 dprintf(fd, " No active record clients\n");
9599 }
9600
9601 if (input != nullptr) {
9602 dprintf(fd, " Hal stream dump:\n");
9603 (void)input->stream->dump(fd);
9604 }
9605
9606 dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
9607 dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
9608
9609 // Make a non-atomic copy of fast capture dump state so it won't change underneath us
9610 // while we are dumping it. It may be inconsistent, but it won't mutate!
9611 // This is a large object so we place it on the heap.
9612 // FIXME 25972958: Need an intelligent copy constructor that does not touch unused pages.
9613 const std::unique_ptr<FastCaptureDumpState> copy =
9614 std::make_unique<FastCaptureDumpState>(mFastCaptureDumpState);
9615 copy->dump(fd);
9616 }
9617
9618 void RecordThread::dumpTracks_l(int fd, const Vector<String16>& /* args */)
9619 {
9620 String8 result;
9621 size_t numtracks = mTracks.size();
9622 size_t numactive = mActiveTracks.size();
9623 size_t numactiveseen = 0;
9624 dprintf(fd, " %zu Tracks", numtracks);
9625 const char *prefix = " ";
9626 if (numtracks) {
9627 dprintf(fd, " of which %zu are active\n", numactive);
9628 result.append(prefix);
9629 mTracks[0]->appendDumpHeader(result);
9630 for (size_t i = 0; i < numtracks ; ++i) {
9631 sp<IAfRecordTrack> track = mTracks[i];
9632 if (track != 0) {
9633 bool active = mActiveTracks.indexOf(track) >= 0;
9634 if (active) {
9635 numactiveseen++;
9636 }
9637 result.append(prefix);
9638 track->appendDump(result, active);
9639 }
9640 }
9641 } else {
9642 dprintf(fd, "\n");
9643 }
9644
9645 if (numactiveseen != numactive) {
9646 result.append(" The following tracks are in the active list but"
9647 " not in the track list\n");
9648 result.append(prefix);
9649 mActiveTracks[0]->appendDumpHeader(result);
9650 for (size_t i = 0; i < numactive; ++i) {
9651 sp<IAfRecordTrack> track = mActiveTracks[i];
9652 if (mTracks.indexOf(track) < 0) {
9653 result.append(prefix);
9654 track->appendDump(result, true /* active */);
9655 }
9656 }
9657
9658 }
9659 write(fd, result.c_str(), result.size());
9660 }
9661
9662 void RecordThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
9663 {
9664 audio_utils::lock_guard _l(mutex());
9665 for (size_t i = 0; i < mTracks.size() ; i++) {
9666 sp<IAfRecordTrack> track = mTracks[i];
9667 if (track != 0 && track->portId() == portId) {
9668 track->setSilenced(silenced);
9669 }
9670 }
9671 }
9672
9673 void ResamplerBufferProvider::reset()
9674 {
9675 const auto threadBase = mRecordTrack->thread().promote();
9676 auto* const recordThread = static_cast<RecordThread *>(threadBase->asIAfRecordThread().get());
9677 mRsmpInUnrel = 0;
9678 const int32_t rear = recordThread->mRsmpInRear;
9679 ssize_t deltaFrames = 0;
9680 if (mRecordTrack->startFrames() >= 0) {
9681 int32_t startFrames = mRecordTrack->startFrames();
9682 // Accept a recent wraparound of mRsmpInRear
9683 if (startFrames <= rear) {
9684 deltaFrames = rear - startFrames;
9685 } else {
9686 deltaFrames = (int32_t)((int64_t)rear + UINT32_MAX + 1 - startFrames);
9687 }
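// Numeric sketch of the wraparound branch above (hypothetical values): with
// startFrames = INT32_MAX - 10 recorded before rear wrapped to INT32_MIN + 5,
// startFrames > rear, so deltaFrames = rear + 2^32 - startFrames = 16, i.e. the track
// resumes 16 frames behind the current rear instead of a bogus multi-billion-frame gap.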
9688 // start frame cannot be further in the past than start of resampling buffer
9689 if ((size_t) deltaFrames > recordThread->mRsmpInFrames) {
9690 deltaFrames = recordThread->mRsmpInFrames;
9691 }
9692 }
9693 mRsmpInFront = audio_utils::safe_sub_overflow(rear, static_cast<int32_t>(deltaFrames));
9694 }
9695
9696 void ResamplerBufferProvider::sync(
9697 size_t *framesAvailable, bool *hasOverrun)
9698 {
9699 const auto threadBase = mRecordTrack->thread().promote();
9700 auto* const recordThread = static_cast<RecordThread *>(threadBase->asIAfRecordThread().get());
9701 const int32_t rear = recordThread->mRsmpInRear;
9702 const int32_t front = mRsmpInFront;
9703 const ssize_t filled = audio_utils::safe_sub_overflow(rear, front);
9704
9705 size_t framesIn;
9706 bool overrun = false;
9707 if (filled < 0) {
9708 // should not happen, but treat like a massive overrun and re-sync
9709 framesIn = 0;
9710 mRsmpInFront = rear;
9711 overrun = true;
9712 } else if ((size_t) filled <= recordThread->mRsmpInFrames) {
9713 framesIn = (size_t) filled;
9714 } else {
9715 // client is not keeping up with server, but give it latest data
9716 framesIn = recordThread->mRsmpInFrames;
9717 mRsmpInFront = /* front = */ audio_utils::safe_sub_overflow(
9718 rear, static_cast<int32_t>(framesIn));
9719 overrun = true;
9720 }
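// Example of the overrun path above (hypothetical values): if rear - front evaluates to
// 3000 frames but recordThread->mRsmpInFrames is only 2048, the client has fallen behind;
// framesIn is clamped to 2048, front is snapped to rear - 2048 so only the most recent
// data is kept, and the caller is told an overrun occurred.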
9721 if (framesAvailable != NULL) {
9722 *framesAvailable = framesIn;
9723 }
9724 if (hasOverrun != NULL) {
9725 *hasOverrun = overrun;
9726 }
9727 }
9728
9729 // AudioBufferProvider interface
9730 status_t ResamplerBufferProvider::getNextBuffer(
9731 AudioBufferProvider::Buffer* buffer)
9732 {
9733 const auto threadBase = mRecordTrack->thread().promote();
9734 if (threadBase == 0) {
9735 buffer->frameCount = 0;
9736 buffer->raw = NULL;
9737 return NOT_ENOUGH_DATA;
9738 }
9739 auto* const recordThread = static_cast<RecordThread *>(threadBase->asIAfRecordThread().get());
9740 int32_t rear = recordThread->mRsmpInRear;
9741 int32_t front = mRsmpInFront;
9742 ssize_t filled = audio_utils::safe_sub_overflow(rear, front);
9743 // FIXME should not be P2 (don't want to increase latency)
9744 // FIXME if client not keeping up, discard
9745 LOG_ALWAYS_FATAL_IF(!(0 <= filled && (size_t) filled <= recordThread->mRsmpInFrames));
9746 // 'filled' may be non-contiguous, so return only the first contiguous chunk
9747
9748 front &= recordThread->mRsmpInFramesP2 - 1;
9749 size_t part1 = recordThread->mRsmpInFramesP2 - front;
9750 if (part1 > (size_t) filled) {
9751 part1 = filled;
9752 }
9753 size_t ask = buffer->frameCount;
9754 ALOG_ASSERT(ask > 0);
9755 if (part1 > ask) {
9756 part1 = ask;
9757 }
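// Contiguity sketch (hypothetical values): with mRsmpInFramesP2 = 2048, a masked front of
// 2000, filled = 300 and a request of 256 frames, part1 = min(48, 300, 256) = 48, so this
// call returns only the 48 frames up to the physical end of the buffer; the frames that
// wrapped to the start are delivered by the next getNextBuffer() call after
// releaseBuffer() advances mRsmpInFront.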
9758 if (part1 == 0) {
9759 // out of data is fine since the resampler will return a short-count.
9760 buffer->raw = NULL;
9761 buffer->frameCount = 0;
9762 mRsmpInUnrel = 0;
9763 return NOT_ENOUGH_DATA;
9764 }
9765
9766 buffer->raw = (uint8_t*)recordThread->mRsmpInBuffer + front * recordThread->mFrameSize;
9767 buffer->frameCount = part1;
9768 mRsmpInUnrel = part1;
9769 return NO_ERROR;
9770 }
9771
9772 // AudioBufferProvider interface
9773 void ResamplerBufferProvider::releaseBuffer(
9774 AudioBufferProvider::Buffer* buffer)
9775 {
9776 int32_t stepCount = static_cast<int32_t>(buffer->frameCount);
9777 if (stepCount == 0) {
9778 return;
9779 }
9780 ALOG_ASSERT(stepCount <= (int32_t)mRsmpInUnrel);
9781 mRsmpInUnrel -= stepCount;
9782 mRsmpInFront = audio_utils::safe_add_overflow(mRsmpInFront, stepCount);
9783 buffer->raw = NULL;
9784 buffer->frameCount = 0;
9785 }
9786
9787 void RecordThread::checkBtNrec()
9788 {
9789 audio_utils::lock_guard _l(mutex());
9790 checkBtNrec_l();
9791 }
9792
9793 void RecordThread::checkBtNrec_l()
9794 {
9795 // disable AEC and NS if the device is a BT SCO headset supporting those
9796 // pre-processing effects
9797 bool suspend = audio_is_bluetooth_sco_device(inDeviceType_l()) &&
9798 mAfThreadCallback->btNrecIsOff();
9799 if (mBtNrecSuspended.exchange(suspend) != suspend) {
9800 for (size_t i = 0; i < mEffectChains.size(); i++) {
9801 setEffectSuspended_l(FX_IID_AEC, suspend, mEffectChains[i]->sessionId());
9802 setEffectSuspended_l(FX_IID_NS, suspend, mEffectChains[i]->sessionId());
9803 }
9804 }
9805 }
9806
9807
9808 bool RecordThread::checkForNewParameter_l(const String8& keyValuePair,
9809 status_t& status)
9810 {
9811 bool reconfig = false;
9812
9813 status = NO_ERROR;
9814
9815 audio_format_t reqFormat = mFormat;
9816 uint32_t samplingRate = mSampleRate;
9817 // TODO this may change if we want to support capture from HDMI PCM multi channel (e.g on TVs).
9818 [[maybe_unused]] audio_channel_mask_t channelMask =
9819 audio_channel_in_mask_from_count(mChannelCount);
9820
9821 AudioParameter param = AudioParameter(keyValuePair);
9822 int value;
9823
9824 // scope for AutoPark extends to end of method
9825 AutoPark<FastCapture> park(mFastCapture);
9826
9827 // TODO Investigate when this code runs. Check with audio policy when a sample rate and
9828 // channel count change can be requested. Do we mandate the first client defines the
9829 // HAL sampling rate and channel count or do we allow changes on the fly?
9830 if (param.getInt(String8(AudioParameter::keySamplingRate), value) == NO_ERROR) {
9831 samplingRate = value;
9832 reconfig = true;
9833 }
9834 if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
9835 if (!audio_is_linear_pcm((audio_format_t) value)) {
9836 status = BAD_VALUE;
9837 } else {
9838 reqFormat = (audio_format_t) value;
9839 reconfig = true;
9840 }
9841 }
9842 if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
9843 audio_channel_mask_t mask = (audio_channel_mask_t) value;
9844 if (!audio_is_input_channel(mask) ||
9845 audio_channel_count_from_in_mask(mask) > FCC_LIMIT) {
9846 status = BAD_VALUE;
9847 } else {
9848 channelMask = mask;
9849 reconfig = true;
9850 }
9851 }
9852 if (param.getInt(String8(AudioParameter::keyFrameCount), value) == NO_ERROR) {
9853 // do not accept frame count changes if tracks are open as the track buffer
9854 // size depends on frame count and correct behavior would not be guaranteed
9855 // if frame count is changed after track creation
9856 if (mActiveTracks.size() > 0) {
9857 status = INVALID_OPERATION;
9858 } else {
9859 reconfig = true;
9860 }
9861 }
9862 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
9863 LOG_FATAL("Should not set routing device in RecordThread");
9864 }
9865 if (param.getInt(String8(AudioParameter::keyInputSource), value) == NO_ERROR &&
9866 mAudioSource != (audio_source_t)value) {
9867 LOG_FATAL("Should not set audio source in RecordThread");
9868 }
9869
9870 if (status == NO_ERROR) {
9871 status = mInput->stream->setParameters(keyValuePair);
9872 if (status == INVALID_OPERATION) {
9873 inputStandBy();
9874 status = mInput->stream->setParameters(keyValuePair);
9875 }
9876 if (reconfig) {
9877 if (status == BAD_VALUE) {
9878 audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
9879 if (mInput->stream->getAudioProperties(&config) == OK &&
9880 audio_is_linear_pcm(config.format) && audio_is_linear_pcm(reqFormat) &&
9881 config.sample_rate <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate) &&
9882 audio_channel_count_from_in_mask(config.channel_mask) <= FCC_LIMIT) {
9883 status = NO_ERROR;
9884 }
9885 }
9886 if (status == NO_ERROR) {
9887 readInputParameters_l();
9888 sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
9889 }
9890 }
9891 }
9892
9893 return reconfig;
9894 }
9895
9896 String8 RecordThread::getParameters(const String8& keys)
9897 {
9898 audio_utils::lock_guard _l(mutex());
9899 if (initCheck() == NO_ERROR) {
9900 String8 out_s8;
9901 if (mInput->stream->getParameters(keys, &out_s8) == OK) {
9902 return out_s8;
9903 }
9904 }
9905 return {};
9906 }
9907
9908 void RecordThread::ioConfigChanged_l(audio_io_config_event_t event, pid_t pid,
9909 audio_port_handle_t portId) {
9910 sp<AudioIoDescriptor> desc;
9911 switch (event) {
9912 case AUDIO_INPUT_OPENED:
9913 case AUDIO_INPUT_REGISTERED:
9914 case AUDIO_INPUT_CONFIG_CHANGED:
9915 desc = sp<AudioIoDescriptor>::make(mId, mPatch, true /*isInput*/,
9916 mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
9917 break;
9918 case AUDIO_CLIENT_STARTED:
9919 desc = sp<AudioIoDescriptor>::make(mId, mPatch, portId);
9920 break;
9921 case AUDIO_INPUT_CLOSED:
9922 default:
9923 desc = sp<AudioIoDescriptor>::make(mId);
9924 break;
9925 }
9926 mAfThreadCallback->ioConfigChanged_l(event, desc, pid);
9927 }
9928
9929 void RecordThread::readInputParameters_l()
9930 {
9931 const audio_config_base_t audioConfig = mInput->getAudioProperties();
9932 mSampleRate = audioConfig.sample_rate;
9933 mChannelMask = audioConfig.channel_mask;
9934 if (!audio_is_input_channel(mChannelMask)) {
9935 LOG_ALWAYS_FATAL("Channel mask %#x not valid for input", mChannelMask);
9936 }
9937
9938 mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
9939
9940 // Get actual HAL format.
9941 status_t result = mInput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
9942 LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving input stream format: %d", result);
9943 // Get format from the shim, which will be different than the HAL format
9944 // if recording compressed audio from IEC61937 wrapped sources.
9945 mFormat = audioConfig.format;
9946 if (!audio_is_valid_format(mFormat)) {
9947 LOG_ALWAYS_FATAL("Format %#x not valid for input", mFormat);
9948 }
9949 if (audio_is_linear_pcm(mFormat)) {
9950 LOG_ALWAYS_FATAL_IF(mChannelCount > FCC_LIMIT, "HAL channel count %d > %d",
9951 mChannelCount, FCC_LIMIT);
9952 } else {
9953 // Can have more than FCC_LIMIT channels in encoded streams.
9954 ALOGI("HAL format %#x is not linear pcm", mFormat);
9955 }
9956 mFrameSize = mInput->getFrameSize();
9957 LOG_ALWAYS_FATAL_IF(mFrameSize <= 0, "Error frame size was %zu but must be greater than zero",
9958 mFrameSize);
9959 result = mInput->stream->getBufferSize(&mBufferSize);
9960 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
9961 mFrameCount = mBufferSize / mFrameSize;
9962 ALOGV("%p RecordThread params: mChannelCount=%u, mFormat=%#x, mFrameSize=%zu, "
9963 "mBufferSize=%zu, mFrameCount=%zu",
9964 this, mChannelCount, mFormat, mFrameSize, mBufferSize, mFrameCount);
9965
9966 // mRsmpInFrames must be 0 before calling resizeInputBuffer_l for the first time
9967 mRsmpInFrames = 0;
9968 resizeInputBuffer_l(0 /*maxSharedAudioHistoryMs*/);
9969
9970 // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
9971 // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
9972
9973 audio_input_flags_t flags = mInput->flags;
9974 mediametrics::LogItem item(mThreadMetrics.getMetricsId());
9975 item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS)
9976 .set(AMEDIAMETRICS_PROP_ENCODING, IAfThreadBase::formatToString(mFormat).c_str())
9977 .set(AMEDIAMETRICS_PROP_FLAGS, toString(flags).c_str())
9978 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
9979 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
9980 .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)mChannelCount)
9981 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
9982 .record();
9983 }
9984
9985 uint32_t RecordThread::getInputFramesLost() const
9986 {
9987 audio_utils::lock_guard _l(mutex());
9988 uint32_t result;
9989 if (initCheck() == NO_ERROR && mInput->stream->getInputFramesLost(&result) == OK) {
9990 return result;
9991 }
9992 return 0;
9993 }
9994
9995 KeyedVector<audio_session_t, bool> RecordThread::sessionIds() const
9996 {
9997 KeyedVector<audio_session_t, bool> ids;
9998 audio_utils::lock_guard _l(mutex());
9999 for (size_t j = 0; j < mTracks.size(); ++j) {
10000 sp<IAfRecordTrack> track = mTracks[j];
10001 audio_session_t sessionId = track->sessionId();
10002 if (ids.indexOfKey(sessionId) < 0) {
10003 ids.add(sessionId, true);
10004 }
10005 }
10006 return ids;
10007 }
10008
10009 AudioStreamIn* RecordThread::clearInput()
10010 {
10011 audio_utils::lock_guard _l(mutex());
10012 AudioStreamIn *input = mInput;
10013 mInput = NULL;
10014 mInputSource.clear();
10015 return input;
10016 }
10017
10018 // this method must always be called either with ThreadBase mutex() held or inside the thread loop
10019 sp<StreamHalInterface> RecordThread::stream() const
10020 {
10021 if (mInput == NULL) {
10022 return NULL;
10023 }
10024 return mInput->stream;
10025 }
10026
10027 status_t RecordThread::addEffectChain_l(const sp<IAfEffectChain>& chain)
10028 {
10029 ALOGV("addEffectChain_l() %p on thread %p", chain.get(), this);
10030 chain->setThread(this);
10031 chain->setInBuffer(NULL);
10032 chain->setOutBuffer(NULL);
10033
10034 checkSuspendOnAddEffectChain_l(chain);
10035
10036 // make sure the enabled pre-processing effects state is communicated to the HAL as we
10037 // just moved them to a new input stream.
10038 chain->syncHalEffectsState_l();
10039
10040 mEffectChains.add(chain);
10041
10042 return NO_ERROR;
10043 }
10044
10045 size_t RecordThread::removeEffectChain_l(const sp<IAfEffectChain>& chain)
10046 {
10047 ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this);
10048
10049 for (size_t i = 0; i < mEffectChains.size(); i++) {
10050 if (chain == mEffectChains[i]) {
10051 mEffectChains.removeAt(i);
10052 break;
10053 }
10054 }
10055 return mEffectChains.size();
10056 }
10057
10058 status_t RecordThread::createAudioPatch_l(const struct audio_patch* patch,
10059 audio_patch_handle_t *handle)
10060 {
10061 status_t status = NO_ERROR;
10062
10063 // store new device and send to effects
10064 mInDeviceTypeAddr.mType = patch->sources[0].ext.device.type;
10065 mInDeviceTypeAddr.setAddress(patch->sources[0].ext.device.address);
10066 audio_port_handle_t deviceId = patch->sources[0].id;
10067 for (size_t i = 0; i < mEffectChains.size(); i++) {
10068 mEffectChains[i]->setInputDevice_l(inDeviceTypeAddr());
10069 }
10070
10071 checkBtNrec_l();
10072
10073 // store new source and send to effects
10074 if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
10075 mAudioSource = patch->sinks[0].ext.mix.usecase.source;
10076 for (size_t i = 0; i < mEffectChains.size(); i++) {
10077 mEffectChains[i]->setAudioSource_l(mAudioSource);
10078 }
10079 }
10080
10081 if (mInput->audioHwDev->supportsAudioPatches()) {
10082 sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
10083 status = hwDevice->createAudioPatch(patch->num_sources,
10084 patch->sources,
10085 patch->num_sinks,
10086 patch->sinks,
10087 handle);
10088 } else {
10089 status = mInput->stream->legacyCreateAudioPatch(patch->sources[0],
10090 patch->sinks[0].ext.mix.usecase.source,
10091 patch->sources[0].ext.device.type);
10092 *handle = AUDIO_PATCH_HANDLE_NONE;
10093 }
10094
10095 if ((mPatch.num_sources == 0) || (mPatch.sources[0].id != deviceId)) {
10096 sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
10097 mPatch = *patch;
10098 }
10099
10100 const std::string patchSourcesAsString = patchSourcesToString(patch);
10101 mThreadMetrics.logEndInterval();
10102 mThreadMetrics.logCreatePatch(patchSourcesAsString, /* outDevices */ {});
10103 mThreadMetrics.logBeginInterval();
10104 // also dispatch to active AudioRecords
10105 for (const auto &track : mActiveTracks) {
10106 track->logEndInterval();
10107 track->logBeginInterval(patchSourcesAsString);
10108 }
10109 // Force metadata update after a route change
10110 mActiveTracks.setHasChanged();
10111
10112 return status;
10113 }
10114
10115 status_t RecordThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
10116 {
10117 status_t status = NO_ERROR;
10118
10119 mPatch = audio_patch{};
10120 mInDeviceTypeAddr.reset();
10121
10122 if (mInput->audioHwDev->supportsAudioPatches()) {
10123 sp<DeviceHalInterface> hwDevice = mInput->audioHwDev->hwDevice();
10124 status = hwDevice->releaseAudioPatch(handle);
10125 } else {
10126 status = mInput->stream->legacyReleaseAudioPatch();
10127 }
10128 // Force metadata update after a route change
10129 mActiveTracks.setHasChanged();
10130
10131 return status;
10132 }
10133
10134 void RecordThread::updateOutDevices(const DeviceDescriptorBaseVector& outDevices)
10135 {
10136 audio_utils::lock_guard _l(mutex());
10137 mOutDevices = outDevices;
10138 mOutDeviceTypeAddrs = deviceTypeAddrsFromDescriptors(mOutDevices);
10139 for (size_t i = 0; i < mEffectChains.size(); i++) {
10140 mEffectChains[i]->setDevices_l(outDeviceTypeAddrs());
10141 }
10142 }
10143
10144 int32_t RecordThread::getOldestFront_l()
10145 {
10146 if (mTracks.size() == 0) {
10147 return mRsmpInRear;
10148 }
10149 int32_t oldestFront = mRsmpInRear;
10150 int32_t maxFilled = 0;
10151 for (size_t i = 0; i < mTracks.size(); i++) {
10152 int32_t front = mTracks[i]->resamplerBufferProvider()->getFront();
10153 int32_t filled;
10154 (void)__builtin_sub_overflow(mRsmpInRear, front, &filled);
10155 if (filled > maxFilled) {
10156 oldestFront = front;
10157 maxFilled = filled;
10158 }
10159 }
10160 if (maxFilled > static_cast<signed>(mRsmpInFrames)) {
10161 (void)__builtin_sub_overflow(mRsmpInRear, mRsmpInFrames, &oldestFront);
10162 }
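// Example (hypothetical values): with mRsmpInRear = 10000 and two tracks whose fronts
// are 9900 and 9400, the second track is further behind (filled = 600) so its front is
// the oldest; if mRsmpInFrames were only 512, the result would instead be clamped to
// 10000 - 512 = 9488.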
10163 return oldestFront;
10164 }
10165
10166 void RecordThread::updateFronts_l(int32_t offset)
10167 {
10168 if (offset == 0) {
10169 return;
10170 }
10171 for (size_t i = 0; i < mTracks.size(); i++) {
10172 int32_t front = mTracks[i]->resamplerBufferProvider()->getFront();
10173 front = audio_utils::safe_sub_overflow(front, offset);
10174 mTracks[i]->resamplerBufferProvider()->setFront(front);
10175 }
10176 }
10177
10178 void RecordThread::resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs)
10179 {
10180 // This is the formula for calculating the temporary buffer size.
10181 // With 7 HAL buffers, we can guarantee the ability to down-sample the input by a ratio of 6:1 to
10182 // 1 full output buffer, regardless of the alignment of the available input.
10183 // The value is somewhat arbitrary, and could probably be even larger.
10184 // A larger value should allow more old data to be read after a track calls start(),
10185 // without increasing latency.
10186 //
10187 // Note this is independent of the maximum downsampling ratio permitted for capture.
10188 size_t minRsmpInFrames = mFrameCount * 7;
10189
10190 // maxSharedAudioHistoryMs != 0 indicates a request to possibly make some part of the audio
10191 // capture history available to another client using the same session ID:
10192 // dimension the resampler input buffer accordingly.
10193
10194 // Get oldest client read position: getOldestFront_l() must be called before altering
10195 // mRsmpInRear, or mRsmpInFrames
10196 int32_t previousFront = getOldestFront_l();
10197 size_t previousRsmpInFramesP2 = mRsmpInFramesP2;
10198 int32_t previousRear = mRsmpInRear;
10199 mRsmpInRear = 0;
10200
10201 ALOG_ASSERT(maxSharedAudioHistoryMs >= 0
10202 && maxSharedAudioHistoryMs <= kMaxSharedAudioHistoryMs,
10203 "resizeInputBuffer_l() called with invalid max shared history %d",
10204 maxSharedAudioHistoryMs);
10205 if (maxSharedAudioHistoryMs != 0) {
10206 // resizeInputBuffer_l should never be called with a non zero shared history if the
10207 // buffer was not already allocated
10208 ALOG_ASSERT(mRsmpInBuffer != nullptr && mRsmpInFrames != 0,
10209 "resizeInputBuffer_l() called with shared history and unallocated buffer");
10210 size_t rsmpInFrames = (size_t)maxSharedAudioHistoryMs * mSampleRate / 1000;
10211 // never reduce resampler input buffer size
10212 if (rsmpInFrames <= mRsmpInFrames) {
10213 return;
10214 }
10215 mRsmpInFrames = rsmpInFrames;
10216 }
10217 mMaxSharedAudioHistoryMs = maxSharedAudioHistoryMs;
10218 // Note: mRsmpInFrames is 0 when called with maxSharedAudioHistoryMs equal to 0, so it is always
10219 // initialized
10220 if (mRsmpInFrames < minRsmpInFrames) {
10221 mRsmpInFrames = minRsmpInFrames;
10222 }
10223 mRsmpInFramesP2 = roundup(mRsmpInFrames);
10224
10225 // TODO optimize audio capture buffer sizes ...
10226 // Here we calculate the size of the sliding buffer used as a source
10227 // for resampling. mRsmpInFramesP2 is currently roundup(mFrameCount * 7).
10228 // For current HAL frame counts, this is usually 2048 = 40 ms. It would
10229 // be better to have it derived from the pipe depth in the long term.
10230 // The current value is higher than necessary. However it should not add to latency.
10231
10232 // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
10233 mRsmpInFramesOA = mRsmpInFramesP2 + mFrameCount - 1;
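// Sizing example (hypothetical values): with mFrameCount = 240 (5 ms at 48 kHz),
// minRsmpInFrames = 1680 and mRsmpInFramesP2 = roundup(1680) = 2048; mRsmpInFramesOA =
// 2048 + 239 = 2287, which is exactly enough for a full HAL read that starts at the
// worst-case masked rear index of 2047.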
10234
10235 void *rsmpInBuffer;
10236 (void)posix_memalign(&rsmpInBuffer, 32, mRsmpInFramesOA * mFrameSize);
10237 // if posix_memalign fails, will segv here.
10238 memset(rsmpInBuffer, 0, mRsmpInFramesOA * mFrameSize);
10239
10240 // Copy audio history if any from old buffer before freeing it
10241 if (previousRear != 0) {
10242 ALOG_ASSERT(mRsmpInBuffer != nullptr,
10243 "resizeInputBuffer_l() called with null buffer but frames already read from HAL");
10244
10245 ssize_t unread = audio_utils::safe_sub_overflow(previousRear, previousFront);
10246 previousFront &= previousRsmpInFramesP2 - 1;
10247 size_t part1 = previousRsmpInFramesP2 - previousFront;
10248 if (part1 > (size_t) unread) {
10249 part1 = unread;
10250 }
10251 if (part1 != 0) {
10252 memcpy(rsmpInBuffer, (const uint8_t*)mRsmpInBuffer + previousFront * mFrameSize,
10253 part1 * mFrameSize);
10254 mRsmpInRear = part1;
10255 part1 = unread - part1;
10256 if (part1 != 0) {
10257 memcpy((uint8_t*)rsmpInBuffer + mRsmpInRear * mFrameSize,
10258 (const uint8_t*)mRsmpInBuffer, part1 * mFrameSize);
10259 mRsmpInRear += part1;
10260 }
10261 }
10262 // Update front for all clients according to new rear
10263 updateFronts_l(audio_utils::safe_sub_overflow(previousRear, mRsmpInRear));
10264 } else {
10265 mRsmpInRear = 0;
10266 }
10267 free(mRsmpInBuffer);
10268 mRsmpInBuffer = rsmpInBuffer;
10269 }
10270
10271 void RecordThread::addPatchTrack(const sp<IAfPatchRecord>& record)
10272 {
10273 audio_utils::lock_guard _l(mutex());
10274 mTracks.add(record);
10275 if (record->getSource()) {
10276 mSource = record->getSource();
10277 }
10278 }
10279
10280 void RecordThread::deletePatchTrack(const sp<IAfPatchRecord>& record)
10281 {
10282 audio_utils::lock_guard _l(mutex());
10283 if (mSource == record->getSource()) {
10284 mSource = mInput;
10285 }
10286 destroyTrack_l(record);
10287 }
10288
10289 void RecordThread::toAudioPortConfig(struct audio_port_config* config)
10290 {
10291 ThreadBase::toAudioPortConfig(config);
10292 config->role = AUDIO_PORT_ROLE_SINK;
10293 config->ext.mix.hw_module = mInput->audioHwDev->handle();
10294 config->ext.mix.usecase.source = mAudioSource;
10295 if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
10296 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
10297 config->flags.input = mInput->flags;
10298 }
10299 }
10300
10301 std::string RecordThread::getLocalLogHeader() const {
10302 using namespace std::literals;
10303 static constexpr auto indent = " "
10304 " "sv;
10305 return std::string{indent}.append(IAfRecordTrack::getLogHeader());
10306 }
10307
10308 // ----------------------------------------------------------------------------
10309 // Mmap
10310 // ----------------------------------------------------------------------------
10311
10312 // Mmap stream control interface implementation. Each MmapThreadHandle controls one
10313 // MmapPlaybackThread or MmapCaptureThread instance.
10314 class MmapThreadHandle : public MmapStreamInterface {
10315 public:
10316 explicit MmapThreadHandle(const sp<IAfMmapThread>& thread);
10317 ~MmapThreadHandle() override;
10318
10319 // MmapStreamInterface virtuals
10320 status_t createMmapBuffer(int32_t minSizeFrames,
10321 struct audio_mmap_buffer_info* info) final;
10322 status_t getMmapPosition(struct audio_mmap_position* position) final;
10323 status_t getExternalPosition(uint64_t* position, int64_t* timeNanos) final;
10324 status_t start(const AudioClient& client,
10325 const audio_attributes_t* attr, audio_port_handle_t* handle) final;
10326 status_t stop(audio_port_handle_t handle) final;
10327 status_t standby() final;
10328 status_t reportData(const void* buffer, size_t frameCount) final;
10329 private:
10330 const sp<IAfMmapThread> mThread;
10331 };
10332
10333 /* static */
10334 sp<MmapStreamInterface> IAfMmapThread::createMmapStreamInterfaceAdapter(
10335 const sp<IAfMmapThread>& mmapThread) {
10336 return sp<MmapThreadHandle>::make(mmapThread);
10337 }
10338
10339 MmapThreadHandle::MmapThreadHandle(const sp<IAfMmapThread>& thread)
10340 : mThread(thread)
10341 {
10342 assert(thread != 0); // thread must start non-null and stay non-null
10343 }
10344
10345 // MmapStreamInterface could be directly implemented by MmapThread except for this
10346 // special handling in the adapter dtor.
10347 MmapThreadHandle::~MmapThreadHandle()
10348 {
10349 mThread->disconnect();
10350 }
10351
10352 status_t MmapThreadHandle::createMmapBuffer(int32_t minSizeFrames,
10353 struct audio_mmap_buffer_info *info)
10354 {
10355 return mThread->createMmapBuffer(minSizeFrames, info);
10356 }
10357
10358 status_t MmapThreadHandle::getMmapPosition(struct audio_mmap_position* position)
10359 {
10360 return mThread->getMmapPosition(position);
10361 }
10362
10363 status_t MmapThreadHandle::getExternalPosition(uint64_t* position,
10364 int64_t *timeNanos) {
10365 return mThread->getExternalPosition(position, timeNanos);
10366 }
10367
10368 status_t MmapThreadHandle::start(const AudioClient& client,
10369 const audio_attributes_t *attr, audio_port_handle_t *handle)
10370 {
10371 return mThread->start(client, attr, handle);
10372 }
10373
10374 status_t MmapThreadHandle::stop(audio_port_handle_t handle)
10375 {
10376 return mThread->stop(handle);
10377 }
10378
10379 status_t MmapThreadHandle::standby()
10380 {
10381 return mThread->standby();
10382 }
10383
10384 status_t MmapThreadHandle::reportData(const void* buffer, size_t frameCount)
10385 {
10386 return mThread->reportData(buffer, frameCount);
10387 }
10388
10389
10390 MmapThread::MmapThread(
10391 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
10392 AudioHwDevice *hwDev, const sp<StreamHalInterface>& stream, bool systemReady, bool isOut)
10393 : ThreadBase(afThreadCallback, id, (isOut ? MMAP_PLAYBACK : MMAP_CAPTURE), systemReady, isOut),
10394 mSessionId(AUDIO_SESSION_NONE),
10395 mPortId(AUDIO_PORT_HANDLE_NONE),
10396 mHalStream(stream), mHalDevice(hwDev->hwDevice()), mAudioHwDev(hwDev),
10397 mActiveTracks(&this->mLocalLog),
10398 mHalVolFloat(-1.0f), // Initialize to illegal value so it always gets set properly later.
10399 mNoCallbackWarningCount(0)
10400 {
10401 mStandby = true;
10402 readHalParameters_l();
10403 }
10404
10405 void MmapThread::onFirstRef()
10406 {
10407 run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
10408 }
10409
10410 void MmapThread::disconnect()
10411 {
10412 ActiveTracks<IAfMmapTrack> activeTracks;
10413 audio_port_handle_t localPortId;
10414 {
10415 audio_utils::lock_guard _l(mutex());
10416 for (const sp<IAfMmapTrack>& t : mActiveTracks) {
10417 activeTracks.add(t);
10418 }
10419 localPortId = mPortId;
10420 }
10421 for (const sp<IAfMmapTrack>& t : activeTracks) {
10422 stop(t->portId());
10423 }
10424 // This will decrement references and may cause the destruction of this thread.
10425 if (isOutput()) {
10426 AudioSystem::releaseOutput(localPortId);
10427 } else {
10428 AudioSystem::releaseInput(localPortId);
10429 }
10430 }
10431
10432
10433 void MmapThread::configure_l(const audio_attributes_t* attr,
10434 audio_stream_type_t streamType __unused,
10435 audio_session_t sessionId,
10436 const sp<MmapStreamCallback>& callback,
10437 const DeviceIdVector& deviceIds,
10438 audio_port_handle_t portId)
10439 {
10440 mAttr = *attr;
10441 mSessionId = sessionId;
10442 mCallback = callback;
10443 mDeviceIds = deviceIds;
10444 mPortId = portId;
10445 }
10446
10447 status_t MmapThread::createMmapBuffer(int32_t minSizeFrames,
10448 struct audio_mmap_buffer_info *info)
10449 {
10450 audio_utils::lock_guard l(mutex());
10451 if (mHalStream == 0) {
10452 return NO_INIT;
10453 }
10454 mStandby = true;
10455 return mHalStream->createMmapBuffer(minSizeFrames, info);
10456 }
10457
10458 status_t MmapThread::getMmapPosition(struct audio_mmap_position* position) const
10459 {
10460 audio_utils::lock_guard l(mutex());
10461 if (mHalStream == 0) {
10462 return NO_INIT;
10463 }
10464 return mHalStream->getMmapPosition(position);
10465 }
10466
10467 status_t MmapThread::exitStandby_l()
10468 {
10469 // The HAL must receive track metadata before starting the stream
10470 updateMetadata_l();
10471 status_t ret = mHalStream->start();
10472 if (ret != NO_ERROR) {
10473 ALOGE("%s: error mHalStream->start() = %d for first track", __FUNCTION__, ret);
10474 return ret;
10475 }
10476 if (mStandby) {
10477 mThreadMetrics.logBeginInterval();
10478 mThreadSnapshot.onBegin();
10479 mStandby = false;
10480 }
10481 return NO_ERROR;
10482 }
10483
10484 status_t MmapThread::start(const AudioClient& client,
10485 const audio_attributes_t *attr,
10486 audio_port_handle_t *handle)
10487 {
10488 audio_utils::lock_guard l(mutex());
10489 ALOGV("%s clientUid %d mStandby %d mPortId %d *handle %d", __FUNCTION__,
10490 client.attributionSource.uid, mStandby, mPortId, *handle);
10491 if (mHalStream == 0) {
10492 return NO_INIT;
10493 }
10494
10495 status_t ret;
10496
10497 // For the first track, reuse portId and session allocated when the stream was opened.
10498 if (*handle == mPortId) {
10499 acquireWakeLock_l();
10500 return NO_ERROR;
10501 }
10502
10503 audio_port_handle_t portId = AUDIO_PORT_HANDLE_NONE;
10504
10505 audio_io_handle_t io = mId;
10506 AttributionSourceState adjAttributionSource;
10507 if (!com::android::media::audio::audioserver_permissions()) {
10508 adjAttributionSource = afutils::checkAttributionSourcePackage(
10509 client.attributionSource);
10510 } else {
10511 // TODO(b/342475009) validate in oboeservice, and plumb downwards
10512 auto validatedRes = ValidatedAttributionSourceState::createFromTrustedUidNoPackage(
10513 client.attributionSource,
10514 mAfThreadCallback->getPermissionProvider()
10515 );
10516 if (!validatedRes.has_value()) {
10517 ALOGE("MMAP client package validation fail: %s",
10518 validatedRes.error().toString8().c_str());
10519 return aidl_utils::statusTFromBinderStatus(validatedRes.error());
10520 }
10521 adjAttributionSource = std::move(validatedRes.value()).unwrapInto();
10522 }
10523
10524 const auto localSessionId = mSessionId;
10525 auto localAttr = mAttr;
10526 float volume = 0.0f;
10527 bool muted = false;
10528 if (isOutput()) {
10529 audio_config_t config = AUDIO_CONFIG_INITIALIZER;
10530 config.sample_rate = mSampleRate;
10531 config.channel_mask = mChannelMask;
10532 config.format = mFormat;
10533 audio_stream_type_t stream = streamType_l();
10534 audio_output_flags_t flags =
10535 (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT);
10536 DeviceIdVector deviceIds = mDeviceIds;
10537 std::vector<audio_io_handle_t> secondaryOutputs;
10538 bool isSpatialized;
10539 bool isBitPerfect;
10540 mutex().unlock();
10541 ret = AudioSystem::getOutputForAttr(&localAttr, &io,
10542 localSessionId,
10543 &stream,
10544 adjAttributionSource,
10545 &config,
10546 flags,
10547 &deviceIds,
10548 &portId,
10549 &secondaryOutputs,
10550 &isSpatialized,
10551 &isBitPerfect,
10552 &volume,
10553 &muted);
10554 mutex().lock();
10555 mAttr = localAttr;
10556 ALOGD_IF(!secondaryOutputs.empty(),
10557 "MmapThread::start does not support secondary outputs, ignoring them");
10558 } else {
10559 audio_config_base_t config;
10560 config.sample_rate = mSampleRate;
10561 config.channel_mask = mChannelMask;
10562 config.format = mFormat;
10563 audio_port_handle_t deviceId = getFirstDeviceId(mDeviceIds);
10564 mutex().unlock();
10565 ret = AudioSystem::getInputForAttr(&localAttr, &io,
10566 RECORD_RIID_INVALID,
10567 localSessionId,
10568 adjAttributionSource,
10569 &config,
10570 AUDIO_INPUT_FLAG_MMAP_NOIRQ,
10571 &deviceId,
10572 &portId);
10573 mutex().lock();
10574 // localAttr is const for getInputForAttr.
10575 }
10576 // APM should not choose a different input or output stream for the same set of attributes
10577 // and audio configuration
10578 if (ret != NO_ERROR || io != mId) {
10579 ALOGE("%s: error getting output or input from APM (error %d, io %d expected io %d)",
10580 __FUNCTION__, ret, io, mId);
10581 return BAD_VALUE;
10582 }
10583
10584 if (isOutput()) {
10585 mutex().unlock();
10586 ret = AudioSystem::startOutput(portId);
10587 mutex().lock();
10588 } else {
10589 {
10590 // Add the track record before starting input so that the silent status for the
10591 // client can be cached.
10592 setClientSilencedState_l(portId, false /*silenced*/);
10593 }
10594 mutex().unlock();
10595 ret = AudioSystem::startInput(portId);
10596 mutex().lock();
10597 }
10598
10599 // abort if start is rejected by audio policy manager
10600 if (ret != NO_ERROR) {
10601 ALOGE("%s: error start rejected by AudioPolicyManager = %d", __FUNCTION__, ret);
10602 if (!mActiveTracks.isEmpty()) {
10603 mutex().unlock();
10604 if (isOutput()) {
10605 AudioSystem::releaseOutput(portId);
10606 } else {
10607 AudioSystem::releaseInput(portId);
10608 }
10609 mutex().lock();
10610 } else {
10611 mHalStream->stop();
10612 }
10613 eraseClientSilencedState_l(portId);
10614 return PERMISSION_DENIED;
10615 }
10616
10617 // Given that MmapThread::mAttr is mutable, should a MmapTrack have attributes ?
10618 sp<IAfMmapTrack> track = IAfMmapTrack::create(
10619 this, attr == nullptr ? mAttr : *attr, mSampleRate, mFormat,
10620 mChannelMask, mSessionId, isOutput(),
10621 client.attributionSource,
10622 IPCThreadState::self()->getCallingPid(), portId,
10623 volume, muted);
10624 if (!isOutput()) {
10625 track->setSilenced_l(isClientSilenced_l(portId));
10626 }
10627
10628 if (isOutput()) {
10629 // force volume update when a new track is added
10630 mHalVolFloat = -1.0f;
10631 } else if (!track->isSilenced_l()) {
10632 for (const sp<IAfMmapTrack>& t : mActiveTracks) {
10633 if (t->isSilenced_l()
10634 && t->uid() != static_cast<uid_t>(client.attributionSource.uid)) {
10635 t->invalidate();
10636 }
10637 }
10638 }
10639
10640 mActiveTracks.add(track);
10641 sp<IAfEffectChain> chain = getEffectChain_l(mSessionId);
10642 if (chain != 0) {
10643 chain->setStrategy(getStrategyForStream(streamType_l()));
10644 chain->incTrackCnt();
10645 chain->incActiveTrackCnt();
10646 }
10647
10648 track->logBeginInterval(patchSinksToString(&mPatch)); // log to MediaMetrics
10649 *handle = portId;
10650
10651 if (mActiveTracks.size() == 1) {
10652 ret = exitStandby_l();
10653 }
10654
10655 broadcast_l();
10656
10657 ALOGV("%s DONE status %d handle %d stream %p", __FUNCTION__, ret, *handle, mHalStream.get());
10658
10659 return ret;
10660 }
10661
10662 status_t MmapThread::stop(audio_port_handle_t handle)
10663 {
10664 ALOGV("%s handle %d", __FUNCTION__, handle);
10665 audio_utils::lock_guard l(mutex());
10666
10667 if (mHalStream == 0) {
10668 return NO_INIT;
10669 }
10670
10671 if (handle == mPortId) {
10672 releaseWakeLock_l();
10673 return NO_ERROR;
10674 }
10675
10676 sp<IAfMmapTrack> track;
10677 for (const sp<IAfMmapTrack>& t : mActiveTracks) {
10678 if (handle == t->portId()) {
10679 track = t;
10680 break;
10681 }
10682 }
10683 if (track == 0) {
10684 return BAD_VALUE;
10685 }
10686
10687 mActiveTracks.remove(track);
10688 eraseClientSilencedState_l(track->portId());
10689
10690 mutex().unlock();
10691 if (isOutput()) {
10692 AudioSystem::stopOutput(track->portId());
10693 AudioSystem::releaseOutput(track->portId());
10694 } else {
10695 AudioSystem::stopInput(track->portId());
10696 AudioSystem::releaseInput(track->portId());
10697 }
10698 mutex().lock();
10699
10700 sp<IAfEffectChain> chain = getEffectChain_l(track->sessionId());
10701 if (chain != 0) {
10702 chain->decActiveTrackCnt();
10703 chain->decTrackCnt();
10704 }
10705
10706 if (mActiveTracks.isEmpty()) {
10707 mHalStream->stop();
10708 }
10709
10710 broadcast_l();
10711
10712 return NO_ERROR;
10713 }
10714
10715 status_t MmapThread::standby()
10716 NO_THREAD_SAFETY_ANALYSIS // clang bug
10717 {
10718 ALOGV("%s", __FUNCTION__);
10719 audio_utils::lock_guard l_{mutex()};
10720
10721 if (mHalStream == 0) {
10722 return NO_INIT;
10723 }
10724 if (!mActiveTracks.isEmpty()) {
10725 return INVALID_OPERATION;
10726 }
10727 mHalStream->standby();
10728 if (!mStandby) {
10729 mThreadMetrics.logEndInterval();
10730 mThreadSnapshot.onEnd();
10731 mStandby = true;
10732 }
10733 releaseWakeLock_l();
10734 return NO_ERROR;
10735 }
10736
10737 status_t MmapThread::reportData(const void* /*buffer*/, size_t /*frameCount*/) {
10738 // This is a stub implementation. The MmapPlaybackThread overrides this function.
10739 return INVALID_OPERATION;
10740 }
10741
10742 void MmapThread::readHalParameters_l()
10743 {
10744 status_t result = mHalStream->getAudioProperties(&mSampleRate, &mChannelMask, &mHALFormat);
10745 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving audio properties from HAL: %d", result);
10746 mFormat = mHALFormat;
10747 LOG_ALWAYS_FATAL_IF(!audio_is_linear_pcm(mFormat), "HAL format %#x is not linear pcm", mFormat);
10748 result = mHalStream->getFrameSize(&mFrameSize);
10749 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving frame size from HAL: %d", result);
10750 LOG_ALWAYS_FATAL_IF(mFrameSize <= 0, "Error frame size was %zu but must be greater than zero",
10751 mFrameSize);
10752 result = mHalStream->getBufferSize(&mBufferSize);
10753 LOG_ALWAYS_FATAL_IF(result != OK, "Error retrieving buffer size from HAL: %d", result);
10754 mFrameCount = mBufferSize / mFrameSize;
10755
10756 // TODO: make a readHalParameters call?
10757 mediametrics::LogItem item(mThreadMetrics.getMetricsId());
10758 item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_READPARAMETERS)
10759 .set(AMEDIAMETRICS_PROP_ENCODING, IAfThreadBase::formatToString(mFormat).c_str())
10760 .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
10761 .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
10762 .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)mChannelCount)
10763 .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
10764 /*
10765 .set(AMEDIAMETRICS_PROP_FLAGS, toString(flags).c_str())
10766 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELMASK,
10767 (int32_t)mHapticChannelMask)
10768 .set(AMEDIAMETRICS_PROP_PREFIX_HAPTIC AMEDIAMETRICS_PROP_CHANNELCOUNT,
10769 (int32_t)mHapticChannelCount)
10770 */
10771 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_ENCODING,
10772 IAfThreadBase::formatToString(mHALFormat).c_str())
10773 .set(AMEDIAMETRICS_PROP_PREFIX_HAL AMEDIAMETRICS_PROP_FRAMECOUNT,
10774 (int32_t)mFrameCount) // sic - added HAL
10775 .record();
10776 }
10777
10778 bool MmapThread::threadLoop()
10779 {
10780 {
10781 audio_utils::unique_lock _l(mutex());
10782 checkSilentMode_l();
10783 }
10784
10785 const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
10786
10787 while (!exitPending())
10788 {
10789 Vector<sp<IAfEffectChain>> effectChains;
10790
10791 { // under Thread lock
10792 audio_utils::unique_lock _l(mutex());
10793
10794 if (mSignalPending) {
10795 // A signal was raised while we were unlocked
10796 mSignalPending = false;
10797 } else {
10798 if (mConfigEvents.isEmpty()) {
10799 // we're about to wait, flush the binder command buffer
10800 IPCThreadState::self()->flushCommands();
10801
10802 if (exitPending()) {
10803 break;
10804 }
10805
10806 // wait until we have something to do...
10807 ALOGV("%s going to sleep", myName.c_str());
10808 mWaitWorkCV.wait(_l);
10809 ALOGV("%s waking up", myName.c_str());
10810
10811 checkSilentMode_l();
10812
10813 continue;
10814 }
10815 }
10816
10817 processConfigEvents_l();
10818
10819 processVolume_l();
10820
10821 checkInvalidTracks_l();
10822
10823 mActiveTracks.updatePowerState_l(this);
10824
10825 updateMetadata_l();
10826
10827 lockEffectChains_l(effectChains);
10828 } // release Thread lock
10829
10830 for (size_t i = 0; i < effectChains.size(); i ++) {
10831 effectChains[i]->process_l(); // Thread is not locked, but effect chain is locked
10832 }
10833
10834 // enable changes in effect chain, including moving to another thread.
10835 unlockEffectChains(effectChains);
10836 // Effect chains will be actually deleted here if they were removed from
10837 // mEffectChains list during mixing or effects processing
10838 mThreadloopExecutor.process();
10839 }
10840 mThreadloopExecutor.process(); // process any remaining deferred actions.
10841 // deferred actions after this point are ignored.
10842
10843 threadLoop_exit();
10844
10845 if (!mStandby) {
10846 threadLoop_standby();
10847 mStandby = true;
10848 }
10849
10850 ALOGV("Thread %p type %d exiting", this, mType);
10851 return false;
10852 }
10853
10854 // checkForNewParameter_l() must be called with ThreadBase::mutex() held
10855 bool MmapThread::checkForNewParameter_l(const String8& keyValuePair,
10856 status_t& status)
10857 {
10858 AudioParameter param = AudioParameter(keyValuePair);
10859 int value;
10860 bool sendToHal = true;
10861 if (param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) {
10862 LOG_FATAL("Should not happen set routing device in MmapThread");
10863 }
10864 if (sendToHal) {
10865 status = mHalStream->setParameters(keyValuePair);
10866 } else {
10867 status = NO_ERROR;
10868 }
10869
10870 return false;
10871 }
10872
10873 String8 MmapThread::getParameters(const String8& keys)
10874 {
10875 audio_utils::lock_guard _l(mutex());
10876 String8 out_s8;
10877 if (initCheck() == NO_ERROR && mHalStream->getParameters(keys, &out_s8) == OK) {
10878 return out_s8;
10879 }
10880 return {};
10881 }
10882
10883 void MmapThread::ioConfigChanged_l(audio_io_config_event_t event, pid_t pid,
10884 audio_port_handle_t portId __unused) {
10885 sp<AudioIoDescriptor> desc;
10886 bool isInput = false;
10887 switch (event) {
10888 case AUDIO_INPUT_OPENED:
10889 case AUDIO_INPUT_REGISTERED:
10890 case AUDIO_INPUT_CONFIG_CHANGED:
10891 isInput = true;
10892 FALLTHROUGH_INTENDED;
10893 case AUDIO_OUTPUT_OPENED:
10894 case AUDIO_OUTPUT_REGISTERED:
10895 case AUDIO_OUTPUT_CONFIG_CHANGED:
10896 desc = sp<AudioIoDescriptor>::make(mId, mPatch, isInput,
10897 mSampleRate, mFormat, mChannelMask, mFrameCount, mFrameCount);
10898 break;
10899 case AUDIO_INPUT_CLOSED:
10900 case AUDIO_OUTPUT_CLOSED:
10901 default:
10902 desc = sp<AudioIoDescriptor>::make(mId);
10903 break;
10904 }
10905 mAfThreadCallback->ioConfigChanged_l(event, desc, pid);
10906 }
10907
10908 status_t MmapThread::createAudioPatch_l(const struct audio_patch* patch,
10909 audio_patch_handle_t *handle)
10910 NO_THREAD_SAFETY_ANALYSIS // release and re-acquire mutex()
10911 {
10912 status_t status = NO_ERROR;
10913
10914 // store new device and send to effects
10915 audio_devices_t type = AUDIO_DEVICE_NONE;
10916 DeviceIdVector deviceIds;
10917 AudioDeviceTypeAddrVector sinkDeviceTypeAddrs;
10918 AudioDeviceTypeAddr sourceDeviceTypeAddr;
10919 uint32_t numDevices = 0;
10920 if (isOutput()) {
10921 for (unsigned int i = 0; i < patch->num_sinks; i++) {
10922 LOG_ALWAYS_FATAL_IF(popcount(patch->sinks[i].ext.device.type) > 1
10923 && !mAudioHwDev->supportsAudioPatches(),
10924 "Enumerated device type(%#x) must not be used "
10925 "as it does not support audio patches",
10926 patch->sinks[i].ext.device.type);
10927 type = static_cast<audio_devices_t>(type | patch->sinks[i].ext.device.type);
10928 sinkDeviceTypeAddrs.emplace_back(patch->sinks[i].ext.device.type,
10929 patch->sinks[i].ext.device.address);
10930 deviceIds.push_back(patch->sinks[i].id);
10931 }
10932 numDevices = mPatch.num_sinks;
10933 } else {
10934 type = patch->sources[0].ext.device.type;
10935 deviceIds.push_back(patch->sources[0].id);
10936 numDevices = mPatch.num_sources;
10937 sourceDeviceTypeAddr.mType = patch->sources[0].ext.device.type;
10938 sourceDeviceTypeAddr.setAddress(patch->sources[0].ext.device.address);
10939 }
10940
10941 for (size_t i = 0; i < mEffectChains.size(); i++) {
10942 if (isOutput()) {
10943 mEffectChains[i]->setDevices_l(sinkDeviceTypeAddrs);
10944 } else {
10945 mEffectChains[i]->setInputDevice_l(sourceDeviceTypeAddr);
10946 }
10947 }
10948
10949 if (!isOutput()) {
10950 // store new source and send to effects
10951 if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
10952 mAudioSource = patch->sinks[0].ext.mix.usecase.source;
10953 for (size_t i = 0; i < mEffectChains.size(); i++) {
10954 mEffectChains[i]->setAudioSource_l(mAudioSource);
10955 }
10956 }
10957 }
10958
10959 // For mmap streams, once the routing has changed, they will be disconnected. It should be
10960 // okay to notify the client earlier before the new patch creation.
10961 if (!areDeviceIdsEqual(deviceIds, mDeviceIds)) {
10962 if (const sp<MmapStreamCallback> callback = mCallback.promote()) {
10963 // The aaudioservice handles the routing changed event asynchronously, so it is
10964 // safe to hold the lock here.
10965 callback->onRoutingChanged(deviceIds);
10966 }
10967 }
10968
10969 if (mAudioHwDev->supportsAudioPatches()) {
10970 status = mHalDevice->createAudioPatch(patch->num_sources, patch->sources, patch->num_sinks,
10971 patch->sinks, handle);
10972 } else {
10973 audio_port_config port;
10974 std::optional<audio_source_t> source;
10975 if (isOutput()) {
10976 port = patch->sinks[0];
10977 } else {
10978 port = patch->sources[0];
10979 source = patch->sinks[0].ext.mix.usecase.source;
10980 }
10981 status = mHalStream->legacyCreateAudioPatch(port, source, type);
10982 *handle = AUDIO_PATCH_HANDLE_NONE;
10983 }
10984
10985 if (numDevices == 0 || (!areDeviceIdsEqual(deviceIds, mDeviceIds))) {
10986 if (isOutput()) {
10987 sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
10988 mOutDeviceTypeAddrs = sinkDeviceTypeAddrs;
10989 checkSilentMode_l();
10990 } else {
10991 sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
10992 mInDeviceTypeAddr = sourceDeviceTypeAddr;
10993 }
10994 mPatch = *patch;
10995 mDeviceIds = deviceIds;
10996 }
10997 // Force metadata update after a route change
10998 mActiveTracks.setHasChanged();
10999
11000 return status;
11001 }
11002
11003 status_t MmapThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
11004 {
11005 status_t status = NO_ERROR;
11006
11007 mPatch = audio_patch{};
11008 mOutDeviceTypeAddrs.clear();
11009 mInDeviceTypeAddr.reset();
11010
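// Query patch support from the HAL device: supportsAudioPatches() fills its out-parameter and
// returns a status; any query failure is treated as "patches not supported", so the legacy
// release path below is used instead.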
11011 bool supportsAudioPatches = mHalDevice->supportsAudioPatches(&supportsAudioPatches) == OK ?
11012 supportsAudioPatches : false;
11013
11014 if (supportsAudioPatches) {
11015 status = mHalDevice->releaseAudioPatch(handle);
11016 } else {
11017 status = mHalStream->legacyReleaseAudioPatch();
11018 }
11019 // Force metadata update after a route change
11020 mActiveTracks.setHasChanged();
11021
11022 return status;
11023 }
11024
11025 void MmapThread::toAudioPortConfig(struct audio_port_config* config)
11026 NO_THREAD_SAFETY_ANALYSIS // mAudioHwDev handle access
11027 {
11028 ThreadBase::toAudioPortConfig(config);
11029 if (isOutput()) {
11030 config->role = AUDIO_PORT_ROLE_SOURCE;
11031 config->ext.mix.hw_module = mAudioHwDev->handle();
11032 config->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
11033 } else {
11034 config->role = AUDIO_PORT_ROLE_SINK;
11035 config->ext.mix.hw_module = mAudioHwDev->handle();
11036 config->ext.mix.usecase.source = mAudioSource;
11037 }
11038 }
11039
11040 status_t MmapThread::addEffectChain_l(const sp<IAfEffectChain>& chain)
11041 {
11042 audio_session_t session = chain->sessionId();
11043
11044 ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
11045 // Attach all tracks with same session ID to this chain.
11046 // indicate all active tracks in the chain
11047 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11048 if (session == track->sessionId()) {
11049 chain->incTrackCnt();
11050 chain->incActiveTrackCnt();
11051 }
11052 }
11053
11054 chain->setThread(this);
11055 chain->setInBuffer(nullptr);
11056 chain->setOutBuffer(nullptr);
11057 chain->syncHalEffectsState_l();
11058
11059 mEffectChains.add(chain);
11060 checkSuspendOnAddEffectChain_l(chain);
11061 return NO_ERROR;
11062 }
11063
11064 size_t MmapThread::removeEffectChain_l(const sp<IAfEffectChain>& chain)
11065 {
11066 audio_session_t session = chain->sessionId();
11067
11068 ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
11069
11070 for (size_t i = 0; i < mEffectChains.size(); i++) {
11071 if (chain == mEffectChains[i]) {
11072 mEffectChains.removeAt(i);
11073 // detach all active tracks from the chain
11074 // detach all tracks with same session ID from this chain
11075 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11076 if (session == track->sessionId()) {
11077 chain->decActiveTrackCnt();
11078 chain->decTrackCnt();
11079 }
11080 }
11081 break;
11082 }
11083 }
11084 return mEffectChains.size();
11085 }
11086
11087 void MmapThread::threadLoop_standby()
11088 {
11089 mHalStream->standby();
11090 }
11091
11092 void MmapThread::threadLoop_exit()
11093 {
11094 // Do not call callback->onTearDown() because it is redundant for thread exit
11095 // and because it can cause a recursive mutex lock on stop().
11096 }
11097
11098 status_t MmapThread::setSyncEvent(const sp<SyncEvent>& /* event */)
11099 {
11100 return BAD_VALUE;
11101 }
11102
11103 bool MmapThread::isValidSyncEvent(
11104 const sp<SyncEvent>& /* event */) const
11105 {
11106 return false;
11107 }
11108
11109 status_t MmapThread::checkEffectCompatibility_l(
11110 const effect_descriptor_t *desc, audio_session_t sessionId)
11111 {
11112 // No global effect sessions on mmap threads
11113 if (audio_is_global_session(sessionId)) {
11114 ALOGW("checkEffectCompatibility_l(): global effect %s on MMAP thread %s",
11115 desc->name, mThreadName);
11116 return BAD_VALUE;
11117 }
11118
11119 if (!isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC)) {
11120 ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on capture mmap thread",
11121 desc->name);
11122 return BAD_VALUE;
11123 }
11124 if (isOutput() && ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
11125 ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback mmap "
11126 "thread", desc->name);
11127 return BAD_VALUE;
11128 }
11129
11130 // Only allow effects without processing load or latency
11131 if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) != EFFECT_FLAG_NO_PROCESS) {
11132 return BAD_VALUE;
11133 }
11134
11135 if (IAfEffectModule::isHapticGenerator(&desc->type)) {
11136 ALOGE("%s(): HapticGenerator is not supported for MmapThread", __func__);
11137 return BAD_VALUE;
11138 }
11139
11140 return NO_ERROR;
11141 }
11142
11143 void MmapThread::checkInvalidTracks_l()
11144 {
11145 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11146 if (track->isInvalid()) {
11147 if (const sp<MmapStreamCallback> callback = mCallback.promote()) {
11148 // The aaudioservice handles the routing changed event asynchronously, so it is
11149 // safe to hold the lock here.
11150 callback->onRoutingChanged({});
11151 } else if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
11152 ALOGW("Could not notify MMAP stream tear down: no onRoutingChanged callback!");
11153 mNoCallbackWarningCount++;
11154 }
11155 break;
11156 }
11157 }
11158 }
11159
11160 void MmapThread::dumpInternals_l(int fd, const Vector<String16>& /* args */)
11161 {
11162 dprintf(fd, " Attributes: content type %d usage %d source %d\n",
11163 mAttr.content_type, mAttr.usage, mAttr.source);
11164 dprintf(fd, " Session: %d port Id: %d\n", mSessionId, mPortId);
11165 if (mActiveTracks.isEmpty()) {
11166 dprintf(fd, " No active clients\n");
11167 }
11168 }
11169
11170 void MmapThread::dumpTracks_l(int fd, const Vector<String16>& /* args */)
11171 {
11172 String8 result;
11173 size_t numtracks = mActiveTracks.size();
11174 dprintf(fd, " %zu Tracks\n", numtracks);
11175 const char *prefix = " ";
11176 if (numtracks) {
11177 result.append(prefix);
11178 mActiveTracks[0]->appendDumpHeader(result);
11179 for (size_t i = 0; i < numtracks ; ++i) {
11180 sp<IAfMmapTrack> track = mActiveTracks[i];
11181 result.append(prefix);
11182 track->appendDump(result, true /* active */);
11183 }
11184 } else {
11185 dprintf(fd, "\n");
11186 }
11187 write(fd, result.c_str(), result.size());
11188 }
11189
11190 std::string MmapThread::getLocalLogHeader() const {
11191 using namespace std::literals;
11192 static constexpr auto indent = " "
11193 " "sv;
11194 return std::string{indent}.append(IAfMmapTrack::getLogHeader());
11195 }
11196
11197 /* static */
11198 sp<IAfMmapPlaybackThread> IAfMmapPlaybackThread::create(
11199 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
11200 AudioHwDevice* hwDev, AudioStreamOut* output, bool systemReady) {
11201 return sp<MmapPlaybackThread>::make(afThreadCallback, id, hwDev, output, systemReady);
11202 }
11203
11204 MmapPlaybackThread::MmapPlaybackThread(
11205 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
11206 AudioHwDevice *hwDev, AudioStreamOut *output, bool systemReady)
11207 : MmapThread(afThreadCallback, id, hwDev, output->stream, systemReady, true /* isOut */),
11208 mStreamType(AUDIO_STREAM_MUSIC),
11209 mOutput(output)
11210 {
11211 snprintf(mThreadName, kThreadNameLength, "AudioMmapOut_%X", id);
11212 mFlagsAsString = toString(output->flags);
11213 mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
11214 mMasterVolume = afThreadCallback->masterVolume_l();
11215 mMasterMute = afThreadCallback->masterMute_l();
11216 if (!audioserver_flags::portid_volume_management()) {
11217 for (int i = AUDIO_STREAM_MIN; i < AUDIO_STREAM_FOR_POLICY_CNT; ++i) {
11218 const audio_stream_type_t stream{static_cast<audio_stream_type_t>(i)};
11219 mStreamTypes[stream].volume = 0.0f;
11220 mStreamTypes[stream].mute = mAfThreadCallback->streamMute_l(stream);
11221 }
11222 // Audio patch and call assistant volume are always max
11223 mStreamTypes[AUDIO_STREAM_PATCH].volume = 1.0f;
11224 mStreamTypes[AUDIO_STREAM_PATCH].mute = false;
11225 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].volume = 1.0f;
11226 mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].mute = false;
11227 }
11228 if (mAudioHwDev) {
11229 if (mAudioHwDev->canSetMasterVolume()) {
11230 mMasterVolume = 1.0;
11231 }
11232
11233 if (mAudioHwDev->canSetMasterMute()) {
11234 mMasterMute = false;
11235 }
11236 }
11237 }
11238
11239 void MmapPlaybackThread::configure(const audio_attributes_t* attr,
11240 audio_stream_type_t streamType,
11241 audio_session_t sessionId,
11242 const sp<MmapStreamCallback>& callback,
11243 const DeviceIdVector& deviceIds,
11244 audio_port_handle_t portId)
11245 {
11246 audio_utils::lock_guard l(mutex());
11247 MmapThread::configure_l(attr, streamType, sessionId, callback, deviceIds, portId);
11248 mStreamType = streamType;
11249 }
11250
11251 AudioStreamOut* MmapPlaybackThread::clearOutput()
11252 {
11253 audio_utils::lock_guard _l(mutex());
11254 AudioStreamOut *output = mOutput;
11255 mOutput = NULL;
11256 return output;
11257 }
11258
11259 void MmapPlaybackThread::setMasterVolume(float value)
11260 {
11261 audio_utils::lock_guard _l(mutex());
11262 // Don't apply master volume in SW if our HAL can do it for us.
11263 if (mAudioHwDev &&
11264 mAudioHwDev->canSetMasterVolume()) {
11265 mMasterVolume = 1.0;
11266 } else {
11267 mMasterVolume = value;
11268 }
11269 }
11270
11271 void MmapPlaybackThread::setMasterMute(bool muted)
11272 {
11273 audio_utils::lock_guard _l(mutex());
11274 // Don't apply master mute in SW if our HAL can do it for us.
11275 if (mAudioHwDev && mAudioHwDev->canSetMasterMute()) {
11276 mMasterMute = false;
11277 } else {
11278 mMasterMute = muted;
11279 }
11280 }
11281
11282 void MmapPlaybackThread::setStreamVolume(audio_stream_type_t stream, float value, bool muted)
11283 {
11284 ALOGV("%s: stream %d value %f muted %d", __func__, stream, value, muted);
11285 audio_utils::lock_guard _l(mutex());
11286 mStreamTypes[stream].volume = value;
11287 if (com_android_media_audio_ring_my_car()) {
11288 mStreamTypes[stream].mute = muted;
11289 }
11290 if (stream == mStreamType) {
11291 broadcast_l();
11292 }
11293 }
11294
11295 float MmapPlaybackThread::streamVolume(audio_stream_type_t stream) const
11296 {
11297 audio_utils::lock_guard _l(mutex());
11298 return mStreamTypes[stream].volume;
11299 }
11300
11301 void MmapPlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
11302 {
11303 audio_utils::lock_guard _l(mutex());
11304 mStreamTypes[stream].mute = muted;
11305 if (stream == mStreamType) {
11306 broadcast_l();
11307 }
11308 }
11309
11310 status_t MmapPlaybackThread::setPortsVolume(
11311 const std::vector<audio_port_handle_t>& portIds, float volume, bool muted) {
11312 audio_utils::lock_guard _l(mutex());
11313 for (const auto& portId : portIds) {
11314 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11315 if (portId == track->portId()) {
11316 track->setPortVolume(volume);
11317 track->setPortMute(muted);
11318 break;
11319 }
11320 }
11321 }
11322 broadcast_l();
11323 return NO_ERROR;
11324 }
11325
11326 void MmapPlaybackThread::invalidateTracks(audio_stream_type_t streamType)
11327 {
11328 audio_utils::lock_guard _l(mutex());
11329 if (streamType == mStreamType) {
11330 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11331 track->invalidate();
11332 }
11333 broadcast_l();
11334 }
11335 }
11336
11337 void MmapPlaybackThread::invalidateTracks(std::set<audio_port_handle_t>& portIds)
11338 {
11339 audio_utils::lock_guard _l(mutex());
11340 bool trackMatch = false;
11341 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11342 if (portIds.find(track->portId()) != portIds.end()) {
11343 track->invalidate();
11344 trackMatch = true;
11345 portIds.erase(track->portId());
11346 }
11347 if (portIds.empty()) {
11348 break;
11349 }
11350 }
11351 if (trackMatch) {
11352 broadcast_l();
11353 }
11354 }
11355
11356 void MmapPlaybackThread::processVolume_l()
11357 NO_THREAD_SAFETY_ANALYSIS // access of track->processMuteEvent_l
11358 {
11359 float volume = 0;
11360 if (!audioserver_flags::portid_volume_management()) {
11361 if (mMasterMute || streamMuted_l()) {
11362 volume = 0;
11363 } else {
11364 volume = mMasterVolume * streamVolume_l();
11365 }
11366 } else {
11367 if (mMasterMute) {
11368 volume = 0;
11369 } else {
11370 // All mmap tracks are declared with the same audio attributes to the audio policy
11371 // manager. Hence, they follow the same routing / volume group. Any change of volume
11372 // is broadcast to all tracks, so arbitrarily take the first track's volume.
11373 size_t numtracks = mActiveTracks.size();
11374 if (numtracks) {
11375 if (mActiveTracks[0]->getPortMute()) {
11376 volume = 0;
11377 } else {
11378 volume = mMasterVolume * mActiveTracks[0]->getPortVolume();
11379 }
11380 }
11381 }
11382 }
11383 if (volume != mHalVolFloat) {
11384 // Convert volumes from float to 8.24
11385 uint32_t vol = (uint32_t)(volume * (1 << 24));
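// e.g. a float volume of 1.0f becomes 1 << 24 (0x01000000) in U8.24 fixed point.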
11386
11387 // Delegate volume control to effect in track effect chain if needed
11388 // only one effect chain can be present on DirectOutputThread, so if
11389 // there is one, the track is connected to it
11390 if (!mEffectChains.isEmpty()) {
11391 mEffectChains[0]->setVolume(&vol, &vol);
11392 volume = (float)vol / (1 << 24);
11393 }
11394 // Try to use HW volume control and fall back to SW control if not implemented
11395 if (mOutput->stream->setVolume(volume, volume) == NO_ERROR) {
11396 mHalVolFloat = volume; // HW volume control worked, so update value.
11397 mNoCallbackWarningCount = 0;
11398 } else {
11399 sp<MmapStreamCallback> callback = mCallback.promote();
11400 if (callback != 0) {
11401 mHalVolFloat = volume; // SW volume control worked, so update value.
11402 mNoCallbackWarningCount = 0;
11403 mutex().unlock();
11404 callback->onVolumeChanged(volume);
11405 mutex().lock();
11406 } else {
11407 if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
11408 ALOGW("Could not set MMAP stream volume: no volume callback!");
11409 mNoCallbackWarningCount++;
11410 }
11411 }
11412 }
11413 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11414 track->setMetadataHasChanged();
11415 if (!audioserver_flags::portid_volume_management()) {
11416 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
11417 /*muteState=*/{mMasterMute,
11418 streamVolume_l() == 0.f,
11419 streamMuted_l(),
11420 // TODO(b/241533526): adjust logic to include mute from AppOps
11421 false /*muteFromPlaybackRestricted*/,
11422 false /*muteFromClientVolume*/,
11423 false /*muteFromVolumeShaper*/,
11424 false /*muteFromPortVolume*/});
11425 } else {
11426 track->processMuteEvent_l(mAfThreadCallback->getOrCreateAudioManager(),
11427 /*muteState=*/{mMasterMute,
11428 track->getPortVolume() == 0.f,
11429 /* muteFromStreamMuted= */ false,
11430 // TODO(b/241533526): adjust logic to include mute from AppOps
11431 false /*muteFromPlaybackRestricted*/,
11432 false /*muteFromClientVolume*/,
11433 false /*muteFromVolumeShaper*/,
11434 track->getPortMute()});
11435 }
11436 }
11437 }
11438 }
11439
11440 ThreadBase::MetadataUpdate MmapPlaybackThread::updateMetadata_l()
11441 {
11442 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
11443 return {}; // nothing to do
11444 }
11445 StreamOutHalInterface::SourceMetadata metadata;
11446 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11447 // No track is invalid as this is called after prepareTrack_l in the same critical section
11448 playback_track_metadata_v7_t trackMetadata;
11449 trackMetadata.base = {
11450 .usage = track->attributes().usage,
11451 .content_type = track->attributes().content_type,
11452 .gain = mHalVolFloat, // TODO: propagate from aaudio pre-mix volume
11453 };
11454 trackMetadata.channel_mask = track->channelMask(),
11455 strncpy(trackMetadata.tags, track->attributes().tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
11456 metadata.tracks.push_back(trackMetadata);
11457 }
11458 mOutput->stream->updateSourceMetadata(metadata);
11459
11460 MetadataUpdate change;
11461 change.playbackMetadataUpdate = metadata.tracks;
11462 return change;
11463 };
11464
11465 void MmapPlaybackThread::checkSilentMode_l()
11466 {
11467 if (property_get_bool("ro.audio.silent", false)) {
11468 ALOGW("ro.audio.silent is now ignored");
11469 }
11470 }
11471
11472 void MmapPlaybackThread::toAudioPortConfig(struct audio_port_config* config)
11473 {
11474 MmapThread::toAudioPortConfig(config);
11475 if (mOutput && mOutput->flags != AUDIO_OUTPUT_FLAG_NONE) {
11476 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
11477 config->flags.output = mOutput->flags;
11478 }
11479 }
11480
11481 status_t MmapPlaybackThread::getExternalPosition(uint64_t* position,
11482 int64_t* timeNanos) const
11483 {
11484 if (mOutput == nullptr) {
11485 return NO_INIT;
11486 }
11487 struct timespec timestamp;
11488 status_t status = mOutput->getPresentationPosition(position, &timestamp);
11489 if (status == NO_ERROR) {
11490 *timeNanos = timestamp.tv_sec * NANOS_PER_SECOND + timestamp.tv_nsec;
11491 }
11492 return status;
11493 }
11494
11495 status_t MmapPlaybackThread::reportData(const void* buffer, size_t frameCount) {
11496 // Send to MelProcessor for sound dose measurement.
11497 auto processor = mMelProcessor.load();
11498 if (processor) {
11499 processor->process(buffer, frameCount * mFrameSize);
11500 }
11501
11502 return NO_ERROR;
11503 }
11504
11505 // startMelComputation_l() must be called with AudioFlinger::mutex() held
11506 void MmapPlaybackThread::startMelComputation_l(
11507 const sp<audio_utils::MelProcessor>& processor)
11508 {
11509 ALOGV("%s: starting mel processor for thread %d", __func__, id());
11510 mMelProcessor.store(processor);
11511 if (processor) {
11512 processor->resume();
11513 }
11514
11515 // no need to update output format for MMapPlaybackThread since it is
11516 // assigned constant for each thread
11517 }
11518
11519 // stopMelComputation_l() must be called with AudioFlinger::mutex() held
11520 void MmapPlaybackThread::stopMelComputation_l()
11521 {
11522 ALOGV("%s: pausing mel processor for thread %d", __func__, id());
11523 auto melProcessor = mMelProcessor.load();
11524 if (melProcessor != nullptr) {
11525 melProcessor->pause();
11526 }
11527 }
11528
11529 void MmapPlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
11530 {
11531 MmapThread::dumpInternals_l(fd, args);
11532 if (!audioserver_flags::portid_volume_management()) {
11533 dprintf(fd, " Stream type: %d Stream volume: %f HAL volume: %f Stream mute %d",
11534 mStreamType, streamVolume_l(), mHalVolFloat, streamMuted_l());
11535 } else {
11536 dprintf(fd, " HAL volume: %f", mHalVolFloat);
11537 }
11538 dprintf(fd, "\n");
11539 dprintf(fd, " Master volume: %f Master mute %d\n", mMasterVolume, mMasterMute);
11540 }
11541
11542 /* static */
11543 sp<IAfMmapCaptureThread> IAfMmapCaptureThread::create(
11544 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
11545 AudioHwDevice* hwDev, AudioStreamIn* input, bool systemReady) {
11546 return sp<MmapCaptureThread>::make(afThreadCallback, id, hwDev, input, systemReady);
11547 }
11548
11549 MmapCaptureThread::MmapCaptureThread(
11550 const sp<IAfThreadCallback>& afThreadCallback, audio_io_handle_t id,
11551 AudioHwDevice *hwDev, AudioStreamIn *input, bool systemReady)
11552 : MmapThread(afThreadCallback, id, hwDev, input->stream, systemReady, false /* isOut */),
11553 mInput(input)
11554 {
11555 snprintf(mThreadName, kThreadNameLength, "AudioMmapIn_%X", id);
11556 mFlagsAsString = toString(input->flags);
11557 mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
11558 }
11559
11560 status_t MmapCaptureThread::exitStandby_l()
11561 {
11562 {
11563 // mInput might have been cleared by clearInput()
11564 if (mInput != nullptr && mInput->stream != nullptr) {
11565 mInput->stream->setGain(1.0f);
11566 }
11567 }
11568 return MmapThread::exitStandby_l();
11569 }
11570
11571 AudioStreamIn* MmapCaptureThread::clearInput()
11572 {
11573 audio_utils::lock_guard _l(mutex());
11574 AudioStreamIn *input = mInput;
11575 mInput = NULL;
11576 return input;
11577 }
11578
11579 void MmapCaptureThread::processVolume_l()
11580 {
11581 bool changed = false;
11582 bool silenced = false;
11583
11584 sp<MmapStreamCallback> callback = mCallback.promote();
11585 if (callback == 0) {
11586 if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
11587 ALOGW("Could not set MMAP stream silenced: no onStreamSilenced callback!");
11588 mNoCallbackWarningCount++;
11589 }
11590 }
11591
11592 // After a change in any track's silenced state, mute capture in the audio DSP if at least one
11593 // track is silenced, and unmute otherwise
11594 for (size_t i = 0; i < mActiveTracks.size() && !silenced; i++) {
11595 if (!mActiveTracks[i]->getAndSetSilencedNotified_l()) {
11596 changed = true;
11597 silenced = mActiveTracks[i]->isSilenced_l();
11598 }
11599 }
11600
11601 if (changed) {
11602 mInput->stream->setGain(silenced ? 0.0f: 1.0f);
11603 }
11604 }
11605
11606 ThreadBase::MetadataUpdate MmapCaptureThread::updateMetadata_l()
11607 {
11608 if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
11609 return {}; // nothing to do
11610 }
11611 StreamInHalInterface::SinkMetadata metadata;
11612 for (const sp<IAfMmapTrack>& track : mActiveTracks) {
11613 // No track is invalid as this is called after prepareTrack_l in the same critical section
11614 record_track_metadata_v7_t trackMetadata;
11615 trackMetadata.base = {
11616 .source = track->attributes().source,
11617 .gain = 1, // capture tracks do not have volumes
11618 };
11619 trackMetadata.channel_mask = track->channelMask(),
11620 strncpy(trackMetadata.tags, track->attributes().tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
11621 metadata.tracks.push_back(trackMetadata);
11622 }
11623 mInput->stream->updateSinkMetadata(metadata);
11624 MetadataUpdate change;
11625 change.recordMetadataUpdate = metadata.tracks;
11626 return change;
11627 }
11628
11629 void MmapCaptureThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
11630 {
11631 audio_utils::lock_guard _l(mutex());
11632 for (size_t i = 0; i < mActiveTracks.size() ; i++) {
11633 if (mActiveTracks[i]->portId() == portId) {
11634 mActiveTracks[i]->setSilenced_l(silenced);
11635 broadcast_l();
11636 }
11637 }
11638 setClientSilencedIfExists_l(portId, silenced);
11639 }
11640
11641 void MmapCaptureThread::toAudioPortConfig(struct audio_port_config* config)
11642 {
11643 MmapThread::toAudioPortConfig(config);
11644 if (mInput && mInput->flags != AUDIO_INPUT_FLAG_NONE) {
11645 config->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
11646 config->flags.input = mInput->flags;
11647 }
11648 }
11649
11650 status_t MmapCaptureThread::getExternalPosition(
11651 uint64_t* position, int64_t* timeNanos) const
11652 {
11653 if (mInput == nullptr) {
11654 return NO_INIT;
11655 }
11656 return mInput->getCapturePosition((int64_t*)position, timeNanos);
11657 }
11658
11659 // ----------------------------------------------------------------------------
11660
11661 /* static */
11662 sp<IAfPlaybackThread> IAfPlaybackThread::createBitPerfectThread(
11663 const sp<IAfThreadCallback>& afThreadCallback,
11664 AudioStreamOut* output, audio_io_handle_t id, bool systemReady) {
11665 return sp<BitPerfectThread>::make(afThreadCallback, output, id, systemReady);
11666 }
11667
11668 BitPerfectThread::BitPerfectThread(const sp<IAfThreadCallback> &afThreadCallback,
11669 AudioStreamOut *output, audio_io_handle_t id, bool systemReady)
11670 : MixerThread(afThreadCallback, output, id, systemReady, BIT_PERFECT) {}
11671
11672 PlaybackThread::mixer_state BitPerfectThread::prepareTracks_l(
11673 Vector<sp<IAfTrack>>* tracksToRemove) {
11674 mixer_state result = MixerThread::prepareTracks_l(tracksToRemove);
11675 // If there is only one active track and it is bit-perfect, enable tee buffer.
11676 float volumeLeft = 1.0f;
11677 float volumeRight = 1.0f;
11678 if (sp<IAfTrack> bitPerfectTrack = getTrackToStreamBitPerfectly_l();
11679 bitPerfectTrack != nullptr) {
11680 const int trackId = bitPerfectTrack->id();
11681 mAudioMixer->setParameter(
11682 trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER, (void *)mSinkBuffer);
11683 mAudioMixer->setParameter(
11684 trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER_FRAME_COUNT,
11685 (void *)(uintptr_t)mNormalFrameCount);
11686 bitPerfectTrack->getFinalVolume(&volumeLeft, &volumeRight);
11687 mIsBitPerfect = true;
11688 } else {
11689 mIsBitPerfect = false;
11690 // No need to copy bit-perfect data directly to sink buffer given there are multiple tracks
11691 // active.
11692 for (const auto& track : mActiveTracks) {
11693 const int trackId = track->id();
11694 mAudioMixer->setParameter(
11695 trackId, AudioMixer::TRACK, AudioMixer::TEE_BUFFER, nullptr);
11696 }
11697 }
11698 if (mVolumeLeft != volumeLeft || mVolumeRight != volumeRight) {
11699 mVolumeLeft = volumeLeft;
11700 mVolumeRight = volumeRight;
11701 setVolumeForOutput_l(volumeLeft, volumeRight);
11702 }
11703 return result;
11704 }
11705
11706 void BitPerfectThread::threadLoop_mix() {
11707 MixerThread::threadLoop_mix();
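// mIsBitPerfect was set by prepareTracks_l() when the single active bit-perfect track's data is
// teed directly into mSinkBuffer; record here that the sink buffer already holds the frames
// to output.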
11708 mHasDataCopiedToSinkBuffer = mIsBitPerfect;
11709 }
11710
11711 void BitPerfectThread::setTracksInternalMute(
11712 std::map<audio_port_handle_t, bool>* tracksInternalMute) {
11713 audio_utils::lock_guard _l(mutex());
11714 for (auto& track : mTracks) {
11715 if (auto it = tracksInternalMute->find(track->portId()); it != tracksInternalMute->end()) {
11716 track->setInternalMute(it->second);
11717 tracksInternalMute->erase(it);
11718 }
11719 }
11720 }
11721
11722 sp<IAfTrack> BitPerfectThread::getTrackToStreamBitPerfectly_l() {
11723 if (com::android::media::audioserver::
11724 fix_concurrent_playback_behavior_with_bit_perfect_client()) {
11725 sp<IAfTrack> bitPerfectTrack = nullptr;
11726 bool allOtherTracksMuted = true;
11727 // Return the bit perfect track if all other tracks are muted
11728 for (const auto& track : mActiveTracks) {
11729 if (track->isBitPerfect()) {
11730 if (track->getInternalMute()) {
11731 // There can only be one bit-perfect client active. If it is mute internally,
11732 // there is no need to stream bit-perfectly.
11733 break;
11734 }
11735 bitPerfectTrack = track;
11736 } else if (track->getFinalVolume() != 0.f) {
11737 allOtherTracksMuted = false;
11738 if (bitPerfectTrack != nullptr) {
11739 break;
11740 }
11741 }
11742 }
11743 return allOtherTracksMuted ? bitPerfectTrack : nullptr;
11744 } else {
11745 if (mActiveTracks.size() == 1 && mActiveTracks[0]->isBitPerfect()) {
11746 return mActiveTracks[0];
11747 }
11748 }
11749 return nullptr;
11750 }
11751
11752 } // namespace android
11753