1 /*
2 **
3 ** Copyright 2012, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 ** http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17
18 #define LOG_TAG "AudioFlinger"
19 //#define LOG_NDEBUG 0
20 #define ATRACE_TAG ATRACE_TAG_AUDIO
21
22 #include "MmapTracks.h"
23 #include "PlaybackTracks.h"
24 #include "RecordTracks.h"
25
26 #include "Client.h"
27 #include "IAfEffect.h"
28 #include "IAfThread.h"
29 #include "ResamplerBufferProvider.h"
30
31 #include <audio_utils/StringUtils.h>
32 #include <audio_utils/minifloat.h>
33 #include <media/AudioValidator.h>
34 #include <media/RecordBufferConverter.h>
35 #include <media/nbaio/Pipe.h>
36 #include <media/nbaio/PipeReader.h>
37 #include <mediautils/ServiceUtilities.h>
38 #include <mediautils/SharedMemoryAllocator.h>
39 #include <private/media/AudioTrackShared.h>
40 #include <utils/Log.h>
41 #include <utils/Trace.h>
42
43 #include <linux/futex.h>
44 #include <math.h>
45 #include <sys/syscall.h>
46
47 // ----------------------------------------------------------------------------
48
49 // Note: the following macro is used for extremely verbose logging message. In
50 // order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
51 // 0; but one side effect of this is to turn all LOGV's as well. Some messages
52 // are so verbose that we want to suppress them even when we have ALOG_ASSERT
53 // turned on. Do not uncomment the #def below unless you really know what you
54 // are doing and want to see all of the extremely verbose messages.
55 //#define VERY_VERY_VERBOSE_LOGGING
56 #ifdef VERY_VERY_VERBOSE_LOGGING
57 #define ALOGVV ALOGV
58 #else
59 #define ALOGVV(a...) do { } while(0)
60 #endif
61
62 // TODO: Remove when this is put into AidlConversionUtil.h
63 #define VALUE_OR_RETURN_BINDER_STATUS(x) \
64 ({ \
65 auto _tmp = (x); \
66 if (!_tmp.ok()) return ::android::aidl_utils::binderStatusFromStatusT(_tmp.error()); \
67 std::move(_tmp.value()); \
68 })
69
70 namespace android {
71
72 using ::android::aidl_utils::binderStatusFromStatusT;
73 using binder::Status;
74 using content::AttributionSourceState;
75 using media::VolumeShaper;
76 // ----------------------------------------------------------------------------
77 // TrackBase
78 // ----------------------------------------------------------------------------
79 #undef LOG_TAG
80 #define LOG_TAG "AF::TrackBase"
81
82 static volatile int32_t nextTrackId = 55;
83
84 // TrackBase constructor must be called with AudioFlinger::mLock held
TrackBase(IAfThreadBase * thread,const sp<Client> & client,const audio_attributes_t & attr,uint32_t sampleRate,audio_format_t format,audio_channel_mask_t channelMask,size_t frameCount,void * buffer,size_t bufferSize,audio_session_t sessionId,pid_t creatorPid,uid_t clientUid,bool isOut,const alloc_type alloc,track_type type,audio_port_handle_t portId,std::string metricsId)85 TrackBase::TrackBase(
86 IAfThreadBase *thread,
87 const sp<Client>& client,
88 const audio_attributes_t& attr,
89 uint32_t sampleRate,
90 audio_format_t format,
91 audio_channel_mask_t channelMask,
92 size_t frameCount,
93 void *buffer,
94 size_t bufferSize,
95 audio_session_t sessionId,
96 pid_t creatorPid,
97 uid_t clientUid,
98 bool isOut,
99 const alloc_type alloc,
100 track_type type,
101 audio_port_handle_t portId,
102 std::string metricsId)
103 :
104 mThread(thread),
105 mAllocType(alloc),
106 mClient(client),
107 mCblk(NULL),
108 // mBuffer, mBufferSize
109 mState(IDLE),
110 mAttr(attr),
111 mSampleRate(sampleRate),
112 mFormat(format),
113 mChannelMask(channelMask),
114 mChannelCount(isOut ?
115 audio_channel_count_from_out_mask(channelMask) :
116 audio_channel_count_from_in_mask(channelMask)),
117 mFrameSize(audio_bytes_per_frame(mChannelCount, format)),
118 mFrameCount(frameCount),
119 mSessionId(sessionId),
120 mIsOut(isOut),
121 mId(android_atomic_inc(&nextTrackId)),
122 mTerminated(false),
123 mType(type),
124 mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
125 mPortId(portId),
126 mIsInvalid(false),
127 mTrackMetrics(std::move(metricsId), isOut, clientUid),
128 mCreatorPid(creatorPid),
129 mTraceSuffix{std::to_string(mPortId).append(".").append(std::to_string(mId))
130 .append(".").append(std::to_string(mThreadIoHandle))},
131 mTraceActionId{std::string(AUDIO_TRACE_PREFIX_AUDIO_TRACK_ACTION).append(mTraceSuffix)},
132 mTraceIntervalId{std::string(AUDIO_TRACE_PREFIX_AUDIO_TRACK_INTERVAL)
133 .append(mTraceSuffix)}
134 {
135 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
136 if (!isAudioServerOrMediaServerUid(callingUid) || clientUid == AUDIO_UID_INVALID) {
137 ALOGW_IF(clientUid != AUDIO_UID_INVALID && clientUid != callingUid,
138 "%s(%d): uid %d tried to pass itself off as %d",
139 __func__, mId, callingUid, clientUid);
140 clientUid = callingUid;
141 }
142 // clientUid contains the uid of the app that is responsible for this track, so we can blame
143 // battery usage on it.
144 mUid = clientUid;
145
146 // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
147
148 size_t minBufferSize = buffer == NULL ? roundup(frameCount) : frameCount;
149 // check overflow when computing bufferSize due to multiplication by mFrameSize.
150 if (minBufferSize < frameCount // roundup rounds down for values above UINT_MAX / 2
151 || mFrameSize == 0 // format needs to be correct
152 || minBufferSize > SIZE_MAX / mFrameSize) {
153 android_errorWriteLog(0x534e4554, "34749571");
154 return;
155 }
156 minBufferSize *= mFrameSize;
157
158 if (buffer == nullptr) {
159 bufferSize = minBufferSize; // allocated here.
160 } else if (minBufferSize > bufferSize) {
161 android_errorWriteLog(0x534e4554, "38340117");
162 return;
163 }
164
165 size_t size = sizeof(audio_track_cblk_t);
166 if (buffer == NULL && alloc == ALLOC_CBLK) {
167 // check overflow when computing allocation size for streaming tracks.
168 if (size > SIZE_MAX - bufferSize) {
169 android_errorWriteLog(0x534e4554, "34749571");
170 return;
171 }
172 size += bufferSize;
173 }
174
175 if (client != 0) {
176 mCblkMemory = client->allocator().allocate(mediautils::NamedAllocRequest{{size},
177 std::string("Track ID: ").append(std::to_string(mId))});
178 if (mCblkMemory == 0 ||
179 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) {
180 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
181 ALOGE("%s", client->allocator().dump().c_str());
182 mCblkMemory.clear();
183 return;
184 }
185 } else {
186 mCblk = (audio_track_cblk_t *) malloc(size);
187 if (mCblk == NULL) {
188 ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
189 return;
190 }
191 }
192
193 // construct the shared structure in-place.
194 if (mCblk != NULL) {
195 new(mCblk) audio_track_cblk_t();
196 switch (alloc) {
197 case ALLOC_READONLY: {
198 const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
199 if (roHeap == 0 ||
200 (mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
201 (mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
202 ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
203 __func__, mId, bufferSize);
204 if (roHeap != 0) {
205 roHeap->dump("buffer");
206 }
207 mCblkMemory.clear();
208 mBufferMemory.clear();
209 return;
210 }
211 memset(mBuffer, 0, bufferSize);
212 } break;
213 case ALLOC_PIPE:
214 mBufferMemory = thread->pipeMemory();
215 // mBuffer is the virtual address as seen from current process (mediaserver),
216 // and should normally be coming from mBufferMemory->unsecurePointer().
217 // However in this case the TrackBase does not reference the buffer directly.
218 // It should references the buffer via the pipe.
219 // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
220 mBuffer = NULL;
221 bufferSize = 0;
222 break;
223 case ALLOC_CBLK:
224 // clear all buffers
225 if (buffer == NULL) {
226 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
227 memset(mBuffer, 0, bufferSize);
228 } else {
229 mBuffer = buffer;
230 #if 0
231 mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic
232 #endif
233 }
234 break;
235 case ALLOC_LOCAL:
236 mBuffer = calloc(1, bufferSize);
237 break;
238 case ALLOC_NONE:
239 mBuffer = buffer;
240 break;
241 default:
242 LOG_ALWAYS_FATAL("%s(%d): invalid allocation type: %d", __func__, mId, (int)alloc);
243 }
244 mBufferSize = bufferSize;
245
246 #ifdef TEE_SINK
247 mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
248 #endif
249 // mState is mirrored for the client to read.
250 mState.setMirror(&mCblk->mState);
251 // ensure our state matches up until we consolidate the enumeration.
252 static_assert(CBLK_STATE_IDLE == IDLE);
253 static_assert(CBLK_STATE_PAUSING == PAUSING);
254 }
255 }
256
257 // TODO b/182392769: use attribution source util
audioServerAttributionSource(pid_t pid)258 static AttributionSourceState audioServerAttributionSource(pid_t pid) {
259 AttributionSourceState attributionSource{};
260 attributionSource.uid = AID_AUDIOSERVER;
261 attributionSource.pid = pid;
262 attributionSource.token = sp<BBinder>::make();
263 return attributionSource;
264 }
265
initCheck() const266 status_t TrackBase::initCheck() const
267 {
268 status_t status;
269 if (mType == TYPE_OUTPUT || mType == TYPE_PATCH) {
270 status = cblk() != NULL ? NO_ERROR : NO_MEMORY;
271 } else {
272 status = getCblk() != 0 ? NO_ERROR : NO_MEMORY;
273 }
274 return status;
275 }
276
~TrackBase()277 TrackBase::~TrackBase()
278 {
279 // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
280 mServerProxy.clear();
281 releaseCblk();
282 mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to
283 if (mClient != 0) {
284 // Client destructor must run with AudioFlinger client mutex locked
285 audio_utils::lock_guard _l(mClient->afClientCallback()->clientMutex());
286 // If the client's reference count drops to zero, the associated destructor
287 // must run with AudioFlinger lock held. Thus the explicit clear() rather than
288 // relying on the automatic clear() at end of scope.
289 mClient.clear();
290 }
291 if (mAllocType == ALLOC_LOCAL) {
292 free(mBuffer);
293 mBuffer = nullptr;
294 }
295 // flush the binder command buffer
296 IPCThreadState::self()->flushCommands();
297 }
298
299 // AudioBufferProvider interface
300 // getNextBuffer() = 0;
301 // This implementation of releaseBuffer() is used by Track and RecordTrack
releaseBuffer(AudioBufferProvider::Buffer * buffer)302 void TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
303 {
304 #ifdef TEE_SINK
305 mTee.write(buffer->raw, buffer->frameCount);
306 #endif
307
308 ServerProxy::Buffer buf;
309 buf.mFrameCount = buffer->frameCount;
310 buf.mRaw = buffer->raw;
311 buffer->frameCount = 0;
312 buffer->raw = NULL;
313 mServerProxy->releaseBuffer(&buf);
314 }
315
setSyncEvent(const sp<audioflinger::SyncEvent> & event)316 status_t TrackBase::setSyncEvent(
317 const sp<audioflinger::SyncEvent>& event)
318 {
319 mSyncEvents.emplace_back(event);
320 return NO_ERROR;
321 }
322
deferRestartIfDisabled()323 void TrackBase::deferRestartIfDisabled()
324 {
325 const auto thread = mThread.promote();
326 if (thread == nullptr) return;
327 auto weakTrack = wp<TrackBase>::fromExisting(this);
328 thread->getThreadloopExecutor().defer([weakTrack] {
329 const auto actual = weakTrack.promote();
330 if (actual) actual->restartIfDisabled();
331 });
332 }
333
beginBatteryAttribution()334 void TrackBase::beginBatteryAttribution() {
335 mBatteryStatsHolder.emplace(uid());
336 if (media::psh_utils::AudioPowerManager::enabled()) {
337 mTrackToken = media::psh_utils::createAudioTrackToken(uid());
338 }
339 }
340
endBatteryAttribution()341 void TrackBase::endBatteryAttribution() {
342 mBatteryStatsHolder.reset();
343 mTrackToken.reset();
344 }
345
createDeviceIntervalTrace(const std::string & devices)346 audio_utils::trace::Object TrackBase::createDeviceIntervalTrace(const std::string& devices) {
347 audio_utils::trace::Object trace;
348
349 // Please do not modify any items without approval (look at git blame).
350 // Sanitize the device string to remove addresses.
351 std::string plainDevices;
352 if (devices.find(")") != std::string::npos) {
353 auto deviceAddrVector = audio_utils::stringutils::getDeviceAddressPairs(devices);
354 for (const auto& deviceAddr : deviceAddrVector) {
355 // "|" not compatible with ATRACE filtering so we use "+".
356 if (!plainDevices.empty()) plainDevices.append("+");
357 plainDevices.append(deviceAddr.first);
358 }
359 } else {
360 plainDevices = devices;
361 }
362
363 trace // the following key, value pairs should be alphabetical
364 .set(AUDIO_TRACE_OBJECT_KEY_CHANNEL_MASK, static_cast<int32_t>(mChannelMask))
365 .set(AUDIO_TRACE_OBJECT_KEY_CONTENT_TYPE, toString(mAttr.content_type))
366 .set(AUDIO_TRACE_OBJECT_KEY_DEVICES, plainDevices)
367 .set(AUDIO_TRACE_OBJECT_KEY_FLAGS, trackFlagsAsString())
368 .set(AUDIO_TRACE_OBJECT_KEY_FORMAT, IAfThreadBase::formatToString(mFormat))
369 .set(AUDIO_TRACE_OBJECT_KEY_FRAMECOUNT, static_cast<int64_t>(mFrameCount))
370 .set(AUDIO_TRACE_OBJECT_KEY_PID, static_cast<int32_t>(
371 mClient ? mClient->pid() : getpid()))
372 .set(AUDIO_TRACE_OBJECT_KEY_SAMPLE_RATE, static_cast<int32_t>(sampleRate()));
373 if (const auto thread = mThread.promote()) {
374 trace // continue in alphabetical order
375 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_CHANNEL_MASK,
376 static_cast<int32_t>(thread->channelMask()))
377 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_FLAGS,
378 thread->flagsAsString())
379 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_FORMAT,
380 IAfThreadBase::formatToString(thread->format()))
381 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_FRAMECOUNT,
382 static_cast<int64_t>(thread->frameCount()))
383 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_ID,
384 static_cast<int32_t>(mThreadIoHandle))
385 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_SAMPLE_RATE,
386 static_cast<int32_t>(thread->sampleRate()))
387 .set(AUDIO_TRACE_PREFIX_THREAD AUDIO_TRACE_OBJECT_KEY_TYPE,
388 IAfThreadBase::threadTypeToString(thread->type()));
389 }
390 trace // continue in alphabetical order
391 .set(AUDIO_TRACE_OBJECT_KEY_UID, static_cast<int32_t>(uid()))
392 .set(AUDIO_TRACE_OBJECT_KEY_USAGE, toString(mAttr.usage));
393 return trace;
394 }
395
logBeginInterval(const std::string & devices)396 void TrackBase::logBeginInterval(const std::string& devices) {
397 mTrackMetrics.logBeginInterval(devices);
398
399 if (ATRACE_ENABLED()) [[unlikely]] {
400 auto trace = createDeviceIntervalTrace(devices);
401 mLastTrace = trace;
402 ATRACE_INSTANT_FOR_TRACK(mTraceIntervalId.c_str(),
403 trace.set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_BEGIN_INTERVAL)
404 .toTrace().c_str());
405 }
406 }
407
logEndInterval()408 void TrackBase::logEndInterval() {
409 if (!mLastTrace.empty()) {
410 if (ATRACE_ENABLED()) [[unlikely]] {
411 ATRACE_INSTANT_FOR_TRACK(mTraceIntervalId.c_str(),
412 mLastTrace.set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_END_INTERVAL)
413 .toTrace().c_str());
414 }
415 mLastTrace.clear();
416 }
417 mTrackMetrics.logEndInterval();
418 }
419
logRefreshInterval(const std::string & devices)420 void TrackBase::logRefreshInterval(const std::string& devices) {
421 if (ATRACE_ENABLED()) [[unlikely]] {
422 if (mLastTrace.empty()) mLastTrace = createDeviceIntervalTrace(devices);
423 auto trace = mLastTrace;
424 ATRACE_INSTANT_FOR_TRACK(mTraceIntervalId.c_str(),
425 trace.set(AUDIO_TRACE_OBJECT_KEY_EVENT,
426 AUDIO_TRACE_EVENT_REFRESH_INTERVAL)
427 .toTrace().c_str());
428 }
429 }
430
PatchTrackBase(const sp<ClientProxy> & proxy,IAfThreadBase * thread,const Timeout & timeout)431 PatchTrackBase::PatchTrackBase(const sp<ClientProxy>& proxy,
432 IAfThreadBase* thread, const Timeout& timeout)
433 : mProxy(proxy)
434 {
435 if (timeout) {
436 setPeerTimeout(*timeout);
437 } else {
438 // Double buffer mixer
439 uint64_t mixBufferNs = ((uint64_t)2 * thread->frameCount() * 1000000000) /
440 thread->sampleRate();
441 setPeerTimeout(std::chrono::nanoseconds{mixBufferNs});
442 }
443 }
444
setPeerTimeout(std::chrono::nanoseconds timeout)445 void PatchTrackBase::setPeerTimeout(std::chrono::nanoseconds timeout) {
446 mPeerTimeout.tv_sec = timeout.count() / std::nano::den;
447 mPeerTimeout.tv_nsec = timeout.count() % std::nano::den;
448 }
449
450
451 // ----------------------------------------------------------------------------
452 // Playback
453 // ----------------------------------------------------------------------------
454 #undef LOG_TAG
455 #define LOG_TAG "AF::TrackHandle"
456
457 class TrackHandle : public android::media::BnAudioTrack {
458 public:
459 explicit TrackHandle(const sp<IAfTrack>& track);
460 ~TrackHandle() override;
461
462 binder::Status getCblk(std::optional<media::SharedFileRegion>* _aidl_return) final;
463 binder::Status start(int32_t* _aidl_return) final;
464 binder::Status stop() final;
465 binder::Status flush() final;
466 binder::Status pause() final;
467 binder::Status attachAuxEffect(int32_t effectId, int32_t* _aidl_return) final;
468 binder::Status setParameters(const std::string& keyValuePairs,
469 int32_t* _aidl_return) final;
470 binder::Status selectPresentation(int32_t presentationId, int32_t programId,
471 int32_t* _aidl_return) final;
472 binder::Status getTimestamp(media::AudioTimestampInternal* timestamp,
473 int32_t* _aidl_return) final;
474 binder::Status signal() final;
475 binder::Status applyVolumeShaper(const media::VolumeShaperConfiguration& configuration,
476 const media::VolumeShaperOperation& operation,
477 int32_t* _aidl_return) final;
478 binder::Status getVolumeShaperState(
479 int32_t id,
480 std::optional<media::VolumeShaperState>* _aidl_return) final;
481 binder::Status getDualMonoMode(
482 media::audio::common::AudioDualMonoMode* _aidl_return) final;
483 binder::Status setDualMonoMode(
484 media::audio::common::AudioDualMonoMode mode) final;
485 binder::Status getAudioDescriptionMixLevel(float* _aidl_return) final;
486 binder::Status setAudioDescriptionMixLevel(float leveldB) final;
487 binder::Status getPlaybackRateParameters(
488 media::audio::common::AudioPlaybackRate* _aidl_return) final;
489 binder::Status setPlaybackRateParameters(
490 const media::audio::common::AudioPlaybackRate& playbackRate) final;
491
492 private:
493 const sp<IAfTrack> mTrack;
494 };
495
496 /* static */
createIAudioTrackAdapter(const sp<IAfTrack> & track)497 sp<media::IAudioTrack> IAfTrack::createIAudioTrackAdapter(const sp<IAfTrack>& track) {
498 return sp<TrackHandle>::make(track);
499 }
500
TrackHandle(const sp<IAfTrack> & track)501 TrackHandle::TrackHandle(const sp<IAfTrack>& track)
502 : BnAudioTrack(),
503 mTrack(track)
504 {
505 setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
506 setInheritRt(true);
507 }
508
~TrackHandle()509 TrackHandle::~TrackHandle() {
510 // just stop the track on deletion, associated resources
511 // will be freed from the main thread once all pending buffers have
512 // been played. Unless it's not in the active track list, in which
513 // case we free everything now...
514 mTrack->destroy();
515 }
516
getCblk(std::optional<media::SharedFileRegion> * _aidl_return)517 Status TrackHandle::getCblk(
518 std::optional<media::SharedFileRegion>* _aidl_return) {
519 *_aidl_return = legacy2aidl_NullableIMemory_SharedFileRegion(mTrack->getCblk()).value();
520 return Status::ok();
521 }
522
start(int32_t * _aidl_return)523 Status TrackHandle::start(int32_t* _aidl_return) {
524 *_aidl_return = mTrack->start();
525 return Status::ok();
526 }
527
stop()528 Status TrackHandle::stop() {
529 mTrack->stop();
530 return Status::ok();
531 }
532
flush()533 Status TrackHandle::flush() {
534 mTrack->flush();
535 return Status::ok();
536 }
537
pause()538 Status TrackHandle::pause() {
539 mTrack->pause();
540 return Status::ok();
541 }
542
attachAuxEffect(int32_t effectId,int32_t * _aidl_return)543 Status TrackHandle::attachAuxEffect(int32_t effectId,
544 int32_t* _aidl_return) {
545 *_aidl_return = mTrack->attachAuxEffect(effectId);
546 return Status::ok();
547 }
548
setParameters(const std::string & keyValuePairs,int32_t * _aidl_return)549 Status TrackHandle::setParameters(const std::string& keyValuePairs,
550 int32_t* _aidl_return) {
551 *_aidl_return = mTrack->setParameters(String8(keyValuePairs.c_str()));
552 return Status::ok();
553 }
554
selectPresentation(int32_t presentationId,int32_t programId,int32_t * _aidl_return)555 Status TrackHandle::selectPresentation(int32_t presentationId, int32_t programId,
556 int32_t* _aidl_return) {
557 *_aidl_return = mTrack->selectPresentation(presentationId, programId);
558 return Status::ok();
559 }
560
getTimestamp(media::AudioTimestampInternal * timestamp,int32_t * _aidl_return)561 Status TrackHandle::getTimestamp(media::AudioTimestampInternal* timestamp,
562 int32_t* _aidl_return) {
563 AudioTimestamp legacy;
564 *_aidl_return = mTrack->getTimestamp(legacy);
565 if (*_aidl_return != OK) {
566 return Status::ok();
567 }
568
569 // restrict position modulo INT_MAX to avoid integer sanitization abort
570 legacy.mPosition &= INT_MAX;
571
572 *timestamp = legacy2aidl_AudioTimestamp_AudioTimestampInternal(legacy).value();
573 return Status::ok();
574 }
575
signal()576 Status TrackHandle::signal() {
577 mTrack->signal();
578 return Status::ok();
579 }
580
applyVolumeShaper(const media::VolumeShaperConfiguration & configuration,const media::VolumeShaperOperation & operation,int32_t * _aidl_return)581 Status TrackHandle::applyVolumeShaper(
582 const media::VolumeShaperConfiguration& configuration,
583 const media::VolumeShaperOperation& operation,
584 int32_t* _aidl_return) {
585 sp<VolumeShaper::Configuration> conf = new VolumeShaper::Configuration();
586 *_aidl_return = conf->readFromParcelable(configuration);
587 if (*_aidl_return != OK) {
588 return Status::ok();
589 }
590
591 sp<VolumeShaper::Operation> op = new VolumeShaper::Operation();
592 *_aidl_return = op->readFromParcelable(operation);
593 if (*_aidl_return != OK) {
594 return Status::ok();
595 }
596
597 *_aidl_return = mTrack->applyVolumeShaper(conf, op);
598 return Status::ok();
599 }
600
getVolumeShaperState(int32_t id,std::optional<media::VolumeShaperState> * _aidl_return)601 Status TrackHandle::getVolumeShaperState(
602 int32_t id,
603 std::optional<media::VolumeShaperState>* _aidl_return) {
604 sp<VolumeShaper::State> legacy = mTrack->getVolumeShaperState(id);
605 if (legacy == nullptr) {
606 _aidl_return->reset();
607 return Status::ok();
608 }
609 media::VolumeShaperState aidl;
610 legacy->writeToParcelable(&aidl);
611 *_aidl_return = aidl;
612 return Status::ok();
613 }
614
getDualMonoMode(media::audio::common::AudioDualMonoMode * _aidl_return)615 Status TrackHandle::getDualMonoMode(
616 media::audio::common::AudioDualMonoMode* _aidl_return)
617 {
618 audio_dual_mono_mode_t mode = AUDIO_DUAL_MONO_MODE_OFF;
619 const status_t status = mTrack->getDualMonoMode(&mode)
620 ?: AudioValidator::validateDualMonoMode(mode);
621 if (status == OK) {
622 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
623 legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(mode));
624 }
625 return binderStatusFromStatusT(status);
626 }
627
setDualMonoMode(media::audio::common::AudioDualMonoMode mode)628 Status TrackHandle::setDualMonoMode(
629 media::audio::common::AudioDualMonoMode mode)
630 {
631 const auto localMonoMode = VALUE_OR_RETURN_BINDER_STATUS(
632 aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mode));
633 return binderStatusFromStatusT(AudioValidator::validateDualMonoMode(localMonoMode)
634 ?: mTrack->setDualMonoMode(localMonoMode));
635 }
636
getAudioDescriptionMixLevel(float * _aidl_return)637 Status TrackHandle::getAudioDescriptionMixLevel(float* _aidl_return)
638 {
639 float leveldB = -std::numeric_limits<float>::infinity();
640 const status_t status = mTrack->getAudioDescriptionMixLevel(&leveldB)
641 ?: AudioValidator::validateAudioDescriptionMixLevel(leveldB);
642 if (status == OK) *_aidl_return = leveldB;
643 return binderStatusFromStatusT(status);
644 }
645
setAudioDescriptionMixLevel(float leveldB)646 Status TrackHandle::setAudioDescriptionMixLevel(float leveldB)
647 {
648 return binderStatusFromStatusT(AudioValidator::validateAudioDescriptionMixLevel(leveldB)
649 ?: mTrack->setAudioDescriptionMixLevel(leveldB));
650 }
651
getPlaybackRateParameters(media::audio::common::AudioPlaybackRate * _aidl_return)652 Status TrackHandle::getPlaybackRateParameters(
653 media::audio::common::AudioPlaybackRate* _aidl_return)
654 {
655 audio_playback_rate_t localPlaybackRate{};
656 status_t status = mTrack->getPlaybackRateParameters(&localPlaybackRate)
657 ?: AudioValidator::validatePlaybackRate(localPlaybackRate);
658 if (status == NO_ERROR) {
659 *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(
660 legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(localPlaybackRate));
661 }
662 return binderStatusFromStatusT(status);
663 }
664
setPlaybackRateParameters(const media::audio::common::AudioPlaybackRate & playbackRate)665 Status TrackHandle::setPlaybackRateParameters(
666 const media::audio::common::AudioPlaybackRate& playbackRate)
667 {
668 const audio_playback_rate_t localPlaybackRate = VALUE_OR_RETURN_BINDER_STATUS(
669 aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(playbackRate));
670 return binderStatusFromStatusT(AudioValidator::validatePlaybackRate(localPlaybackRate)
671 ?: mTrack->setPlaybackRateParameters(localPlaybackRate));
672 }
673
674 // ----------------------------------------------------------------------------
675 // AppOp for audio playback
676 // -------------------------------
677
678 // static
createIfNeeded(IAfThreadBase * thread,const AttributionSourceState & attributionSource,const audio_attributes_t & attr,int id,audio_stream_type_t streamType)679 sp<OpPlayAudioMonitor> OpPlayAudioMonitor::createIfNeeded(
680 IAfThreadBase* thread,
681 const AttributionSourceState& attributionSource, const audio_attributes_t& attr, int id,
682 audio_stream_type_t streamType)
683 {
684 Vector<String16> packages;
685 const uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
686 getPackagesForUid(uid, packages);
687 if (isServiceUid(uid)) {
688 if (packages.isEmpty()) {
689 ALOGW("OpPlayAudio: not muting track:%d usage:%d for service UID %d", id, attr.usage,
690 uid);
691 return nullptr;
692 }
693 }
694 // stream type has been filtered by audio policy to indicate whether it can be muted
695 if (streamType == AUDIO_STREAM_ENFORCED_AUDIBLE) {
696 ALOGD("OpPlayAudio: not muting track:%d usage:%d ENFORCED_AUDIBLE", id, attr.usage);
697 return nullptr;
698 }
699 if ((attr.flags & AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY)
700 == AUDIO_FLAG_BYPASS_INTERRUPTION_POLICY) {
701 ALOGD("OpPlayAudio: not muting track:%d flags %#x have FLAG_BYPASS_INTERRUPTION_POLICY",
702 id, attr.flags);
703 return nullptr;
704 }
705 return sp<OpPlayAudioMonitor>::make(thread, attributionSource, attr.usage, id, uid);
706 }
707
OpPlayAudioMonitor(IAfThreadBase * thread,const AttributionSourceState & attributionSource,audio_usage_t usage,int id,uid_t uid)708 OpPlayAudioMonitor::OpPlayAudioMonitor(IAfThreadBase* thread,
709 const AttributionSourceState& attributionSource,
710 audio_usage_t usage, int id, uid_t uid)
711 : mThread(wp<IAfThreadBase>::fromExisting(thread)),
712 mHasOpPlayAudio(true),
713 mUsage((int32_t)usage),
714 mId(id),
715 mUid(uid),
716 mPackageName(VALUE_OR_FATAL(aidl2legacy_string_view_String16(
717 attributionSource.packageName.value_or("")))) {}
718
~OpPlayAudioMonitor()719 OpPlayAudioMonitor::~OpPlayAudioMonitor()
720 {
721 if (mOpCallback != 0) {
722 mAppOpsManager.stopWatchingMode(mOpCallback);
723 }
724 mOpCallback.clear();
725 }
726
onFirstRef()727 void OpPlayAudioMonitor::onFirstRef()
728 {
729 // make sure not to broadcast the initial state since it is not needed and could
730 // cause a deadlock since this method can be called with the mThread->mLock held
731 checkPlayAudioForUsage(/*doBroadcast=*/false);
732 if (mPackageName.size()) {
733 mOpCallback = new PlayAudioOpCallback(this);
734 mAppOpsManager.startWatchingMode(AppOpsManager::OP_PLAY_AUDIO, mPackageName, mOpCallback);
735 } else {
736 ALOGW("Skipping OpPlayAudioMonitor due to null package name");
737 }
738 }
739
hasOpPlayAudio() const740 bool OpPlayAudioMonitor::hasOpPlayAudio() const {
741 return mHasOpPlayAudio.load();
742 }
743
744 // Note this method is never called (and never to be) for audio server / patch record track
745 // - not called from constructor due to check on UID,
746 // - not called from PlayAudioOpCallback because the callback is not installed in this case
checkPlayAudioForUsage(bool doBroadcast)747 void OpPlayAudioMonitor::checkPlayAudioForUsage(bool doBroadcast) {
748 const bool hasAppOps =
749 mPackageName.size() &&
750 mAppOpsManager.checkAudioOpNoThrow(AppOpsManager::OP_PLAY_AUDIO, mUsage, mUid,
751 mPackageName) == AppOpsManager::MODE_ALLOWED;
752
753 bool shouldChange = !hasAppOps; // check if we need to update.
754 if (mHasOpPlayAudio.compare_exchange_strong(shouldChange, hasAppOps)) {
755 ALOGI("OpPlayAudio: track:%d package:%s usage:%d %smuted", mId,
756 String8(mPackageName).c_str(), mUsage, hasAppOps ? "not " : "");
757 if (doBroadcast) {
758 auto thread = mThread.promote();
759 if (thread != nullptr && thread->type() == IAfThreadBase::OFFLOAD) {
760 // Wake up Thread if offloaded, otherwise it may be several seconds for update.
761 audio_utils::lock_guard _l(thread->mutex());
762 thread->broadcast_l();
763 }
764 }
765 }
766 }
767
PlayAudioOpCallback(const wp<OpPlayAudioMonitor> & monitor)768 OpPlayAudioMonitor::PlayAudioOpCallback::PlayAudioOpCallback(
769 const wp<OpPlayAudioMonitor>& monitor) : mMonitor(monitor)
770 { }
771
opChanged(int32_t op,const String16 & packageName)772 void OpPlayAudioMonitor::PlayAudioOpCallback::opChanged(int32_t op,
773 const String16& packageName) {
774 if (op != AppOpsManager::OP_PLAY_AUDIO) {
775 return;
776 }
777
778 ALOGI("%s OP_PLAY_AUDIO callback received for %s", __func__, String8(packageName).c_str());
779 sp<OpPlayAudioMonitor> monitor = mMonitor.promote();
780 if (monitor != NULL) {
781 monitor->checkPlayAudioForUsage(/*doBroadcast=*/true);
782 }
783 }
784
785 // static
getPackagesForUid(uid_t uid,Vector<String16> & packages)786 void OpPlayAudioMonitor::getPackagesForUid(
787 uid_t uid, Vector<String16>& packages)
788 {
789 PermissionController permissionController;
790 permissionController.getPackagesForUid(uid, packages);
791 }
792
793 // ----------------------------------------------------------------------------
794 #undef LOG_TAG
795 #define LOG_TAG "AF::Track"
796
797 /* static */
create(IAfPlaybackThread * thread,const sp<Client> & client,audio_stream_type_t streamType,const audio_attributes_t & attr,uint32_t sampleRate,audio_format_t format,audio_channel_mask_t channelMask,size_t frameCount,void * buffer,size_t bufferSize,const sp<IMemory> & sharedBuffer,audio_session_t sessionId,pid_t creatorPid,const AttributionSourceState & attributionSource,audio_output_flags_t flags,track_type type,audio_port_handle_t portId,size_t frameCountToBeReady,float speed,bool isSpatialized,bool isBitPerfect,float volume,bool muted)798 sp<IAfTrack> IAfTrack::create(
799 IAfPlaybackThread* thread,
800 const sp<Client>& client,
801 audio_stream_type_t streamType,
802 const audio_attributes_t& attr,
803 uint32_t sampleRate,
804 audio_format_t format,
805 audio_channel_mask_t channelMask,
806 size_t frameCount,
807 void *buffer,
808 size_t bufferSize,
809 const sp<IMemory>& sharedBuffer,
810 audio_session_t sessionId,
811 pid_t creatorPid,
812 const AttributionSourceState& attributionSource,
813 audio_output_flags_t flags,
814 track_type type,
815 audio_port_handle_t portId,
816 /** default behaviour is to start when there are as many frames
817 * ready as possible (aka. Buffer is full). */
818 size_t frameCountToBeReady,
819 float speed,
820 bool isSpatialized,
821 bool isBitPerfect,
822 float volume,
823 bool muted) {
824 return sp<Track>::make(thread,
825 client,
826 streamType,
827 attr,
828 sampleRate,
829 format,
830 channelMask,
831 frameCount,
832 buffer,
833 bufferSize,
834 sharedBuffer,
835 sessionId,
836 creatorPid,
837 attributionSource,
838 flags,
839 type,
840 portId,
841 frameCountToBeReady,
842 speed,
843 isSpatialized,
844 isBitPerfect,
845 volume,
846 muted);
847 }
848
849 // Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
Track(IAfPlaybackThread * thread,const sp<Client> & client,audio_stream_type_t streamType,const audio_attributes_t & attr,uint32_t sampleRate,audio_format_t format,audio_channel_mask_t channelMask,size_t frameCount,void * buffer,size_t bufferSize,const sp<IMemory> & sharedBuffer,audio_session_t sessionId,pid_t creatorPid,const AttributionSourceState & attributionSource,audio_output_flags_t flags,track_type type,audio_port_handle_t portId,size_t frameCountToBeReady,float speed,bool isSpatialized,bool isBitPerfect,float volume,bool muted)850 Track::Track(
851 IAfPlaybackThread* thread,
852 const sp<Client>& client,
853 audio_stream_type_t streamType,
854 const audio_attributes_t& attr,
855 uint32_t sampleRate,
856 audio_format_t format,
857 audio_channel_mask_t channelMask,
858 size_t frameCount,
859 void *buffer,
860 size_t bufferSize,
861 const sp<IMemory>& sharedBuffer,
862 audio_session_t sessionId,
863 pid_t creatorPid,
864 const AttributionSourceState& attributionSource,
865 audio_output_flags_t flags,
866 track_type type,
867 audio_port_handle_t portId,
868 size_t frameCountToBeReady,
869 float speed,
870 bool isSpatialized,
871 bool isBitPerfect,
872 float volume,
873 bool muted)
874 : TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
875 // TODO: Using unsecurePointer() has some associated security pitfalls
876 // (see declaration for details).
877 // Either document why it is safe in this case or address the
878 // issue (e.g. by copying).
879 (sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
880 (sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
881 sessionId, creatorPid,
882 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)), true /*isOut*/,
883 (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
884 type,
885 portId,
886 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(portId)),
887 mFillingStatus(FS_INVALID),
888 // mRetryCount initialized later when needed
889 mSharedBuffer(sharedBuffer),
890 mStreamType(streamType),
891 mMainBuffer(thread->sinkBuffer()),
892 mAuxBuffer(NULL),
893 mAuxEffectId(0), mHasVolumeController(false),
894 mFrameMap(16 /* sink-frame-to-track-frame map memory */),
895 mVolumeHandler(new media::VolumeHandler(sampleRate)),
896 mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(thread, attributionSource, attr, id(),
897 streamType)),
898 // mSinkTimestamp
899 mFastIndex(-1),
900 mCachedVolume(1.0),
901 /* The track might not play immediately after being active, similarly as if its volume was 0.
902 * When the track starts playing, its volume will be computed. */
903 mFinalVolume(0.f),
904 mResumeToStopping(false),
905 mFlushHwPending(false),
906 mFlags(flags),
907 mSpeed(speed),
908 mIsSpatialized(isSpatialized),
909 mIsBitPerfect(isBitPerfect),
910 mVolume(volume)
911 {
912 // client == 0 implies sharedBuffer == 0
913 ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
914
915 ALOGV_IF(sharedBuffer != 0, "%s(%d): sharedBuffer: %p, size: %zu",
916 __func__, mId, sharedBuffer->unsecurePointer(), sharedBuffer->size());
917
918 if (mCblk == NULL) {
919 return;
920 }
921
922 uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
923 if (!thread->isTrackAllowed_l(channelMask, format, sessionId, uid)) {
924 ALOGE("%s(%d): no more tracks available", __func__, mId);
925 releaseCblk(); // this makes the track invalid.
926 return;
927 }
928
929 if (sharedBuffer == 0) {
930 mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
931 mFrameSize, !isExternalTrack(), sampleRate);
932 } else {
933 mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
934 mFrameSize, sampleRate);
935 }
936 mServerProxy = mAudioTrackServerProxy;
937 mServerProxy->setStartThresholdInFrames(frameCountToBeReady); // update the Cblk value
938
939 // only allocate a fast track index if we were able to allocate a normal track name
940 if (flags & AUDIO_OUTPUT_FLAG_FAST) {
941 // FIXME: Not calling framesReadyIsCalledByMultipleThreads() exposes a potential
942 // race with setSyncEvent(). However, if we call it, we cannot properly start
943 // static fast tracks (SoundPool) immediately after stopping.
944 //mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
945 ALOG_ASSERT(thread->fastTrackAvailMask_l() != 0);
946 const int i = __builtin_ctz(thread->fastTrackAvailMask_l());
947 ALOG_ASSERT(0 < i && i < (int)FastMixerState::sMaxFastTracks);
948 // FIXME This is too eager. We allocate a fast track index before the
949 // fast track becomes active. Since fast tracks are a scarce resource,
950 // this means we are potentially denying other more important fast tracks from
951 // being created. It would be better to allocate the index dynamically.
952 mFastIndex = i;
953 thread->fastTrackAvailMask_l() &= ~(1 << i);
954 }
955
956 populateUsageAndContentTypeFromStreamType();
957
958 mMutedFromPort = muted;
959
960 // Audio patch and call assistant volume are always max
961 if (mAttr.usage == AUDIO_USAGE_CALL_ASSISTANT
962 || mAttr.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
963 mVolume = 1.0f;
964 mMutedFromPort = false;
965 }
966
967 mServerLatencySupported = checkServerLatencySupported(format, flags);
968 #ifdef TEE_SINK
969 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
970 + "_" + std::to_string(mId) + "_T");
971 #endif
972
973 if (thread->supportsHapticPlayback()) {
974 // If the track is attached to haptic playback thread, it is potentially to have
975 // HapticGenerator effect, which will generate haptic data, on the track. In that case,
976 // external vibration is always created for all tracks attached to haptic playback thread.
977 mAudioVibrationController = new AudioVibrationController(this);
978 std::string packageName = attributionSource.packageName.has_value() ?
979 attributionSource.packageName.value() : "";
980 mExternalVibration = new os::ExternalVibration(
981 mUid, packageName, mAttr, mAudioVibrationController);
982 }
983
984 // Once this item is logged by the server, the client can add properties.
985 const char * const traits = sharedBuffer == 0 ? "" : "static";
986 mTrackMetrics.logConstructor(creatorPid, uid, id(), traits, streamType);
987 }
988
989 // When attributes are undefined, derive default values from stream type.
990 // See AudioAttributes.java, usageForStreamType() and Builder.setInternalLegacyStreamType()
populateUsageAndContentTypeFromStreamType()991 void Track::populateUsageAndContentTypeFromStreamType() {
992 if (mAttr.usage == AUDIO_USAGE_UNKNOWN) {
993 switch (mStreamType) {
994 case AUDIO_STREAM_VOICE_CALL:
995 mAttr.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
996 mAttr.content_type = AUDIO_CONTENT_TYPE_SPEECH;
997 break;
998 case AUDIO_STREAM_SYSTEM:
999 mAttr.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
1000 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1001 break;
1002 case AUDIO_STREAM_RING:
1003 mAttr.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
1004 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1005 break;
1006 case AUDIO_STREAM_MUSIC:
1007 mAttr.usage = AUDIO_USAGE_MEDIA;
1008 mAttr.content_type = AUDIO_CONTENT_TYPE_MUSIC;
1009 break;
1010 case AUDIO_STREAM_ALARM:
1011 mAttr.usage = AUDIO_USAGE_ALARM;
1012 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1013 break;
1014 case AUDIO_STREAM_NOTIFICATION:
1015 mAttr.usage = AUDIO_USAGE_NOTIFICATION;
1016 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1017 break;
1018 case AUDIO_STREAM_DTMF:
1019 mAttr.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
1020 mAttr.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1021 break;
1022 case AUDIO_STREAM_ACCESSIBILITY:
1023 mAttr.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
1024 mAttr.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1025 break;
1026 case AUDIO_STREAM_ASSISTANT:
1027 mAttr.usage = AUDIO_USAGE_ASSISTANT;
1028 mAttr.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1029 break;
1030 case AUDIO_STREAM_REROUTING:
1031 case AUDIO_STREAM_PATCH:
1032 mAttr.usage = AUDIO_USAGE_VIRTUAL_SOURCE;
1033 // unknown content type
1034 break;
1035 case AUDIO_STREAM_CALL_ASSISTANT:
1036 mAttr.usage = AUDIO_USAGE_CALL_ASSISTANT;
1037 mAttr.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1038 break;
1039 default:
1040 break;
1041 }
1042 }
1043 }
1044
~Track()1045 Track::~Track()
1046 {
1047 ALOGV("%s(%d)", __func__, mId);
1048
1049 // The destructor would clear mSharedBuffer,
1050 // but it will not push the decremented reference count,
1051 // leaving the client's IMemory dangling indefinitely.
1052 // This prevents that leak.
1053 if (mSharedBuffer != 0) {
1054 mSharedBuffer.clear();
1055 }
1056 }
1057
initCheck() const1058 status_t Track::initCheck() const
1059 {
1060 status_t status = TrackBase::initCheck();
1061 if (status == NO_ERROR && mCblk == nullptr) {
1062 status = NO_MEMORY;
1063 }
1064 return status;
1065 }
1066
destroy()1067 void Track::destroy()
1068 {
1069 // NOTE: destroyTrack_l() can remove a strong reference to this Track
1070 // by removing it from mTracks vector, so there is a risk that this Tracks's
1071 // destructor is called. As the destructor needs to lock mLock,
1072 // we must acquire a strong reference on this Track before locking mLock
1073 // here so that the destructor is called only when exiting this function.
1074 // On the other hand, as long as Track::destroy() is only called by
1075 // TrackHandle destructor, the TrackHandle still holds a strong ref on
1076 // this Track with its member mTrack.
1077 sp<Track> keep(this);
1078 { // scope for mLock
1079 bool wasActive = false;
1080 const sp<IAfThreadBase> thread = mThread.promote();
1081 if (thread != 0) {
1082 audio_utils::unique_lock ul(thread->mutex());
1083 thread->waitWhileThreadBusy_l(ul);
1084
1085 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1086 wasActive = playbackThread->destroyTrack_l(this);
1087 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
1088 }
1089 if (isExternalTrack() && !wasActive) {
1090 // If the track is not active, the TrackHandle is responsible for
1091 // releasing the port id, not the ThreadBase::threadLoop().
1092 // At this point, there is no concurrency issue as the track is going away.
1093 AudioSystem::releaseOutput(mPortId);
1094 }
1095 }
1096 }
1097
appendDumpHeader(String8 & result) const1098 void Track::appendDumpHeader(String8& result) const
1099 {
1100 const auto res = IAfTrack::getLogHeader();
1101 result.append(res.data(), res.size());
1102 }
1103
appendDump(String8 & result,bool active) const1104 void Track::appendDump(String8& result, bool active) const
1105 {
1106 char trackType;
1107 switch (mType) {
1108 case TYPE_DEFAULT:
1109 case TYPE_OUTPUT:
1110 if (isStatic()) {
1111 trackType = 'S'; // static
1112 } else {
1113 trackType = ' '; // normal
1114 }
1115 break;
1116 case TYPE_PATCH:
1117 trackType = 'P';
1118 break;
1119 default:
1120 trackType = '?';
1121 }
1122
1123 if (isFastTrack()) {
1124 result.appendFormat("F%d %c %6d", mFastIndex, trackType, mId);
1125 } else {
1126 result.appendFormat(" %c %6d", trackType, mId);
1127 }
1128
1129 char nowInUnderrun;
1130 switch (mObservedUnderruns.mBitFields.mMostRecent) {
1131 case UNDERRUN_FULL:
1132 nowInUnderrun = ' ';
1133 break;
1134 case UNDERRUN_PARTIAL:
1135 nowInUnderrun = '<';
1136 break;
1137 case UNDERRUN_EMPTY:
1138 nowInUnderrun = '*';
1139 break;
1140 default:
1141 nowInUnderrun = '?';
1142 break;
1143 }
1144
1145 char fillingStatus;
1146 switch (mFillingStatus) {
1147 case FS_INVALID:
1148 fillingStatus = 'I';
1149 break;
1150 case FS_FILLING:
1151 fillingStatus = 'f';
1152 break;
1153 case FS_FILLED:
1154 fillingStatus = 'F';
1155 break;
1156 case FS_ACTIVE:
1157 fillingStatus = 'A';
1158 break;
1159 default:
1160 fillingStatus = '?';
1161 break;
1162 }
1163
1164 // clip framesReadySafe to max representation in dump
1165 const size_t framesReadySafe =
1166 std::min(mAudioTrackServerProxy->framesReadySafe(), (size_t)99999999);
1167
1168 // obtain volumes
1169 const gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
1170 const std::pair<float /* volume */, bool /* active */> vsVolume =
1171 mVolumeHandler->getLastVolume();
1172
1173 // Our effective frame count is obtained by ServerProxy::getBufferSizeInFrames()
1174 // as it may be reduced by the application.
1175 const size_t bufferSizeInFrames = (size_t)mAudioTrackServerProxy->getBufferSizeInFrames();
1176 // Check whether the buffer size has been modified by the app.
1177 const char modifiedBufferChar = bufferSizeInFrames < mFrameCount
1178 ? 'r' /* buffer reduced */: bufferSizeInFrames > mFrameCount
1179 ? 'e' /* error */ : ' ' /* identical */;
1180
1181 result.appendFormat("%7s %7u/%7u %7u %7u %2s 0x%03X "
1182 "%08X %08X %6u "
1183 "%2u %3x %2x "
1184 "%5.2g %5.2g %5.2g %5.2g%c %11.2g %10s "
1185 "%08X %6zu%c %6zu %c %9u%c %7u %10s %12s",
1186 active ? "yes" : "no",
1187 mClient ? mClient->pid() : getpid() ,
1188 mClient ? mClient->uid() : getuid(),
1189 mSessionId,
1190 mPortId,
1191 getTrackStateAsCodedString(),
1192 mCblk->mFlags,
1193
1194 mFormat,
1195 mChannelMask,
1196 sampleRate(),
1197
1198 mStreamType,
1199 mAttr.usage,
1200 mAttr.content_type,
1201
1202 20.0 * log10(mFinalVolume),
1203 20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
1204 20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
1205 20.0 * log10(vsVolume.first), // VolumeShaper(s) total volume
1206 vsVolume.second ? 'A' : ' ', // if any VolumeShapers active
1207 20.0 * log10(mVolume),
1208 getPortMute() ? "true" : "false",
1209
1210 mCblk->mServer,
1211 bufferSizeInFrames,
1212 modifiedBufferChar,
1213 framesReadySafe,
1214 fillingStatus,
1215 mAudioTrackServerProxy->getUnderrunFrames(),
1216 nowInUnderrun,
1217 (unsigned)mAudioTrackServerProxy->framesFlushed() % 10000000,
1218 isBitPerfect() ? "true" : "false",
1219 getInternalMute() ? "true" : "false"
1220 );
1221
1222 if (isServerLatencySupported()) {
1223 double latencyMs;
1224 bool fromTrack;
1225 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
1226 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
1227 // or 'k' if estimated from kernel because track frames haven't been presented yet.
1228 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
1229 } else {
1230 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
1231 }
1232 }
1233 result.append("\n");
1234 }
1235
sampleRate() const1236 uint32_t Track::sampleRate() const {
1237 return mAudioTrackServerProxy->getSampleRate();
1238 }
1239
1240 // AudioBufferProvider interface
getNextBuffer(AudioBufferProvider::Buffer * buffer)1241 status_t Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
1242 {
1243 ServerProxy::Buffer buf;
1244 size_t desiredFrames = buffer->frameCount;
1245 buf.mFrameCount = desiredFrames;
1246 status_t status = mServerProxy->obtainBuffer(&buf);
1247 buffer->frameCount = buf.mFrameCount;
1248 buffer->raw = buf.mRaw;
1249 if (buf.mFrameCount == 0 && !isStopping() && !isStopped() && !isPaused() && !isOffloaded()) {
1250 ALOGV("%s(%d): underrun, framesReady(%zu) < framesDesired(%zd), state: %d",
1251 __func__, mId, buf.mFrameCount, desiredFrames, (int)mState);
1252 mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
1253 if (ATRACE_ENABLED()) [[unlikely]] {
1254 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1255 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_UNDERRUN)
1256 .set(AUDIO_TRACE_OBJECT_KEY_FRAMECOUNT, desiredFrames)
1257 .toTrace().c_str());
1258 }
1259 } else {
1260 mAudioTrackServerProxy->tallyUnderrunFrames(0);
1261 }
1262 return status;
1263 }
1264
releaseBuffer(AudioBufferProvider::Buffer * buffer)1265 void Track::releaseBuffer(AudioBufferProvider::Buffer* buffer)
1266 {
1267 interceptBuffer(*buffer);
1268 TrackBase::releaseBuffer(buffer);
1269 }
1270
1271 // TODO: compensate for time shift between HW modules.
interceptBuffer(const AudioBufferProvider::Buffer & sourceBuffer)1272 void Track::interceptBuffer(
1273 const AudioBufferProvider::Buffer& sourceBuffer) {
1274 auto start = std::chrono::steady_clock::now();
1275 const size_t frameCount = sourceBuffer.frameCount;
1276 if (frameCount == 0) {
1277 return; // No audio to intercept.
1278 // Additionally PatchProxyBufferProvider::obtainBuffer (called by PathTrack::getNextBuffer)
1279 // does not allow 0 frame size request contrary to getNextBuffer
1280 }
1281 TeePatches teePatches;
1282 if (mTeePatchesRWLock.tryReadLock() == NO_ERROR) {
1283 // Cache a copy of tee patches in case it is updated while using.
1284 teePatches = mTeePatches;
1285 mTeePatchesRWLock.unlock();
1286 }
1287 for (auto& teePatch : teePatches) {
1288 IAfPatchRecord* patchRecord = teePatch.patchRecord.get();
1289 const size_t framesWritten = patchRecord->writeFrames(
1290 sourceBuffer.i8, frameCount, mFrameSize);
1291 const size_t framesLeft = frameCount - framesWritten;
1292 ALOGW_IF(framesLeft != 0, "%s(%d) PatchRecord %d can not provide big enough "
1293 "buffer %zu/%zu, dropping %zu frames", __func__, mId, patchRecord->id(),
1294 framesWritten, frameCount, framesLeft);
1295 }
1296 auto spent = ceil<std::chrono::microseconds>(std::chrono::steady_clock::now() - start);
1297 using namespace std::chrono_literals;
1298 // Average is ~20us per track, this should virtually never be logged (Logging takes >200us)
1299 ALOGD_IF(spent > 500us, "%s: took %lldus to intercept %zu tracks", __func__,
1300 spent.count(), teePatches.size());
1301 }
1302
1303 // ExtendedAudioBufferProvider interface
1304
1305 // framesReady() may return an approximation of the number of frames if called
1306 // from a different thread than the one calling Proxy->obtainBuffer() and
1307 // Proxy->releaseBuffer(). Also note there is no mutual exclusion in the
1308 // AudioTrackServerProxy so be especially careful calling with FastTracks.
framesReady() const1309 size_t Track::framesReady() const {
1310 if (mSharedBuffer != 0 && (isStopped() || isStopping())) {
1311 // Static tracks return zero frames immediately upon stopping (for FastTracks).
1312 // The remainder of the buffer is not drained.
1313 return 0;
1314 }
1315 return mAudioTrackServerProxy->framesReady();
1316 }
1317
framesReleased() const1318 int64_t Track::framesReleased() const
1319 {
1320 return mAudioTrackServerProxy->framesReleased();
1321 }
1322
onTimestamp(const ExtendedTimestamp & timestamp)1323 void Track::onTimestamp(const ExtendedTimestamp ×tamp)
1324 {
1325 // This call comes from a FastTrack and should be kept lockless.
1326 // The server side frames are already translated to client frames.
1327 mAudioTrackServerProxy->setTimestamp(timestamp);
1328
1329 // We do not set drained here, as FastTrack timestamp may not go to very last frame.
1330
1331 // Compute latency.
1332 // TODO: Consider whether the server latency may be passed in by FastMixer
1333 // as a constant for all active FastTracks.
1334 const double latencyMs = timestamp.getOutputServerLatencyMs(sampleRate());
1335 mServerLatencyFromTrack.store(true);
1336 mServerLatencyMs.store(latencyMs);
1337 }
1338
1339 // Don't call for fast tracks; the framesReady() could result in priority inversion
isReady() const1340 bool Track::isReady() const {
1341 if (mFillingStatus != FS_FILLING || isStopped() || isPausing()) {
1342 return true;
1343 }
1344
1345 if (isStopping()) {
1346 if (framesReady() > 0) {
1347 mFillingStatus = FS_FILLED;
1348 }
1349 return true;
1350 }
1351
1352 size_t bufferSizeInFrames = mServerProxy->getBufferSizeInFrames();
1353 // Note: mServerProxy->getStartThresholdInFrames() is clamped.
1354 const size_t startThresholdInFrames = mServerProxy->getStartThresholdInFrames();
1355 const size_t framesToBeReady = std::clamp( // clamp again to validate client values.
1356 std::min(startThresholdInFrames, bufferSizeInFrames), size_t(1), mFrameCount);
1357
1358 if (framesReady() >= framesToBeReady || (mCblk->mFlags & CBLK_FORCEREADY)) {
1359 ALOGV("%s(%d): consider track ready with %zu/%zu, target was %zu)",
1360 __func__, mId, framesReady(), bufferSizeInFrames, framesToBeReady);
1361 mFillingStatus = FS_FILLED;
1362 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1363 return true;
1364 }
1365 return false;
1366 }
1367
start(AudioSystem::sync_event_t event __unused,audio_session_t triggerSession __unused)1368 status_t Track::start(AudioSystem::sync_event_t event __unused,
1369 audio_session_t triggerSession __unused)
1370 {
1371 if (ATRACE_ENABLED()) [[unlikely]] {
1372 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1373 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_START)
1374 .toTrace().c_str());
1375 }
1376 status_t status = NO_ERROR;
1377 ALOGV("%s(%d): calling pid %d session %d",
1378 __func__, mId, IPCThreadState::self()->getCallingPid(), mSessionId);
1379
1380 const sp<IAfThreadBase> thread = mThread.promote();
1381 if (thread != 0) {
1382 if (isOffloaded()) {
1383 audio_utils::lock_guard _laf(thread->afThreadCallback()->mutex());
1384 const bool nonOffloadableGlobalEffectEnabled =
1385 thread->afThreadCallback()->isNonOffloadableGlobalEffectEnabled_l();
1386 audio_utils::lock_guard _lth(thread->mutex());
1387 sp<IAfEffectChain> ec = thread->getEffectChain_l(mSessionId);
1388 if (nonOffloadableGlobalEffectEnabled ||
1389 (ec != 0 && ec->isNonOffloadableEnabled())) {
1390 invalidate();
1391 return PERMISSION_DENIED;
1392 }
1393 }
1394 audio_utils::unique_lock ul(thread->mutex());
1395 thread->waitWhileThreadBusy_l(ul);
1396
1397 track_state state = mState;
1398 // here the track could be either new, or restarted
1399 // in both cases "unstop" the track
1400
1401 // initial state-stopping. next state-pausing.
1402 // What if resume is called ?
1403
1404 if (state == FLUSHED) {
1405 // avoid underrun glitches when starting after flush
1406 reset();
1407 }
1408
1409 // clear mPauseHwPending because of pause (and possibly flush) during underrun.
1410 mPauseHwPending = false;
1411 if (state == PAUSED || state == PAUSING) {
1412 if (mResumeToStopping) {
1413 // happened we need to resume to STOPPING_1
1414 mState = TrackBase::STOPPING_1;
1415 ALOGV("%s(%d): PAUSED => STOPPING_1 on thread %d",
1416 __func__, mId, (int)mThreadIoHandle);
1417 } else {
1418 mState = TrackBase::RESUMING;
1419 ALOGV("%s(%d): PAUSED => RESUMING on thread %d",
1420 __func__, mId, (int)mThreadIoHandle);
1421 }
1422 } else {
1423 mState = TrackBase::ACTIVE;
1424 ALOGV("%s(%d): ? => ACTIVE on thread %d",
1425 __func__, mId, (int)mThreadIoHandle);
1426 }
1427
1428 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1429
1430 // states to reset position info for pcm tracks
1431 if (audio_is_linear_pcm(mFormat)
1432 && (state == IDLE || state == STOPPED || state == FLUSHED
1433 || state == PAUSED)) {
1434 mFrameMap.reset();
1435
1436 if (!isFastTrack()) {
1437 // Start point of track -> sink frame map. If the HAL returns a
1438 // frame position smaller than the first written frame in
1439 // updateTrackFrameInfo, the timestamp can be interpolated
1440 // instead of using a larger value.
1441 mFrameMap.push(mAudioTrackServerProxy->framesReleased(),
1442 playbackThread->framesWritten());
1443 }
1444 }
1445 if (isFastTrack()) {
1446 // refresh fast track underruns on start because that field is never cleared
1447 // by the fast mixer; furthermore, the same track can be recycled, i.e. start
1448 // after stop.
1449 mObservedUnderruns = playbackThread->getFastTrackUnderruns(mFastIndex);
1450 }
1451 status = playbackThread->addTrack_l(this);
1452 if (status == INVALID_OPERATION || status == PERMISSION_DENIED || status == DEAD_OBJECT) {
1453 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1454 // restore previous state if start was rejected by policy manager
1455 if (status == PERMISSION_DENIED || status == DEAD_OBJECT) {
1456 mState = state;
1457 }
1458 }
1459
1460 // Audio timing metrics are computed a few mix cycles after starting.
1461 {
1462 mLogStartCountdown = LOG_START_COUNTDOWN;
1463 mLogStartTimeNs = systemTime();
1464 mLogStartFrames = mAudioTrackServerProxy->getTimestamp()
1465 .mPosition[ExtendedTimestamp::LOCATION_KERNEL];
1466 mLogLatencyMs = 0.;
1467 }
1468 mLogForceVolumeUpdate = true; // at least one volume logged for metrics when starting.
1469
1470 if (status == NO_ERROR || status == ALREADY_EXISTS) {
1471 // for streaming tracks, remove the buffer read stop limit.
1472 mAudioTrackServerProxy->start();
1473 }
1474
1475 // track was already in the active list, not a problem
1476 if (status == ALREADY_EXISTS) {
1477 status = NO_ERROR;
1478 } else {
1479 // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
1480 // It is usually unsafe to access the server proxy from a binder thread.
1481 // But in this case we know the mixer thread (whether normal mixer or fast mixer)
1482 // isn't looking at this track yet: we still hold the normal mixer thread lock,
1483 // and for fast tracks the track is not yet in the fast mixer thread's active set.
1484 // For static tracks, this is used to acknowledge change in position or loop.
1485 ServerProxy::Buffer buffer;
1486 buffer.mFrameCount = 1;
1487 (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
1488 }
1489 if (status == NO_ERROR) {
1490 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->start(); });
1491 }
1492 } else {
1493 status = BAD_VALUE;
1494 }
1495 if (status == NO_ERROR) {
1496 // send format to AudioManager for playback activity monitoring
1497 const sp<IAudioManager> audioManager =
1498 thread->afThreadCallback()->getOrCreateAudioManager();
1499 if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
1500 std::unique_ptr<os::PersistableBundle> bundle =
1501 std::make_unique<os::PersistableBundle>();
1502 bundle->putBoolean(String16(kExtraPlayerEventSpatializedKey),
1503 isSpatialized());
1504 bundle->putInt(String16(kExtraPlayerEventSampleRateKey), mSampleRate);
1505 bundle->putInt(String16(kExtraPlayerEventChannelMaskKey), mChannelMask);
1506 status_t result = audioManager->portEvent(mPortId,
1507 PLAYER_UPDATE_FORMAT, bundle);
1508 if (result != OK) {
1509 ALOGE("%s: unable to send playback format for port ID %d, status error %d",
1510 __func__, mPortId, result);
1511 }
1512 }
1513 }
1514 return status;
1515 }
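// State transitions performed by start() above (derived from the code):
//   FLUSHED          -> reset(), then treated as a fresh start -> ACTIVE
//   PAUSED / PAUSING -> RESUMING, or STOPPING_1 if mResumeToStopping is set
//   other states     -> ACTIVE
// If addTrack_l() fails with PERMISSION_DENIED or DEAD_OBJECT, the previous state is restored.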
1516
1517 void Track::stop()
1518 {
1519 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1520 if (ATRACE_ENABLED()) [[unlikely]] {
1521 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1522 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_STOP)
1523 .toTrace().c_str());
1524 }
1525 const sp<IAfThreadBase> thread = mThread.promote();
1526 if (thread != 0) {
1527 audio_utils::unique_lock ul(thread->mutex());
1528 thread->waitWhileThreadBusy_l(ul);
1529
1530 track_state state = mState;
1531 if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
1532 // If the track is not active (PAUSED and buffers full), flush buffers
1533 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1534 if (!playbackThread->isTrackActive(this)) {
1535 reset();
1536 mState = STOPPED;
1537 } else if (isPatchTrack() || (!isFastTrack() && !isOffloaded() && !isDirect())) {
1538                 // for a PatchTrack (whether fast or not), do not drain but move directly
1539 // to STOPPED to avoid closing while active.
1540 mState = STOPPED;
1541 } else {
1542                 // For fast tracks, prepareTracks_l() will set the state to STOPPING_2 when
1543                 // presentation is complete.
1544 // For an offloaded track this starts a drain and state will
1545 // move to STOPPING_2 when drain completes and then STOPPED
1546 mState = STOPPING_1;
1547 if (isOffloaded()) {
1548 mRetryCount = IAfPlaybackThread::kMaxTrackStopRetriesOffload;
1549 }
1550 }
1551 playbackThread->broadcast_l();
1552 ALOGV("%s(%d): not stopping/stopped => stopping/stopped on thread %d",
1553 __func__, mId, (int)mThreadIoHandle);
1554 }
1555 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->stop(); });
1556 }
1557 }
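// stop() above only acts on RESUMING/ACTIVE/PAUSING/PAUSED tracks:
//   not on the thread's active list   -> reset() and STOPPED immediately
//   PatchTrack or normal mixed track  -> STOPPED without draining
//   fast / offloaded / direct track   -> STOPPING_1, completed by prepareTracks_l()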
1558
1559 void Track::pause()
1560 {
1561 ALOGV("%s(%d): calling pid %d", __func__, mId, IPCThreadState::self()->getCallingPid());
1562 if (ATRACE_ENABLED()) [[unlikely]] {
1563 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1564 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_PAUSE)
1565 .toTrace().c_str());
1566 }
1567 const sp<IAfThreadBase> thread = mThread.promote();
1568 if (thread != 0) {
1569 audio_utils::unique_lock ul(thread->mutex());
1570 thread->waitWhileThreadBusy_l(ul);
1571
1572 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1573 switch (mState) {
1574 case STOPPING_1:
1575 case STOPPING_2:
1576 if (!isOffloaded()) {
1577 /* nothing to do if track is not offloaded */
1578 break;
1579 }
1580
1581 // Offloaded track was draining, we need to carry on draining when resumed
1582 mResumeToStopping = true;
1583 FALLTHROUGH_INTENDED;
1584 case ACTIVE:
1585 case RESUMING:
1586 mState = PAUSING;
1587 ALOGV("%s(%d): ACTIVE/RESUMING => PAUSING on thread %d",
1588 __func__, mId, (int)mThreadIoHandle);
1589 if (isOffloadedOrDirect()) {
1590 mPauseHwPending = true;
1591 }
1592 playbackThread->broadcast_l();
1593 break;
1594
1595 default:
1596 break;
1597 }
1598 // Pausing the TeePatch to avoid a glitch on underrun, at the cost of buffered audio loss.
1599 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->pause(); });
1600 }
1601 }
1602
1603 void Track::flush()
1604 {
1605 ALOGV("%s(%d)", __func__, mId);
1606 if (ATRACE_ENABLED()) [[unlikely]] {
1607 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
1608 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_FLUSH)
1609 .toTrace().c_str());
1610 }
1611 const sp<IAfThreadBase> thread = mThread.promote();
1612 if (thread != 0) {
1613 audio_utils::unique_lock ul(thread->mutex());
1614 thread->waitWhileThreadBusy_l(ul);
1615
1616 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1617
1618 // Flush the ring buffer now if the track is not active in the PlaybackThread.
1619 // Otherwise the flush would not be done until the track is resumed.
1620         // Requires FastTrack removal to be BLOCK_UNTIL_ACKED
1621 if (!playbackThread->isTrackActive(this)) {
1622 (void)mServerProxy->flushBufferIfNeeded();
1623 }
1624
1625 if (isOffloaded()) {
1626 // If offloaded we allow flush during any state except terminated
1627 // and keep the track active to avoid problems if user is seeking
1628 // rapidly and underlying hardware has a significant delay handling
1629 // a pause
1630 if (isTerminated()) {
1631 return;
1632 }
1633
1634 ALOGV("%s(%d): offload flush", __func__, mId);
1635 reset();
1636
1637 if (mState == STOPPING_1 || mState == STOPPING_2) {
1638 ALOGV("%s(%d): flushed in STOPPING_1 or 2 state, change state to ACTIVE",
1639 __func__, mId);
1640 mState = ACTIVE;
1641 }
1642
1643 mFlushHwPending = true;
1644 mResumeToStopping = false;
1645 } else {
1646 if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
1647 mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
1648 return;
1649 }
1650 // No point remaining in PAUSED state after a flush => go to
1651 // FLUSHED state
1652 mState = FLUSHED;
1653 // do not reset the track if it is still in the process of being stopped or paused.
1654 // this will be done by prepareTracks_l() when the track is stopped.
1655 // prepareTracks_l() will see mState == FLUSHED, then
1656 // remove from active track list, reset(), and trigger presentation complete
1657 if (isDirect()) {
1658 mFlushHwPending = true;
1659 }
1660 if (!playbackThread->isTrackActive(this)) {
1661 reset();
1662 }
1663 }
1664 // Prevent flush being lost if the track is flushed and then resumed
1665 // before mixer thread can run. This is important when offloading
1666 // because the hardware buffer could hold a large amount of audio
1667 playbackThread->broadcast_l();
1668 // Flush the Tee to avoid on resume playing old data and glitching on the transition to
1669 // new data
1670 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->flush(); });
1671 }
1672 }
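// flush() above: an offloaded track accepts a flush in any non-terminated state and stays
// ACTIVE (mFlushHwPending requests the HAL flush); other tracks only honor a flush in
// stopped/paused/idle-like states and move to FLUSHED.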
1673
1674 // must be called with thread lock held
1675 void Track::flushAck()
1676 {
1677 if (!isOffloaded() && !isDirect()) {
1678 return;
1679 }
1680
1681 // Clear the client ring buffer so that the app can prime the buffer while paused.
1682 // Otherwise it might not get cleared until playback is resumed and obtainBuffer() is called.
1683 mServerProxy->flushBufferIfNeeded();
1684
1685 mFlushHwPending = false;
1686 }
1687
1688 void Track::pauseAck()
1689 {
1690 mPauseHwPending = false;
1691 }
1692
1693 void Track::reset()
1694 {
1695 // Do not reset twice to avoid discarding data written just after a flush and before
1696 // the audioflinger thread detects the track is stopped.
1697 if (!mResetDone) {
1698 // Force underrun condition to avoid false underrun callback until first data is
1699 // written to buffer
1700 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
1701 mFillingStatus = FS_FILLING;
1702 mResetDone = true;
1703 if (mState == FLUSHED) {
1704 mState = IDLE;
1705 }
1706 }
1707 }
1708
1709 status_t Track::setParameters(const String8& keyValuePairs)
1710 {
1711 const sp<IAfThreadBase> thread = mThread.promote();
1712 if (thread == 0) {
1713 ALOGE("%s(%d): thread is dead", __func__, mId);
1714 return FAILED_TRANSACTION;
1715 } else if (thread->type() == IAfThreadBase::DIRECT
1716 || thread->type() == IAfThreadBase::OFFLOAD) {
1717 return thread->setParameters(keyValuePairs);
1718 } else {
1719 return PERMISSION_DENIED;
1720 }
1721 }
1722
1723 status_t Track::selectPresentation(int presentationId,
1724 int programId) {
1725 const sp<IAfThreadBase> thread = mThread.promote();
1726 if (thread == 0) {
1727 ALOGE("thread is dead");
1728 return FAILED_TRANSACTION;
1729 } else if (thread->type() == IAfThreadBase::DIRECT
1730 || thread->type() == IAfThreadBase::OFFLOAD) {
1731 auto directOutputThread = thread->asIAfDirectOutputThread().get();
1732 return directOutputThread->selectPresentation(presentationId, programId);
1733 }
1734 return INVALID_OPERATION;
1735 }
1736
1737 void Track::setPortVolume(float volume) {
1738 mVolume = volume;
1739 if (mType != TYPE_PATCH) {
1740 // Do not recursively propagate a PatchTrack setPortVolume to
1741 // downstream PatchTracks.
1742 forEachTeePatchTrack_l([volume](const auto &patchTrack) {
1743 patchTrack->setPortVolume(volume);
1744 });
1745 }
1746 }
1747
1748 void Track::setPortMute(bool muted) {
1749 if (mMutedFromPort == muted) {
1750 return;
1751 }
1752 mMutedFromPort = muted;
1753 if (mType != TYPE_PATCH) {
1754 // Do not recursively propagate a PatchTrack setPortVolume to
1755 // downstream PatchTracks.
1756 forEachTeePatchTrack_l([muted](const auto &patchTrack) {
1757 patchTrack->setPortMute(muted);
1758 });
1759 }
1760 }
1761
1762 VolumeShaper::Status Track::applyVolumeShaper(
1763 const sp<VolumeShaper::Configuration>& configuration,
1764 const sp<VolumeShaper::Operation>& operation)
1765 {
1766 VolumeShaper::Status status = mVolumeHandler->applyVolumeShaper(configuration, operation);
1767
1768 if (isOffloadedOrDirect()) {
1769 // Signal thread to fetch new volume.
1770 const sp<IAfThreadBase> thread = mThread.promote();
1771 if (thread != 0) {
1772 audio_utils::lock_guard _l(thread->mutex());
1773 thread->broadcast_l();
1774 }
1775 }
1776 return status;
1777 }
1778
1779 sp<VolumeShaper::State> Track::getVolumeShaperState(int id) const
1780 {
1781 // Note: We don't check if Thread exists.
1782
1783 // mVolumeHandler is thread safe.
1784 return mVolumeHandler->getVolumeShaperState(id);
1785 }
1786
1787 void Track::setFinalVolume(float volumeLeft, float volumeRight)
1788 {
1789 mFinalVolumeLeft = volumeLeft;
1790 mFinalVolumeRight = volumeRight;
1791 const float volume = (volumeLeft + volumeRight) * 0.5f;
1792 if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
1793 mFinalVolume = volume;
1794 setMetadataHasChanged();
1795 mLogForceVolumeUpdate = true;
1796 }
1797 if (mLogForceVolumeUpdate) {
1798 mLogForceVolumeUpdate = false;
1799 mTrackMetrics.logVolume(mFinalVolume);
1800 }
1801 }
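// Illustrative example (values are hypothetical): setFinalVolume(0.5f, 1.0f) stores
// mFinalVolumeLeft = 0.5 and mFinalVolumeRight = 1.0, and the mono-equivalent
// mFinalVolume = (0.5 + 1.0) * 0.5 = 0.75 is what gets logged and exported as metadata gain.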
1802
1803 void Track::copyMetadataTo(MetadataInserter& backInserter) const
1804 {
1805 // Do not forward metadata for PatchTrack with unspecified stream type
1806 if (mStreamType == AUDIO_STREAM_PATCH) {
1807 return;
1808 }
1809
1810 playback_track_metadata_v7_t metadata;
1811 metadata.base = {
1812 .usage = mAttr.usage,
1813 .content_type = mAttr.content_type,
1814 .gain = mFinalVolume,
1815 };
1816
1817 metadata.channel_mask = mChannelMask;
1818 strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
1819 *backInserter++ = metadata;
1820 }
1821
1822 void Track::updateTeePatches_l() {
1823 if (mTeePatchesToUpdate.has_value()) {
1824 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->destroy(); });
1825 {
1826 RWLock::AutoWLock writeLock(mTeePatchesRWLock);
1827 mTeePatches = std::move(mTeePatchesToUpdate.value());
1828 }
1829 if (mState == TrackBase::ACTIVE || mState == TrackBase::RESUMING ||
1830 mState == TrackBase::STOPPING_1) {
1831 forEachTeePatchTrack_l([](const auto& patchTrack) { patchTrack->start(); });
1832 }
1833 mTeePatchesToUpdate.reset();
1834 }
1835 }
1836
1837 void Track::setTeePatchesToUpdate_l(TeePatches teePatchesToUpdate) {
1838 ALOGW_IF(mTeePatchesToUpdate.has_value(),
1839 "%s, existing tee patches to update will be ignored", __func__);
1840 mTeePatchesToUpdate = std::move(teePatchesToUpdate);
1841 }
1842
1843 // must be called with player thread lock held
1844 void Track::processMuteEvent_l(const sp<IAudioManager>& audioManager,
1845                                mute_state_t muteState)
1846 {
1847 if (mMuteState == muteState) {
1848 // mute state did not change, do nothing
1849 return;
1850 }
1851
1852 status_t result = UNKNOWN_ERROR;
1853 if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
1854 if (mMuteEventExtras == nullptr) {
1855 mMuteEventExtras = std::make_unique<os::PersistableBundle>();
1856 }
1857 mMuteEventExtras->putInt(String16(kExtraPlayerEventMuteKey), static_cast<int>(muteState));
1858
1859 result = audioManager->portEvent(mPortId, PLAYER_UPDATE_MUTED, mMuteEventExtras);
1860 }
1861
1862 if (result == OK) {
1863 ALOGI("%s(%d): processed mute state for port ID %d from %#x to %#x", __func__, id(),
1864 mPortId, static_cast<int>(mMuteState.load()), static_cast<int>(muteState));
1865 mMuteState = muteState;
1866 } else {
1867 ALOGW("%s(%d): cannot process mute state for port ID %d, status error %d", __func__, id(),
1868 mPortId, result);
1869 }
1870 }
1871
1872 status_t Track::getTimestamp(AudioTimestamp& timestamp)
1873 {
1874 if (!isOffloaded() && !isDirect()) {
1875 return INVALID_OPERATION; // normal tracks handled through SSQ
1876 }
1877 const sp<IAfThreadBase> thread = mThread.promote();
1878 if (thread == 0) {
1879 return INVALID_OPERATION;
1880 }
1881
1882 audio_utils::lock_guard _l(thread->mutex());
1883 auto* const playbackThread = thread->asIAfPlaybackThread().get();
1884 return playbackThread->getTimestamp_l(timestamp);
1885 }
1886
1887 status_t Track::attachAuxEffect(int EffectId)
1888 {
1889 const sp<IAfThreadBase> thread = mThread.promote();
1890 if (thread == nullptr) {
1891 return DEAD_OBJECT;
1892 }
1893
1894 auto dstThread = thread->asIAfPlaybackThread();
1895 // srcThread is initialized by call to moveAuxEffectToIo()
1896 sp<IAfPlaybackThread> srcThread;
1897 const auto& af = mClient->afClientCallback();
1898 status_t status = af->moveAuxEffectToIo(EffectId, dstThread, &srcThread);
1899
1900 if (EffectId != 0 && status == NO_ERROR) {
1901 status = dstThread->attachAuxEffect(this, EffectId);
1902 if (status == NO_ERROR) {
1903             AudioSystem::moveEffectsToIo(std::vector<int>{EffectId}, dstThread->id()); // one-element vector containing EffectId
1904 }
1905 }
1906
1907 if (status != NO_ERROR && srcThread != nullptr) {
1908 af->moveAuxEffectToIo(EffectId, srcThread, &dstThread);
1909 }
1910 return status;
1911 }
1912
1913 void Track::setAuxBuffer(int EffectId, int32_t *buffer)
1914 {
1915 mAuxEffectId = EffectId;
1916 mAuxBuffer = buffer;
1917 }
1918
1919 // presentationComplete verified by frames, used by Mixed tracks.
1920 bool Track::presentationComplete(
1921 int64_t framesWritten, size_t audioHalFrames)
1922 {
1923 // TODO: improve this based on FrameMap if it exists, to ensure full drain.
1924 // This assists in proper timestamp computation as well as wakelock management.
1925
1926 // a track is considered presented when the total number of frames written to audio HAL
1927 // corresponds to the number of frames written when presentationComplete() is called for the
1928 // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
1929 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1930 // to detect when all frames have been played. In this case framesWritten isn't
1931 // useful because it doesn't always reflect whether there is data in the h/w
1932 // buffers, particularly if a track has been paused and resumed during draining
1933 ALOGV("%s(%d): presentationComplete() mPresentationCompleteFrames %lld framesWritten %lld",
1934 __func__, mId,
1935 (long long)mPresentationCompleteFrames, (long long)framesWritten);
1936 if (mPresentationCompleteFrames == 0) {
1937 mPresentationCompleteFrames = framesWritten + audioHalFrames;
1938 ALOGV("%s(%d): set:"
1939 " mPresentationCompleteFrames %lld audioHalFrames %zu",
1940 __func__, mId,
1941 (long long)mPresentationCompleteFrames, audioHalFrames);
1942 }
1943
1944 bool complete;
1945 if (isFastTrack()) { // does not go through linear map
1946 complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
1947 ALOGV("%s(%d): %s framesWritten:%lld mPresentationCompleteFrames:%lld",
1948 __func__, mId, (complete ? "complete" : "waiting"),
1949 (long long) framesWritten, (long long) mPresentationCompleteFrames);
1950 } else { // Normal tracks, OutputTracks, and PatchTracks
1951 complete = framesWritten >= (int64_t) mPresentationCompleteFrames
1952 && mAudioTrackServerProxy->isDrained();
1953 }
1954
1955 if (complete) {
1956 notifyPresentationComplete();
1957 return true;
1958 }
1959 return false;
1960 }
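// Illustrative example (numbers are hypothetical): if the first call arrives with
// framesWritten = 48000 and audioHalFrames = 1024, mPresentationCompleteFrames becomes
// 49024; a non-fast track is then considered complete once framesWritten >= 49024 and
// the server proxy reports isDrained().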
1961
1962 // presentationComplete checked by time, used by DirectTracks.
1963 bool Track::presentationComplete(uint32_t latencyMs)
1964 {
1965 // For Offloaded or Direct tracks.
1966
1967     // For a direct track, a time-based check is used for presentationComplete.
1968
1969 // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
1970 // to detect when all frames have been played. In this case latencyMs isn't
1971 // useful because it doesn't always reflect whether there is data in the h/w
1972 // buffers, particularly if a track has been paused and resumed during draining
1973
1974 constexpr float MIN_SPEED = 0.125f; // min speed scaling allowed for timely response.
1975 if (mPresentationCompleteTimeNs == 0) {
1976 mPresentationCompleteTimeNs = systemTime() + latencyMs * 1e6 / fmax(mSpeed, MIN_SPEED);
1977 ALOGV("%s(%d): set: latencyMs %u mPresentationCompleteTimeNs:%lld",
1978 __func__, mId, latencyMs, (long long) mPresentationCompleteTimeNs);
1979 }
1980
1981 bool complete;
1982 if (isOffloaded()) {
1983 complete = true;
1984 } else { // Direct
1985 complete = systemTime() >= mPresentationCompleteTimeNs;
1986 ALOGV("%s(%d): %s", __func__, mId, (complete ? "complete" : "waiting"));
1987 }
1988 if (complete) {
1989 notifyPresentationComplete();
1990 return true;
1991 }
1992 return false;
1993 }
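// Illustrative example (numbers are hypothetical): with latencyMs = 100 and mSpeed = 0.1
// (clamped to MIN_SPEED = 0.125), the deadline is systemTime() + 100e6 / 0.125 ns,
// i.e. 800 ms after the first call; a Direct track completes when that deadline passes,
// an offloaded track immediately.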
1994
1995 void Track::notifyPresentationComplete()
1996 {
1997 // This only triggers once. TODO: should we enforce this?
1998 triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
1999 mAudioTrackServerProxy->setStreamEndDone();
2000 }
2001
2002 void Track::triggerEvents(AudioSystem::sync_event_t type)
2003 {
2004 for (auto it = mSyncEvents.begin(); it != mSyncEvents.end();) {
2005 if ((*it)->type() == type) {
2006 ALOGV("%s: triggering SyncEvent type %d", __func__, type);
2007 (*it)->trigger();
2008 it = mSyncEvents.erase(it);
2009 } else {
2010 ++it;
2011 }
2012 }
2013 }
2014
2015 // implement VolumeBufferProvider interface
2016
2017 gain_minifloat_packed_t Track::getVolumeLR() const
2018 {
2019 // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
2020 ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
2021 gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
2022 float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
2023 float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
2024 // track volumes come from shared memory, so can't be trusted and must be clamped
2025 if (vl > GAIN_FLOAT_UNITY) {
2026 vl = GAIN_FLOAT_UNITY;
2027 }
2028 if (vr > GAIN_FLOAT_UNITY) {
2029 vr = GAIN_FLOAT_UNITY;
2030 }
2031 // now apply the cached master volume and stream type volume;
2032 // this is trusted but lacks any synchronization or barrier so may be stale
2033 float v = mCachedVolume;
2034 vl *= v;
2035 vr *= v;
2036 // re-combine into packed minifloat
2037 vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
2038 // FIXME look at mute, pause, and stop flags
2039 return vlr;
2040 }
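// Illustrative example (values are hypothetical): if shared memory reports L = 1.3 and
// R = 0.8 with mCachedVolume = 0.5, the left gain is clamped to GAIN_FLOAT_UNITY (1.0)
// and the packed result is approximately L = 0.5, R = 0.4.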
2041
2042 status_t Track::setSyncEvent(
2043 const sp<audioflinger::SyncEvent>& event)
2044 {
2045 if (isTerminated() || mState == PAUSED ||
2046 ((framesReady() == 0) && ((mSharedBuffer != 0) ||
2047 (mState == STOPPED)))) {
2048 ALOGW("%s(%d): in invalid state %d on session %d %s mode, framesReady %zu",
2049 __func__, mId,
2050 (int)mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
2051 event->cancel();
2052 return INVALID_OPERATION;
2053 }
2054 (void) TrackBase::setSyncEvent(event);
2055 return NO_ERROR;
2056 }
2057
2058 void Track::invalidate()
2059 {
2060 TrackBase::invalidate();
2061 signalClientFlag(CBLK_INVALID);
2062 }
2063
2064 void Track::disable()
2065 {
2066 // TODO(b/142394888): the filling status should also be reset to filling
2067 signalClientFlag(CBLK_DISABLED);
2068 }
2069
2070 bool Track::isDisabled() const {
2071 audio_track_cblk_t* cblk = mCblk;
2072 return (cblk != nullptr)
2073 && ((android_atomic_release_load(&cblk->mFlags) & CBLK_DISABLED) != 0);
2074 }
2075
2076 void Track::signalClientFlag(int32_t flag)
2077 {
2078 // FIXME should use proxy, and needs work
2079 audio_track_cblk_t* cblk = mCblk;
2080 android_atomic_or(flag, &cblk->mFlags);
2081 android_atomic_release_store(0x40000000, &cblk->mFutex);
2082 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
2083 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
2084 }
2085
2086 void Track::signal()
2087 {
2088 const sp<IAfThreadBase> thread = mThread.promote();
2089 if (thread != 0) {
2090 auto* const t = thread->asIAfPlaybackThread().get();
2091 audio_utils::lock_guard _l(t->mutex());
2092 t->broadcast_l();
2093 }
2094 }
2095
2096 status_t Track::getDualMonoMode(audio_dual_mono_mode_t* mode) const
2097 {
2098 status_t status = INVALID_OPERATION;
2099 if (isOffloadedOrDirect()) {
2100 const sp<IAfThreadBase> thread = mThread.promote();
2101 if (thread != nullptr) {
2102 auto* const t = thread->asIAfPlaybackThread().get();
2103 audio_utils::lock_guard _l(t->mutex());
2104 status = t->getOutput_l()->stream->getDualMonoMode(mode);
2105 ALOGD_IF((status == NO_ERROR) && (mDualMonoMode != *mode),
2106 "%s: mode %d inconsistent", __func__, mDualMonoMode);
2107 }
2108 }
2109 return status;
2110 }
2111
2112 status_t Track::setDualMonoMode(audio_dual_mono_mode_t mode)
2113 {
2114 status_t status = INVALID_OPERATION;
2115 if (isOffloadedOrDirect()) {
2116 const sp<IAfThreadBase> thread = mThread.promote();
2117 if (thread != nullptr) {
2118 auto* const t = thread->asIAfPlaybackThread().get();
2119 audio_utils::lock_guard lock(t->mutex());
2120 status = t->getOutput_l()->stream->setDualMonoMode(mode);
2121 if (status == NO_ERROR) {
2122 mDualMonoMode = mode;
2123 }
2124 }
2125 }
2126 return status;
2127 }
2128
2129 status_t Track::getAudioDescriptionMixLevel(float* leveldB) const
2130 {
2131 status_t status = INVALID_OPERATION;
2132 if (isOffloadedOrDirect()) {
2133 sp<IAfThreadBase> thread = mThread.promote();
2134 if (thread != nullptr) {
2135 auto* const t = thread->asIAfPlaybackThread().get();
2136 audio_utils::lock_guard lock(t->mutex());
2137 status = t->getOutput_l()->stream->getAudioDescriptionMixLevel(leveldB);
2138 ALOGD_IF((status == NO_ERROR) && (mAudioDescriptionMixLevel != *leveldB),
2139 "%s: level %.3f inconsistent", __func__, mAudioDescriptionMixLevel);
2140 }
2141 }
2142 return status;
2143 }
2144
2145 status_t Track::setAudioDescriptionMixLevel(float leveldB)
2146 {
2147 status_t status = INVALID_OPERATION;
2148 if (isOffloadedOrDirect()) {
2149 const sp<IAfThreadBase> thread = mThread.promote();
2150 if (thread != nullptr) {
2151 auto* const t = thread->asIAfPlaybackThread().get();
2152 audio_utils::lock_guard lock(t->mutex());
2153 status = t->getOutput_l()->stream->setAudioDescriptionMixLevel(leveldB);
2154 if (status == NO_ERROR) {
2155 mAudioDescriptionMixLevel = leveldB;
2156 }
2157 }
2158 }
2159 return status;
2160 }
2161
2162 status_t Track::getPlaybackRateParameters(
2163 audio_playback_rate_t* playbackRate) const
2164 {
2165 status_t status = INVALID_OPERATION;
2166 if (isOffloadedOrDirect()) {
2167 const sp<IAfThreadBase> thread = mThread.promote();
2168 if (thread != nullptr) {
2169 auto* const t = thread->asIAfPlaybackThread().get();
2170 audio_utils::lock_guard lock(t->mutex());
2171 status = t->getOutput_l()->stream->getPlaybackRateParameters(playbackRate);
2172 ALOGD_IF((status == NO_ERROR) &&
2173 !isAudioPlaybackRateEqual(mPlaybackRateParameters, *playbackRate),
2174 "%s: playbackRate inconsistent", __func__);
2175 }
2176 }
2177 return status;
2178 }
2179
2180 status_t Track::setPlaybackRateParameters(
2181 const audio_playback_rate_t& playbackRate)
2182 {
2183 status_t status = INVALID_OPERATION;
2184 if (isOffloadedOrDirect()) {
2185 const sp<IAfThreadBase> thread = mThread.promote();
2186 if (thread != nullptr) {
2187 auto* const t = thread->asIAfPlaybackThread().get();
2188 audio_utils::lock_guard lock(t->mutex());
2189 status = t->getOutput_l()->stream->setPlaybackRateParameters(playbackRate);
2190 if (status == NO_ERROR) {
2191 mPlaybackRateParameters = playbackRate;
2192 }
2193 }
2194 }
2195 return status;
2196 }
2197
2198 //To be called with thread lock held
2199 bool Track::isResumePending() const {
2200 if (mState == RESUMING) {
2201 return true;
2202 }
2203 /* Resume is pending if track was stopping before pause was called */
2204 if (mState == STOPPING_1 &&
2205 mResumeToStopping) {
2206 return true;
2207 }
2208
2209 return false;
2210 }
2211
2212 //To be called with thread lock held
2213 void Track::resumeAck() {
2214 if (mState == RESUMING) {
2215 mState = ACTIVE;
2216 }
2217
2218     // The other pending-resume possibility is the STOPPING_1 state.
2219     // Do not change the state away from stopping, as doing so would prevent
2220     // drain from being called.
2221 if (mState == STOPPING_1) {
2222 mResumeToStopping = false;
2223 }
2224 }
2225
2226 //To be called with thread lock held
2227 void Track::updateTrackFrameInfo(
2228 int64_t trackFramesReleased, int64_t sinkFramesWritten,
2229 uint32_t halSampleRate, const ExtendedTimestamp &timeStamp) {
2230 // Make the kernel frametime available.
2231 const FrameTime ft{
2232 timeStamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
2233 timeStamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
2234 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
2235 mKernelFrameTime.store(ft);
2236 if (!audio_is_linear_pcm(mFormat)) {
2237 return;
2238 }
2239
2240 //update frame map
2241 mFrameMap.push(trackFramesReleased, sinkFramesWritten);
2242
2243 // adjust server times and set drained state.
2244 //
2245 // Our timestamps are only updated when the track is on the Thread active list.
2246 // We need to ensure that tracks are not removed before full drain.
2247 ExtendedTimestamp local = timeStamp;
2248 bool drained = true; // default assume drained, if no server info found
2249 bool checked = false;
2250 for (int i = ExtendedTimestamp::LOCATION_MAX - 1;
2251 i >= ExtendedTimestamp::LOCATION_SERVER; --i) {
2252 // Lookup the track frame corresponding to the sink frame position.
2253 if (local.mTimeNs[i] > 0) {
2254 local.mPosition[i] = mFrameMap.findX(local.mPosition[i]);
2255 // check drain state from the latest stage in the pipeline.
2256 if (!checked && i <= ExtendedTimestamp::LOCATION_KERNEL) {
2257 drained = local.mPosition[i] >= mAudioTrackServerProxy->framesReleased();
2258 checked = true;
2259 }
2260 }
2261 }
2262
2263 ALOGV("%s: trackFramesReleased:%lld sinkFramesWritten:%lld setDrained: %d",
2264 __func__, (long long)trackFramesReleased, (long long)sinkFramesWritten, drained);
2265 mAudioTrackServerProxy->setDrained(drained);
2266 // Set correction for flushed frames that are not accounted for in released.
2267 local.mFlushed = mAudioTrackServerProxy->framesFlushed();
2268 mServerProxy->setTimestamp(local);
2269
2270 // Compute latency info.
2271 const bool useTrackTimestamp = !drained;
2272 const double latencyMs = useTrackTimestamp
2273 ? local.getOutputServerLatencyMs(sampleRate())
2274 : timeStamp.getOutputServerLatencyMs(halSampleRate);
2275
2276 mServerLatencyFromTrack.store(useTrackTimestamp);
2277 mServerLatencyMs.store(latencyMs);
2278
2279 if (mLogStartCountdown > 0
2280 && local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] > 0
2281 && local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] > 0)
2282 {
2283 if (mLogStartCountdown > 1) {
2284 --mLogStartCountdown;
2285 } else if (latencyMs < mLogLatencyMs) { // wait for latency to stabilize (dip)
2286 mLogStartCountdown = 0;
2287 // startup is the difference in times for the current timestamp and our start
2288 double startUpMs =
2289 (local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartTimeNs) * 1e-6;
2290 // adjust for frames played.
2291 startUpMs -= (local.mPosition[ExtendedTimestamp::LOCATION_KERNEL] - mLogStartFrames)
2292 * 1e3 / mSampleRate;
2293 ALOGV("%s: latencyMs:%lf startUpMs:%lf"
2294 " localTime:%lld startTime:%lld"
2295 " localPosition:%lld startPosition:%lld",
2296 __func__, latencyMs, startUpMs,
2297 (long long)local.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL],
2298 (long long)mLogStartTimeNs,
2299 (long long)local.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
2300 (long long)mLogStartFrames);
2301 mTrackMetrics.logLatencyAndStartup(latencyMs, startUpMs);
2302 }
2303 mLogLatencyMs = latencyMs;
2304 }
2305 }
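// Illustrative example (numbers are hypothetical): if the kernel timestamp arrives 120 ms
// after mLogStartTimeNs and reports 2400 frames played beyond mLogStartFrames at a
// 48 kHz track rate (50 ms of audio), the logged startUpMs is 120 - 50 = 70 ms.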
2306
2307 bool Track::AudioVibrationController::setMute(bool muted) {
2308 const sp<IAfThreadBase> thread = mTrack->mThread.promote();
2309 if (thread != 0) {
2310 // Lock for updating mHapticPlaybackEnabled.
2311 audio_utils::lock_guard _l(thread->mutex());
2312 auto* const playbackThread = thread->asIAfPlaybackThread().get();
2313 if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
2314 && playbackThread->hapticChannelCount() > 0) {
2315 ALOGD("%s, haptic playback was %s for track %d",
2316 __func__, muted ? "muted" : "unmuted", mTrack->id());
2317 mTrack->setHapticPlaybackEnabled(!muted);
2318 return true;
2319 }
2320 }
2321 return false;
2322 }
2323
2324 binder::Status Track::AudioVibrationController::mute(
2325 /*out*/ bool *ret) {
2326 *ret = setMute(true);
2327 return binder::Status::ok();
2328 }
2329
2330 binder::Status Track::AudioVibrationController::unmute(
2331 /*out*/ bool *ret) {
2332 *ret = setMute(false);
2333 return binder::Status::ok();
2334 }
2335
2336 // ----------------------------------------------------------------------------
2337 #undef LOG_TAG
2338 #define LOG_TAG "AF::OutputTrack"
2339
2340 /* static */
2341 sp<IAfOutputTrack> IAfOutputTrack::create(
2342 IAfPlaybackThread* playbackThread,
2343 IAfDuplicatingThread* sourceThread,
2344 uint32_t sampleRate,
2345 audio_format_t format,
2346 audio_channel_mask_t channelMask,
2347 size_t frameCount,
2348 const AttributionSourceState& attributionSource) {
2349 return sp<OutputTrack>::make(
2350 playbackThread,
2351 sourceThread,
2352 sampleRate,
2353 format,
2354 channelMask,
2355 frameCount,
2356 attributionSource);
2357 }
2358
2359 OutputTrack::OutputTrack(
2360 IAfPlaybackThread* playbackThread,
2361 IAfDuplicatingThread* sourceThread,
2362 uint32_t sampleRate,
2363 audio_format_t format,
2364 audio_channel_mask_t channelMask,
2365 size_t frameCount,
2366 const AttributionSourceState& attributionSource)
2367 : Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
2368 AUDIO_ATTRIBUTES_INITIALIZER ,
2369 sampleRate, format, channelMask, frameCount,
2370 nullptr /* buffer */, (size_t)0 /* bufferSize */, nullptr /* sharedBuffer */,
2371 AUDIO_SESSION_NONE, getpid(), attributionSource, AUDIO_OUTPUT_FLAG_NONE,
2372 TYPE_OUTPUT),
2373 mActive(false), mSourceThread(sourceThread)
2374 {
2375 if (mCblk != NULL) {
2376 mOutBuffer.frameCount = 0;
2377 playbackThread->addOutputTrack_l(this);
2378 ALOGV("%s(): mCblk %p, mBuffer %p, "
2379 "frameCount %zu, mChannelMask 0x%08x",
2380 __func__, mCblk, mBuffer,
2381 frameCount, mChannelMask);
2382 // since client and server are in the same process,
2383 // the buffer has the same virtual address on both sides
2384 mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
2385 true /*clientInServer*/);
2386 mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
2387 mClientProxy->setSendLevel(0.0);
2388 mClientProxy->setSampleRate(sampleRate);
2389 } else {
2390 ALOGW("%s(%d): Error creating output track on thread %d",
2391 __func__, mId, (int)mThreadIoHandle);
2392 }
2393 }
2394
2395 OutputTrack::~OutputTrack()
2396 {
2397 clearBufferQueue();
2398 // superclass destructor will now delete the server proxy and shared memory both refer to
2399 }
2400
2401 status_t OutputTrack::start(AudioSystem::sync_event_t event,
2402 audio_session_t triggerSession)
2403 {
2404 status_t status = Track::start(event, triggerSession);
2405 if (status != NO_ERROR) {
2406 return status;
2407 }
2408
2409 mActive = true;
2410 mRetryCount = 127;
2411 return status;
2412 }
2413
2414 void OutputTrack::stop()
2415 {
2416 Track::stop();
2417 clearBufferQueue();
2418 mOutBuffer.frameCount = 0;
2419 mActive = false;
2420 }
2421
2422 ssize_t OutputTrack::write(void* data, uint32_t frames)
2423 {
2424 if (!mActive && frames != 0) {
2425 const sp<IAfThreadBase> thread = mThread.promote();
2426 if (thread != nullptr && thread->inStandby()) {
2427 // preload one silent buffer to trigger mixer on start()
2428 ClientProxy::Buffer buf { .mFrameCount = mClientProxy->getStartThresholdInFrames() };
2429 status_t status = mClientProxy->obtainBuffer(&buf);
2430 if (status != NO_ERROR && status != NOT_ENOUGH_DATA && status != WOULD_BLOCK) {
2431 ALOGE("%s(%d): could not obtain buffer on start", __func__, mId);
2432 return 0;
2433 }
2434 memset(buf.mRaw, 0, buf.mFrameCount * mFrameSize);
2435 mClientProxy->releaseBuffer(&buf);
2436
2437 (void) start();
2438
2439 // wait for HAL stream to start before sending actual audio. Doing this on each
2440             // OutputTrack ensures that playback start on all output streams is synchronized.
2441 // If another OutputTrack has already started it can underrun but this is OK
2442 // as only silence has been played so far and the retry count is very high on
2443 // OutputTrack.
2444 auto* const pt = thread->asIAfPlaybackThread().get();
2445 if (!pt->waitForHalStart()) {
2446 ALOGW("%s(%d): timeout waiting for thread to exit standby", __func__, mId);
2447 stop();
2448 return 0;
2449 }
2450
2451 // enqueue the first buffer and exit so that other OutputTracks will also start before
2452             // write() is called again and this buffer is actually consumed.
2453 Buffer firstBuffer;
2454 firstBuffer.frameCount = frames;
2455 firstBuffer.raw = data;
2456 queueBuffer(firstBuffer);
2457 return frames;
2458 } else {
2459 (void) start();
2460 }
2461 }
2462
2463 Buffer *pInBuffer;
2464 Buffer inBuffer;
2465 inBuffer.frameCount = frames;
2466 inBuffer.raw = data;
2467 uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
2468 while (waitTimeLeftMs) {
2469 // First write pending buffers, then new data
2470 if (mBufferQueue.size()) {
2471 pInBuffer = mBufferQueue.itemAt(0);
2472 } else {
2473 pInBuffer = &inBuffer;
2474 }
2475
2476 if (pInBuffer->frameCount == 0) {
2477 break;
2478 }
2479
2480 if (mOutBuffer.frameCount == 0) {
2481 mOutBuffer.frameCount = pInBuffer->frameCount;
2482 nsecs_t startTime = systemTime();
2483 status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
2484 if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
2485 ALOGV("%s(%d): thread %d no more output buffers; status %d",
2486 __func__, mId,
2487 (int)mThreadIoHandle, status);
2488 break;
2489 }
2490 uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
2491 if (waitTimeLeftMs >= waitTimeMs) {
2492 waitTimeLeftMs -= waitTimeMs;
2493 } else {
2494 waitTimeLeftMs = 0;
2495 }
2496 if (status == NOT_ENOUGH_DATA) {
2497 deferRestartIfDisabled();
2498 continue;
2499 }
2500 }
2501
2502 uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
2503 pInBuffer->frameCount;
2504 memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
2505 Proxy::Buffer buf;
2506 buf.mFrameCount = outFrames;
2507 buf.mRaw = NULL;
2508 mClientProxy->releaseBuffer(&buf);
2509 deferRestartIfDisabled();
2510 pInBuffer->frameCount -= outFrames;
2511 pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
2512 mOutBuffer.frameCount -= outFrames;
2513 mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
2514
2515 if (pInBuffer->frameCount == 0) {
2516 if (mBufferQueue.size()) {
2517 mBufferQueue.removeAt(0);
2518 free(pInBuffer->mBuffer);
2519 if (pInBuffer != &inBuffer) {
2520 delete pInBuffer;
2521 }
2522 ALOGV("%s(%d): thread %d released overflow buffer %zu",
2523 __func__, mId,
2524 (int)mThreadIoHandle, mBufferQueue.size());
2525 } else {
2526 break;
2527 }
2528 }
2529 }
2530
2531 // If we could not write all frames, allocate a buffer and queue it for next time.
2532 if (inBuffer.frameCount) {
2533 const sp<IAfThreadBase> thread = mThread.promote();
2534 if (thread != nullptr && !thread->inStandby()) {
2535 queueBuffer(inBuffer);
2536 }
2537 }
2538
2539 // Calling write() with a 0 length buffer means that no more data will be written:
2540 // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
2541 if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
2542 stop();
2543 }
2544
2545 return frames - inBuffer.frameCount; // number of frames consumed.
2546 }
2547
2548 void OutputTrack::queueBuffer(Buffer& inBuffer) {
2549
2550 if (mBufferQueue.size() < kMaxOverFlowBuffers) {
2551 Buffer *pInBuffer = new Buffer;
2552 const size_t bufferSize = inBuffer.frameCount * mFrameSize;
2553 pInBuffer->mBuffer = malloc(bufferSize);
2554 LOG_ALWAYS_FATAL_IF(pInBuffer->mBuffer == nullptr,
2555 "%s: Unable to malloc size %zu", __func__, bufferSize);
2556 pInBuffer->frameCount = inBuffer.frameCount;
2557 pInBuffer->raw = pInBuffer->mBuffer;
2558 memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
2559 mBufferQueue.add(pInBuffer);
2560 ALOGV("%s(%d): thread %d adding overflow buffer %zu", __func__, mId,
2561 (int)mThreadIoHandle, mBufferQueue.size());
2562 // audio data is consumed (stored locally); set frameCount to 0.
2563 inBuffer.frameCount = 0;
2564 } else {
2565 ALOGW("%s(%d): thread %d no more overflow buffers",
2566 __func__, mId, (int)mThreadIoHandle);
2567 // TODO: return error for this.
2568 }
2569 }
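// Illustrative example (values are hypothetical): queueing 480 frames of stereo float
// (mFrameSize = 8 bytes) allocates a 3840-byte overflow buffer; at most kMaxOverFlowBuffers
// such buffers are retained, after which incoming data is left unqueued (see warning above).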
2570
2571 void OutputTrack::copyMetadataTo(MetadataInserter& backInserter) const
2572 {
2573 audio_utils::lock_guard lock(trackMetadataMutex());
2574 backInserter = std::copy(mTrackMetadatas.begin(), mTrackMetadatas.end(), backInserter);
2575 }
2576
2577 void OutputTrack::setMetadatas(const SourceMetadatas& metadatas) {
2578 {
2579 audio_utils::lock_guard lock(trackMetadataMutex());
2580 mTrackMetadatas = metadatas;
2581 }
2582 // No need to adjust metadata track volumes as OutputTrack volumes are always 0dBFS.
2583 setMetadataHasChanged();
2584 }
2585
2586 status_t OutputTrack::obtainBuffer(
2587 AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
2588 {
2589 ClientProxy::Buffer buf;
2590 buf.mFrameCount = buffer->frameCount;
2591 struct timespec timeout;
2592 timeout.tv_sec = waitTimeMs / 1000;
2593 timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
2594 status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
2595 buffer->frameCount = buf.mFrameCount;
2596 buffer->raw = buf.mRaw;
2597 return status;
2598 }
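// Illustrative example (value is hypothetical): waitTimeMs = 1500 is converted to
// timeout { tv_sec = 1, tv_nsec = 500000000 } before being passed to the client proxy.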
2599
2600 void OutputTrack::clearBufferQueue()
2601 {
2602 size_t size = mBufferQueue.size();
2603
2604 for (size_t i = 0; i < size; i++) {
2605 Buffer *pBuffer = mBufferQueue.itemAt(i);
2606 free(pBuffer->mBuffer);
2607 delete pBuffer;
2608 }
2609 mBufferQueue.clear();
2610 }
2611
2612 void OutputTrack::restartIfDisabled()
2613 {
2614 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2615 if (mActive && (flags & CBLK_DISABLED)) {
2616 start();
2617 }
2618 }
2619
2620 // ----------------------------------------------------------------------------
2621 #undef LOG_TAG
2622 #define LOG_TAG "AF::PatchTrack"
2623
2624 /* static */
2625 sp<IAfPatchTrack> IAfPatchTrack::create(
2626 IAfPlaybackThread* playbackThread,
2627 audio_stream_type_t streamType,
2628 uint32_t sampleRate,
2629 audio_channel_mask_t channelMask,
2630 audio_format_t format,
2631 size_t frameCount,
2632 void* buffer,
2633 size_t bufferSize,
2634 audio_output_flags_t flags,
2635 const Timeout& timeout,
2636 size_t frameCountToBeReady, /** Default behaviour is to start
2637 * as soon as possible to have
2638 * the lowest possible latency
2639 * even if it might glitch. */
2640 float speed,
2641 float volume,
2642 bool muted)
2643 {
2644 return sp<PatchTrack>::make(
2645 playbackThread,
2646 streamType,
2647 sampleRate,
2648 channelMask,
2649 format,
2650 frameCount,
2651 buffer,
2652 bufferSize,
2653 flags,
2654 timeout,
2655 frameCountToBeReady,
2656 speed,
2657 volume,
2658 muted);
2659 }
2660
2661 PatchTrack::PatchTrack(IAfPlaybackThread* playbackThread,
2662 audio_stream_type_t streamType,
2663 uint32_t sampleRate,
2664 audio_channel_mask_t channelMask,
2665 audio_format_t format,
2666 size_t frameCount,
2667 void *buffer,
2668 size_t bufferSize,
2669 audio_output_flags_t flags,
2670 const Timeout& timeout,
2671 size_t frameCountToBeReady,
2672 float speed,
2673 float volume,
2674 bool muted)
2675 : Track(playbackThread, NULL, streamType,
2676 AUDIO_ATTRIBUTES_INITIALIZER,
2677 sampleRate, format, channelMask, frameCount,
2678 buffer, bufferSize, nullptr /* sharedBuffer */,
2679 AUDIO_SESSION_NONE, getpid(), audioServerAttributionSource(getpid()), flags,
2680 TYPE_PATCH, AUDIO_PORT_HANDLE_NONE, frameCountToBeReady, speed,
2681 false /*isSpatialized*/, false /*isBitPerfect*/, volume, muted),
2682 PatchTrackBase(mCblk ? new AudioTrackClientProxy(mCblk, mBuffer, frameCount, mFrameSize,
2683 true /*clientInServer*/) : nullptr,
2684 playbackThread, timeout)
2685 {
2686 if (mProxy != nullptr) {
2687 sp<AudioTrackClientProxy>::cast(mProxy)->setPlaybackRate({
2688 /* .mSpeed = */ speed,
2689 /* .mPitch = */ AUDIO_TIMESTRETCH_PITCH_NORMAL,
2690 /* .mStretchMode = */ AUDIO_TIMESTRETCH_STRETCH_DEFAULT,
2691 /* .mFallbackMode = */ AUDIO_TIMESTRETCH_FALLBACK_FAIL
2692 });
2693 }
2694 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
2695 __func__, mId, sampleRate,
2696 (int)mPeerTimeout.tv_sec,
2697 (int)(mPeerTimeout.tv_nsec / 1000000));
2698 }
2699
2700 PatchTrack::~PatchTrack()
2701 {
2702 ALOGV("%s(%d)", __func__, mId);
2703 }
2704
2705 size_t PatchTrack::framesReady() const
2706 {
2707 if (mPeerProxy && mPeerProxy->producesBufferOnDemand()) {
2708 return std::numeric_limits<size_t>::max();
2709 } else {
2710 return Track::framesReady();
2711 }
2712 }
2713
2714 status_t PatchTrack::start(AudioSystem::sync_event_t event,
2715 audio_session_t triggerSession)
2716 {
2717 status_t status = Track::start(event, triggerSession);
2718 if (status != NO_ERROR) {
2719 return status;
2720 }
2721 android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
2722 return status;
2723 }
2724
2725 // AudioBufferProvider interface
2726 status_t PatchTrack::getNextBuffer(
2727 AudioBufferProvider::Buffer* buffer)
2728 {
2729 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2730 Proxy::Buffer buf;
2731 buf.mFrameCount = buffer->frameCount;
2732 if (ATRACE_ENABLED()) {
2733 std::string traceName("PTnReq");
2734 traceName += std::to_string(id());
2735 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2736 }
2737 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
2738 ALOGV_IF(status != NO_ERROR, "%s(%d): getNextBuffer status %d", __func__, mId, status);
2739 buffer->frameCount = buf.mFrameCount;
2740 if (ATRACE_ENABLED()) {
2741 std::string traceName("PTnObt");
2742 traceName += std::to_string(id());
2743 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
2744 }
2745 if (buf.mFrameCount == 0) {
2746 return WOULD_BLOCK;
2747 }
2748 status = Track::getNextBuffer(buffer);
2749 return status;
2750 }
2751
2752 void PatchTrack::releaseBuffer(AudioBufferProvider::Buffer* buffer)
2753 {
2754 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
2755 Proxy::Buffer buf;
2756 buf.mFrameCount = buffer->frameCount;
2757 buf.mRaw = buffer->raw;
2758 mPeerProxy->releaseBuffer(&buf);
2759 TrackBase::releaseBuffer(buffer); // Note: this is the base class.
2760 }
2761
2762 status_t PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
2763 const struct timespec *timeOut)
2764 {
2765 status_t status = NO_ERROR;
2766 static const int32_t kMaxTries = 5;
2767 int32_t tryCounter = kMaxTries;
2768 const size_t originalFrameCount = buffer->mFrameCount;
2769 do {
2770 if (status == NOT_ENOUGH_DATA) {
2771 deferRestartIfDisabled();
2772 buffer->mFrameCount = originalFrameCount; // cleared on error, must be restored.
2773 }
2774 status = mProxy->obtainBuffer(buffer, timeOut);
2775 } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
2776 return status;
2777 }
2778
2779 void PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
2780 {
2781 mProxy->releaseBuffer(buffer);
2782 deferRestartIfDisabled();
2783
2784 // Check if the PatchTrack has enough data to write once in releaseBuffer().
2785 // If not, prevent an underrun from occurring by moving the track into FS_FILLING;
2786 // this logic avoids glitches when suspending A2DP with AudioPlaybackCapture.
2787 // TODO: perhaps underrun avoidance could be a track property checked in isReady() instead.
2788 if (mFillingStatus == FS_ACTIVE
2789 && audio_is_linear_pcm(mFormat)
2790 && !isOffloadedOrDirect()) {
2791 if (const sp<IAfThreadBase> thread = mThread.promote();
2792 thread != 0) {
2793 auto* const playbackThread = thread->asIAfPlaybackThread().get();
2794 const size_t frameCount = playbackThread->frameCount() * sampleRate()
2795 / playbackThread->sampleRate();
2796 if (framesReady() < frameCount) {
2797 ALOGD("%s(%d) Not enough data, wait for buffer to fill", __func__, mId);
2798 mFillingStatus = FS_FILLING;
2799 }
2800 }
2801 }
2802 }
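// Illustrative example (rates are hypothetical): with a playback thread frame count of 960
// at 48 kHz and a PatchTrack running at 24 kHz, the refill threshold is
// 960 * 24000 / 48000 = 480 frames; below that the track is parked in FS_FILLING.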
2803
2804 void PatchTrack::restartIfDisabled()
2805 {
2806 if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
2807 ALOGW("%s(%d): disabled due to previous underrun, restarting", __func__, mId);
2808 start();
2809 }
2810 }
2811
2812 // ----------------------------------------------------------------------------
2813 // Record
2814 // ----------------------------------------------------------------------------
2815
2816
2817 #undef LOG_TAG
2818 #define LOG_TAG "AF::RecordHandle"
2819
2820 class RecordHandle : public android::media::BnAudioRecord {
2821 public:
2822 explicit RecordHandle(const sp<IAfRecordTrack>& recordTrack);
2823 ~RecordHandle() override;
2824 binder::Status start(int /*AudioSystem::sync_event_t*/ event,
2825 int /*audio_session_t*/ triggerSession) final;
2826 binder::Status stop() final;
2827 binder::Status getActiveMicrophones(
2828 std::vector<media::MicrophoneInfoFw>* activeMicrophones) final;
2829 binder::Status setPreferredMicrophoneDirection(
2830 int /*audio_microphone_direction_t*/ direction) final;
2831 binder::Status setPreferredMicrophoneFieldDimension(float zoom) final;
2832 binder::Status shareAudioHistory(
2833 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) final;
2834
2835 private:
2836 const sp<IAfRecordTrack> mRecordTrack;
2837
2838 // for use from destructor
2839 void stop_nonvirtual();
2840 };
2841
2842 /* static */
2843 sp<media::IAudioRecord> IAfRecordTrack::createIAudioRecordAdapter(
2844 const sp<IAfRecordTrack>& recordTrack) {
2845 return sp<RecordHandle>::make(recordTrack);
2846 }
2847
2848 RecordHandle::RecordHandle(
2849 const sp<IAfRecordTrack>& recordTrack)
2850 : BnAudioRecord(),
2851 mRecordTrack(recordTrack)
2852 {
2853 setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
2854 setInheritRt(true);
2855 }
2856
2857 RecordHandle::~RecordHandle() {
2858 stop_nonvirtual();
2859 mRecordTrack->destroy();
2860 }
2861
2862 binder::Status RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
2863 int /*audio_session_t*/ triggerSession) {
2864 ALOGV("%s()", __func__);
2865 return binderStatusFromStatusT(
2866 mRecordTrack->start((AudioSystem::sync_event_t)event, (audio_session_t) triggerSession));
2867 }
2868
2869 binder::Status RecordHandle::stop() {
2870 stop_nonvirtual();
2871 return binder::Status::ok();
2872 }
2873
2874 void RecordHandle::stop_nonvirtual() {
2875 ALOGV("%s()", __func__);
2876 mRecordTrack->stop();
2877 }
2878
2879 binder::Status RecordHandle::getActiveMicrophones(
2880 std::vector<media::MicrophoneInfoFw>* activeMicrophones) {
2881 ALOGV("%s()", __func__);
2882 return binderStatusFromStatusT(mRecordTrack->getActiveMicrophones(activeMicrophones));
2883 }
2884
2885 binder::Status RecordHandle::setPreferredMicrophoneDirection(
2886 int /*audio_microphone_direction_t*/ direction) {
2887 ALOGV("%s()", __func__);
2888 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneDirection(
2889 static_cast<audio_microphone_direction_t>(direction)));
2890 }
2891
2892 binder::Status RecordHandle::setPreferredMicrophoneFieldDimension(float zoom) {
2893 ALOGV("%s()", __func__);
2894 return binderStatusFromStatusT(mRecordTrack->setPreferredMicrophoneFieldDimension(zoom));
2895 }
2896
2897 binder::Status RecordHandle::shareAudioHistory(
2898 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
2899 return binderStatusFromStatusT(
2900 mRecordTrack->shareAudioHistory(sharedAudioPackageName, sharedAudioStartMs));
2901 }
2902
2903 // ----------------------------------------------------------------------------
2904 #undef LOG_TAG
2905 #define LOG_TAG "AF::RecordTrack"
2906
2907
2908 /* static */
2909 sp<IAfRecordTrack> IAfRecordTrack::create(IAfRecordThread* thread,
2910 const sp<Client>& client,
2911 const audio_attributes_t& attr,
2912 uint32_t sampleRate,
2913 audio_format_t format,
2914 audio_channel_mask_t channelMask,
2915 size_t frameCount,
2916 void* buffer,
2917 size_t bufferSize,
2918 audio_session_t sessionId,
2919 pid_t creatorPid,
2920 const AttributionSourceState& attributionSource,
2921 audio_input_flags_t flags,
2922 track_type type,
2923 audio_port_handle_t portId,
2924 int32_t startFrames)
2925 {
2926 return sp<RecordTrack>::make(
2927 thread,
2928 client,
2929 attr,
2930 sampleRate,
2931 format,
2932 channelMask,
2933 frameCount,
2934 buffer,
2935 bufferSize,
2936 sessionId,
2937 creatorPid,
2938 attributionSource,
2939 flags,
2940 type,
2941 portId,
2942 startFrames);
2943 }
2944
2945 // RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
2946 RecordTrack::RecordTrack(
2947 IAfRecordThread* thread,
2948 const sp<Client>& client,
2949 const audio_attributes_t& attr,
2950 uint32_t sampleRate,
2951 audio_format_t format,
2952 audio_channel_mask_t channelMask,
2953 size_t frameCount,
2954 void *buffer,
2955 size_t bufferSize,
2956 audio_session_t sessionId,
2957 pid_t creatorPid,
2958 const AttributionSourceState& attributionSource,
2959 audio_input_flags_t flags,
2960 track_type type,
2961 audio_port_handle_t portId,
2962 int32_t startFrames)
2963 : TrackBase(thread, client, attr, sampleRate, format,
2964 channelMask, frameCount, buffer, bufferSize, sessionId,
2965 creatorPid,
2966 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
2967 false /*isOut*/,
2968 (type == TYPE_DEFAULT) ?
2969 ((flags & AUDIO_INPUT_FLAG_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
2970 ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
2971 type, portId,
2972 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_RECORD) + std::to_string(portId)),
2973 mOverflow(false),
2974 mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
2975 mRecordBufferConverter(NULL),
2976 mFlags(flags),
2977 mSilenced(false),
2978 mStartFrames(startFrames)
2979 {
2980 if (mCblk == NULL) {
2981 return;
2982 }
2983
2984 if (!isDirect()) {
2985 mRecordBufferConverter = new RecordBufferConverter(
2986 thread->channelMask(), thread->format(), thread->sampleRate(),
2987 channelMask, format, sampleRate);
2988 // Check if the RecordBufferConverter construction was successful.
2989 // If not, don't continue with construction.
2990 //
2991 // NOTE: It would be extremely rare that the record track cannot be created
2992 // for the current device, but a pending or future device change would make
2993 // the record track configuration valid.
2994 if (mRecordBufferConverter->initCheck() != NO_ERROR) {
2995 ALOGE("%s(%d): RecordTrack unable to create record buffer converter", __func__, mId);
2996 return;
2997 }
2998 }
2999
3000 mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
3001 mFrameSize, !isExternalTrack());
3002
3003 mResamplerBufferProvider = new ResamplerBufferProvider(this);
3004
3005 if (flags & AUDIO_INPUT_FLAG_FAST) {
3006 ALOG_ASSERT(thread->fastTrackAvailable());
3007 thread->setFastTrackAvailable(false);
3008 } else {
3009 // TODO: only Normal Record has timestamps (Fast Record does not).
3010 mServerLatencySupported = checkServerLatencySupported(mFormat, flags);
3011 }
3012 #ifdef TEE_SINK
3013 mTee.setId(std::string("_") + std::to_string(mThreadIoHandle)
3014 + "_" + std::to_string(mId)
3015 + "_R");
3016 #endif
3017
3018 // Once this item is logged by the server, the client can add properties.
3019 mTrackMetrics.logConstructor(creatorPid, uid(), id());
3020 }
3021
3022 RecordTrack::~RecordTrack()
3023 {
3024 ALOGV("%s()", __func__);
3025 delete mRecordBufferConverter;
3026 delete mResamplerBufferProvider;
3027 }
3028
3029 status_t RecordTrack::initCheck() const
3030 {
3031 status_t status = TrackBase::initCheck();
3032 if (status == NO_ERROR && mServerProxy == 0) {
3033 status = BAD_VALUE;
3034 }
3035 return status;
3036 }
3037
3038 // AudioBufferProvider interface
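// An empty buffer from obtainBuffer() means the client has not freed space in the
// shared control block in time, i.e. a capture overrun; setting CBLK_OVERRUN below
// lets the client detect and report it.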
3039 status_t RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
3040 {
3041 ServerProxy::Buffer buf;
3042 buf.mFrameCount = buffer->frameCount;
3043 status_t status = mServerProxy->obtainBuffer(&buf);
3044 buffer->frameCount = buf.mFrameCount;
3045 buffer->raw = buf.mRaw;
3046 if (buf.mFrameCount == 0) {
3047 // FIXME also wake futex so that overrun is noticed more quickly
3048 (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
3049 }
3050 return status;
3051 }
3052
3053 status_t RecordTrack::start(AudioSystem::sync_event_t event,
3054 audio_session_t triggerSession)
3055 {
3056 if (ATRACE_ENABLED()) [[unlikely]] {
3057 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
3058 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_START)
3059 .toTrace().c_str());
3060 }
3061 const sp<IAfThreadBase> thread = mThread.promote();
3062 if (thread != 0) {
3063 auto* const recordThread = thread->asIAfRecordThread().get();
3064 return recordThread->start(this, event, triggerSession);
3065 } else {
3066 ALOGW("%s track %d: thread was destroyed", __func__, portId());
3067 return DEAD_OBJECT;
3068 }
3069 }
3070
3071 void RecordTrack::stop()
3072 {
3073 if (ATRACE_ENABLED()) [[unlikely]] {
3074 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
3075 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_STOP)
3076 .toTrace().c_str());
3077 }
3078 const sp<IAfThreadBase> thread = mThread.promote();
3079 if (thread != 0) {
3080 auto* const recordThread = thread->asIAfRecordThread().get();
3081 if (recordThread->stop(this) && isExternalTrack()) {
3082 AudioSystem::stopInput(mPortId);
3083 }
3084 }
3085 }
3086
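// destroy() balances the AudioPolicyManager bookkeeping for external tracks:
// stopInput() is only issued if the track was still active (or start was in progress)
// when destroyed, and releaseInput() is always issued to release the port.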
3087 void RecordTrack::destroy()
3088 {
3089 // see comments at Track::destroy()
3090 sp<RecordTrack> keep(this);
3091 {
3092 track_state priorState = mState;
3093 const sp<IAfThreadBase> thread = mThread.promote();
3094 if (thread != 0) {
3095 audio_utils::lock_guard _l(thread->mutex());
3096 auto* const recordThread = thread->asIAfRecordThread().get();
3097 priorState = mState;
3098 if (!mSharedAudioPackageName.empty()) {
3099 recordThread->resetAudioHistory_l();
3100 }
3101 recordThread->destroyTrack_l(this); // move mState to STOPPED, terminate
3102 }
3103 // APM portid/client management done outside of lock.
3104 // NOTE: if thread doesn't exist, the input descriptor probably doesn't either.
3105 if (isExternalTrack()) {
3106 switch (priorState) {
3107 case ACTIVE: // invalidated while still active
3108 case STARTING_2: // invalidated/start-aborted after startInput successfully called
3109 case PAUSING: // invalidated while in the middle of stop() pausing (still active)
3110 AudioSystem::stopInput(mPortId);
3111 break;
3112
3113 case STARTING_1: // invalidated/start-aborted and startInput not successful
3114 case PAUSED: // OK, not active
3115 case IDLE: // OK, not active
3116 break;
3117
3118 case STOPPED: // unexpected (destroyed)
3119 default:
3120 LOG_ALWAYS_FATAL("%s(%d): invalid prior state: %d", __func__, mId, priorState);
3121 }
3122 AudioSystem::releaseInput(mPortId);
3123 }
3124 }
3125 }
3126
3127 void RecordTrack::invalidate()
3128 {
3129 TrackBase::invalidate();
3130 // FIXME should use proxy, and needs work
3131 audio_track_cblk_t* cblk = mCblk;
3132 android_atomic_or(CBLK_INVALID, &cblk->mFlags);
3133 android_atomic_release_store(0x40000000, &cblk->mFutex);
3134 // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
3135 (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
3136 }
3137
3138
3139 void RecordTrack::appendDumpHeader(String8& result) const
3140 {
3141 const auto res = IAfRecordTrack::getLogHeader();
3142 result.append(res.data(), res.size());
3143 }
3144
3145 void RecordTrack::appendDump(String8& result, bool active) const
3146 {
3147 result.appendFormat("%c%5s %6d %7u/%7u %7u %7u %2s 0x%03X "
3148 "%08X %08X %6u %6X "
3149 "%08X %6zu %6zu %3c",
3150 isFastTrack() ? 'F' : ' ',
3151 active ? "yes" : "no",
3152 mId,
3153 mClient ? mClient->pid() : getpid(),
3154 mClient ? mClient->uid() : getuid(),
3155 mSessionId,
3156 mPortId,
3157 getTrackStateAsCodedString(),
3158 mCblk->mFlags,
3159
3160 mFormat,
3161 mChannelMask,
3162 mSampleRate,
3163 mAttr.source,
3164
3165 mCblk->mServer,
3166 mFrameCount,
3167 mServerProxy->framesReadySafe(),
3168 isSilenced() ? 's' : 'n'
3169 );
3170 if (isServerLatencySupported()) {
3171 double latencyMs;
3172 bool fromTrack;
3173 if (getTrackLatencyMs(&latencyMs, &fromTrack) == OK) {
3174 // Show latency in msec, followed by 't' if from track timestamp (the most accurate)
3175 // or 'k' if estimated from kernel (usually for debugging).
3176 result.appendFormat(" %7.2lf %c", latencyMs, fromTrack ? 't' : 'k');
3177 } else {
3178 result.appendFormat("%10s", mCblk->mServer != 0 ? "unavail" : "new");
3179 }
3180 }
3181 result.append("\n");
3182 }
3183
3184 // This is invoked by SyncEvent callback.
3185 void RecordTrack::handleSyncStartEvent(
3186 const sp<audioflinger::SyncEvent>& event)
3187 {
3188 size_t framesToDrop = 0;
3189 const sp<IAfThreadBase> threadBase = mThread.promote();
3190 if (threadBase != 0) {
3191 // TODO: use actual buffer filling status instead of 2 buffers when info is available
3192 // from audio HAL
3193 framesToDrop = threadBase->frameCount() * 2;
3194 }
3195
3196 mSynchronizedRecordState.onPlaybackFinished(event, framesToDrop);
3197 }
3198
3199 void RecordTrack::clearSyncStartEvent()
3200 {
3201 mSynchronizedRecordState.clear();
3202 }
3203
3204 void RecordTrack::updateTrackFrameInfo(
3205 int64_t trackFramesReleased, int64_t sourceFramesRead,
3206 uint32_t halSampleRate, const ExtendedTimestamp &timestamp)
3207 {
3208 // Make the kernel frametime available.
3209 const FrameTime ft{
3210 timestamp.mPosition[ExtendedTimestamp::LOCATION_KERNEL],
3211 timestamp.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL]};
3212 // ALOGD("FrameTime: %lld %lld", (long long)ft.frames, (long long)ft.timeNs);
3213 mKernelFrameTime.store(ft);
3214 if (!audio_is_linear_pcm(mFormat)) {
3215 // Stream is direct, return provided timestamp with no conversion
3216 mServerProxy->setTimestamp(timestamp);
3217 return;
3218 }
3219
3220 ExtendedTimestamp local = timestamp;
3221
3222 // Convert HAL frames to server-side track frames at track sample rate.
3223 // We use trackFramesReleased and sourceFramesRead as an anchor point.
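// Illustrative example (hypothetical numbers): with halSampleRate = 48000 and
// mSampleRate = 16000, a HAL position of sourceFramesRead + 1440 frames maps to
// trackFramesReleased + 1440 * 16000 / 48000 = trackFramesReleased + 480 track frames.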
3224 for (int i = ExtendedTimestamp::LOCATION_SERVER; i < ExtendedTimestamp::LOCATION_MAX; ++i) {
3225 if (local.mTimeNs[i] != 0) {
3226 const int64_t relativeServerFrames = local.mPosition[i] - sourceFramesRead;
3227 const int64_t relativeTrackFrames = relativeServerFrames
3228 * mSampleRate / halSampleRate; // TODO: potential computation overflow
3229 local.mPosition[i] = relativeTrackFrames + trackFramesReleased;
3230 }
3231 }
3232 mServerProxy->setTimestamp(local);
3233
3234 // Compute latency info.
3235 const bool useTrackTimestamp = true; // use track unless debugging.
3236 const double latencyMs = - (useTrackTimestamp
3237 ? local.getOutputServerLatencyMs(sampleRate())
3238 : timestamp.getOutputServerLatencyMs(halSampleRate));
3239
3240 mServerLatencyFromTrack.store(useTrackTimestamp);
3241 mServerLatencyMs.store(latencyMs);
3242 }
3243
3244 status_t RecordTrack::getActiveMicrophones(
3245 std::vector<media::MicrophoneInfoFw>* activeMicrophones) const
3246 {
3247 const sp<IAfThreadBase> thread = mThread.promote();
3248 if (thread != 0) {
3249 auto* const recordThread = thread->asIAfRecordThread().get();
3250 return recordThread->getActiveMicrophones(activeMicrophones);
3251 } else {
3252 return BAD_VALUE;
3253 }
3254 }
3255
3256 status_t RecordTrack::setPreferredMicrophoneDirection(
3257 audio_microphone_direction_t direction) {
3258 const sp<IAfThreadBase> thread = mThread.promote();
3259 if (thread != 0) {
3260 auto* const recordThread = thread->asIAfRecordThread().get();
3261 return recordThread->setPreferredMicrophoneDirection(direction);
3262 } else {
3263 return BAD_VALUE;
3264 }
3265 }
3266
3267 status_t RecordTrack::setPreferredMicrophoneFieldDimension(float zoom) {
3268 const sp<IAfThreadBase> thread = mThread.promote();
3269 if (thread != 0) {
3270 auto* const recordThread = thread->asIAfRecordThread().get();
3271 return recordThread->setPreferredMicrophoneFieldDimension(zoom);
3272 } else {
3273 return BAD_VALUE;
3274 }
3275 }
3276
3277 status_t RecordTrack::shareAudioHistory(
3278 const std::string& sharedAudioPackageName, int64_t sharedAudioStartMs) {
3279
3280 const uid_t callingUid = IPCThreadState::self()->getCallingUid();
3281 const pid_t callingPid = IPCThreadState::self()->getCallingPid();
3282 if (callingUid != mUid || callingPid != mCreatorPid) {
3283 return PERMISSION_DENIED;
3284 }
3285
3286 AttributionSourceState attributionSource{};
3287 attributionSource.uid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingUid));
3288 attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_uid_t_int32_t(callingPid));
3289 attributionSource.token = sp<BBinder>::make();
3290 if (!captureHotwordAllowed(attributionSource)) {
3291 return PERMISSION_DENIED;
3292 }
3293
3294 const sp<IAfThreadBase> thread = mThread.promote();
3295 if (thread != 0) {
3296 auto* const recordThread = thread->asIAfRecordThread().get();
3297 status_t status = recordThread->shareAudioHistory(
3298 sharedAudioPackageName, mSessionId, sharedAudioStartMs);
3299 if (status == NO_ERROR) {
3300 mSharedAudioPackageName = sharedAudioPackageName;
3301 }
3302 return status;
3303 } else {
3304 return BAD_VALUE;
3305 }
3306 }
3307
3308 void RecordTrack::copyMetadataTo(MetadataInserter& backInserter) const
3309 {
3310
3311 // Do not forward PatchRecord metadata with unspecified audio source
3312 if (mAttr.source == AUDIO_SOURCE_DEFAULT) {
3313 return;
3314 }
3315
3316 // No track can be invalid here, as this is called after prepareTrack_l in the same critical section.
3317 record_track_metadata_v7_t metadata;
3318 metadata.base = {
3319 .source = mAttr.source,
3320 .gain = 1, // capture tracks do not have volumes
3321 };
3322 metadata.channel_mask = mChannelMask;
3323 strncpy(metadata.tags, mAttr.tags, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
3324
3325 *backInserter++ = metadata;
3326 }
3327
3328 void RecordTrack::setSilenced(bool silenced) {
3329 if (!isPatchTrack() && mSilenced != silenced) {
3330 mSilenced = silenced;
3331 ALOGD("%s: track with port id: %d, (%s)", __func__, mPortId,
3332 mSilenced ? "silenced" : "unsilenced");
3333 }
3334 }
3335
3336 // ----------------------------------------------------------------------------
3337 #undef LOG_TAG
3338 #define LOG_TAG "AF::PatchRecord"
3339
3340 /* static */
3341 sp<IAfPatchRecord> IAfPatchRecord::create(
3342 IAfRecordThread* recordThread,
3343 uint32_t sampleRate,
3344 audio_channel_mask_t channelMask,
3345 audio_format_t format,
3346 size_t frameCount,
3347 void *buffer,
3348 size_t bufferSize,
3349 audio_input_flags_t flags,
3350 const Timeout& timeout,
3351 audio_source_t source)
3352 {
3353 return sp<PatchRecord>::make(
3354 recordThread,
3355 sampleRate,
3356 channelMask,
3357 format,
3358 frameCount,
3359 buffer,
3360 bufferSize,
3361 flags,
3362 timeout,
3363 source);
3364 }
3365
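// A PatchRecord is the capture end of a software patch: toward the RecordThread it
// behaves like a regular RecordTrack, while the peer (playback) side exchanges audio
// with it through the PatchTrackBase client proxy / peer proxy pair.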
3366 PatchRecord::PatchRecord(IAfRecordThread* recordThread,
3367 uint32_t sampleRate,
3368 audio_channel_mask_t channelMask,
3369 audio_format_t format,
3370 size_t frameCount,
3371 void *buffer,
3372 size_t bufferSize,
3373 audio_input_flags_t flags,
3374 const Timeout& timeout,
3375 audio_source_t source)
3376 : RecordTrack(recordThread, NULL,
3377 audio_attributes_t{ .source = source } ,
3378 sampleRate, format, channelMask, frameCount,
3379 buffer, bufferSize, AUDIO_SESSION_NONE, getpid(),
3380 audioServerAttributionSource(getpid()), flags, TYPE_PATCH),
3381 PatchTrackBase(mCblk ? new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true)
3382 : nullptr,
3383 recordThread, timeout)
3384 {
3385 ALOGV("%s(%d): sampleRate %d mPeerTimeout %d.%03d sec",
3386 __func__, mId, sampleRate,
3387 (int)mPeerTimeout.tv_sec,
3388 (int)(mPeerTimeout.tv_nsec / 1000000));
3389 }
3390
3391 PatchRecord::~PatchRecord()
3392 {
3393 ALOGV("%s(%d)", __func__, mId);
3394 }
3395
3396 static size_t writeFramesHelper(
3397 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
3398 {
3399 AudioBufferProvider::Buffer patchBuffer;
3400 patchBuffer.frameCount = frameCount;
3401 auto status = dest->getNextBuffer(&patchBuffer);
3402 if (status != NO_ERROR) {
3403 ALOGW("%s PatchRecord getNextBuffer failed with error %d: %s",
3404 __func__, status, strerror(-status));
3405 return 0;
3406 }
3407 ALOG_ASSERT(patchBuffer.frameCount <= frameCount);
3408 memcpy(patchBuffer.raw, src, patchBuffer.frameCount * frameSize);
3409 size_t framesWritten = patchBuffer.frameCount;
3410 dest->releaseBuffer(&patchBuffer);
3411 return framesWritten;
3412 }
3413
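// Example of the wrap-around handling in writeFrames() below (hypothetical numbers):
// if 100 frames are requested but the destination ring buffer wraps after 60, the first
// writeFramesHelper() call copies 60 frames and the second copies the remaining 40
// starting at src + 60 * frameSize.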
3414 // static
3415 size_t PatchRecord::writeFrames(
3416 AudioBufferProvider* dest, const void* src, size_t frameCount, size_t frameSize)
3417 {
3418 size_t framesWritten = writeFramesHelper(dest, src, frameCount, frameSize);
3419 // On a buffer wrap, the returned frame count will be less than requested;
3420 // when this happens, a second buffer is obtained to write the leftover audio.
3421 const size_t framesLeft = frameCount - framesWritten;
3422 if (framesWritten != 0 && framesLeft != 0) {
3423 framesWritten += writeFramesHelper(dest, (const char*)src + framesWritten * frameSize,
3424 framesLeft, frameSize);
3425 }
3426 return framesWritten;
3427 }
3428
3429 // AudioBufferProvider interface
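// Frames are first obtained from the peer (playback side) proxy, which may block for up
// to mPeerTimeout; the request is then narrowed through RecordTrack::getNextBuffer()
// so both sides of the patch stay in step.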
3430 status_t PatchRecord::getNextBuffer(
3431 AudioBufferProvider::Buffer* buffer)
3432 {
3433 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
3434 Proxy::Buffer buf;
3435 buf.mFrameCount = buffer->frameCount;
3436 status_t status = mPeerProxy->obtainBuffer(&buf, &mPeerTimeout);
3437 ALOGV_IF(status != NO_ERROR,
3438 "%s(%d): mPeerProxy->obtainBuffer status %d", __func__, mId, status);
3439 buffer->frameCount = buf.mFrameCount;
3440 if (ATRACE_ENABLED()) {
3441 std::string traceName("PRnObt");
3442 traceName += std::to_string(id());
3443 ATRACE_INT(traceName.c_str(), buf.mFrameCount);
3444 }
3445 if (buf.mFrameCount == 0) {
3446 return WOULD_BLOCK;
3447 }
3448 status = RecordTrack::getNextBuffer(buffer);
3449 return status;
3450 }
3451
3452 void PatchRecord::releaseBuffer(AudioBufferProvider::Buffer* buffer)
3453 {
3454 ALOG_ASSERT(mPeerProxy != 0, "%s(%d): called without peer proxy", __func__, mId);
3455 Proxy::Buffer buf;
3456 buf.mFrameCount = buffer->frameCount;
3457 buf.mRaw = buffer->raw;
3458 mPeerProxy->releaseBuffer(&buf);
3459 TrackBase::releaseBuffer(buffer);
3460 }
3461
3462 status_t PatchRecord::obtainBuffer(Proxy::Buffer* buffer,
3463 const struct timespec *timeOut)
3464 {
3465 return mProxy->obtainBuffer(buffer, timeOut);
3466 }
3467
3468 void PatchRecord::releaseBuffer(Proxy::Buffer* buffer)
3469 {
3470 mProxy->releaseBuffer(buffer);
3471 }
3472
3473 #undef LOG_TAG
3474 #define LOG_TAG "AF::PthrPatchRecord"
3475
3476 static std::unique_ptr<void, decltype(free)*> allocAligned(size_t alignment, size_t size)
3477 {
3478 void *ptr = nullptr;
3479 (void)posix_memalign(&ptr, alignment, size);
3480 return {ptr, free};
3481 }
3482
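// PassthruPatchRecord is the patch record variant in which the playback side of the
// patch reads directly from the input HAL stream (see obtainBuffer() below), while the
// Source methods invoked on the RecordThread are fed zero-filled stub data of matching
// size.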
3483 /* static */
3484 sp<IAfPatchRecord> IAfPatchRecord::createPassThru(
3485 IAfRecordThread* recordThread,
3486 uint32_t sampleRate,
3487 audio_channel_mask_t channelMask,
3488 audio_format_t format,
3489 size_t frameCount,
3490 audio_input_flags_t flags,
3491 audio_source_t source)
3492 {
3493 return sp<PassthruPatchRecord>::make(
3494 recordThread,
3495 sampleRate,
3496 channelMask,
3497 format,
3498 frameCount,
3499 flags,
3500 source);
3501 }
3502
3503 PassthruPatchRecord::PassthruPatchRecord(
3504 IAfRecordThread* recordThread,
3505 uint32_t sampleRate,
3506 audio_channel_mask_t channelMask,
3507 audio_format_t format,
3508 size_t frameCount,
3509 audio_input_flags_t flags,
3510 audio_source_t source)
3511 : PatchRecord(recordThread, sampleRate, channelMask, format, frameCount,
3512 nullptr /*buffer*/, 0 /*bufferSize*/, flags, {} /* timeout */, source),
3513 mPatchRecordAudioBufferProvider(*this),
3514 mSinkBuffer(allocAligned(32, mFrameCount * mFrameSize)),
3515 mStubBuffer(allocAligned(32, mFrameCount * mFrameSize))
3516 {
3517 memset(mStubBuffer.get(), 0, mFrameCount * mFrameSize);
3518 }
3519
3520 sp<StreamInHalInterface> PassthruPatchRecord::obtainStream(
3521 sp<IAfThreadBase>* thread)
3522 {
3523 *thread = mThread.promote();
3524 if (!*thread) return nullptr;
3525 auto* const recordThread = (*thread)->asIAfRecordThread().get();
3526 audio_utils::lock_guard _l(recordThread->mutex());
3527 return recordThread->getInput() ? recordThread->getInput()->stream : nullptr;
3528 }
3529
3530 // PatchProxyBufferProvider methods are called on DirectOutputThread
3531 status_t PassthruPatchRecord::obtainBuffer(
3532 Proxy::Buffer* buffer, const struct timespec* timeOut)
3533 {
3534 if (mUnconsumedFrames) {
3535 buffer->mFrameCount = std::min(buffer->mFrameCount, mUnconsumedFrames);
3536 // mUnconsumedFrames is decremented in releaseBuffer so that the actual frame consumption is used.
3537 return PatchRecord::obtainBuffer(buffer, timeOut);
3538 }
3539
3540 // Otherwise, execute a read from HAL and write into the buffer.
3541 nsecs_t startTimeNs = 0;
3542 if (timeOut && (timeOut->tv_sec != 0 || timeOut->tv_nsec != 0) && timeOut->tv_sec != INT_MAX) {
3543 // Will need to correct timeOut by elapsed time.
3544 startTimeNs = systemTime();
3545 }
3546 const size_t framesToRead = std::min(buffer->mFrameCount, mFrameCount);
3547 buffer->mFrameCount = 0;
3548 buffer->mRaw = nullptr;
3549 sp<IAfThreadBase> thread;
3550 sp<StreamInHalInterface> stream = obtainStream(&thread);
3551 if (!stream) return NO_INIT; // If there is no stream, RecordThread is not reading.
3552
3553 status_t result = NO_ERROR;
3554 size_t bytesRead = 0;
3555 {
3556 ATRACE_NAME("read");
3557 result = stream->read(mSinkBuffer.get(), framesToRead * mFrameSize, &bytesRead);
3558 if (result != NO_ERROR) goto stream_error;
3559 if (bytesRead == 0) return NO_ERROR;
3560 }
3561
3562 {
3563 audio_utils::lock_guard lock(readMutex());
3564 mReadBytes += bytesRead;
3565 mReadError = NO_ERROR;
3566 }
3567 mReadCV.notify_one();
3568 // writeFrames handles wraparound and should write all the provided frames.
3569 // If it couldn't, there is something wrong with the client/server buffer of the software patch.
3570 buffer->mFrameCount = writeFrames(
3571 &mPatchRecordAudioBufferProvider,
3572 mSinkBuffer.get(), bytesRead / mFrameSize, mFrameSize);
3573 ALOGW_IF(buffer->mFrameCount < bytesRead / mFrameSize,
3574 "Lost %zu frames obtained from HAL", bytesRead / mFrameSize - buffer->mFrameCount);
3575 mUnconsumedFrames = buffer->mFrameCount;
3576 struct timespec newTimeOut;
3577 if (startTimeNs) {
3578 // Correct the timeout by elapsed time.
3579 nsecs_t newTimeOutNs = audio_utils_ns_from_timespec(timeOut) - (systemTime() - startTimeNs);
3580 if (newTimeOutNs < 0) newTimeOutNs = 0;
3581 newTimeOut.tv_sec = newTimeOutNs / NANOS_PER_SECOND;
3582 newTimeOut.tv_nsec = newTimeOutNs - newTimeOut.tv_sec * NANOS_PER_SECOND;
3583 timeOut = &newTimeOut;
3584 }
3585 return PatchRecord::obtainBuffer(buffer, timeOut);
3586
3587 stream_error:
3588 stream->standby();
3589 {
3590 audio_utils::lock_guard lock(readMutex());
3591 mReadError = result;
3592 }
3593 mReadCV.notify_one();
3594 return result;
3595 }
3596
3597 void PassthruPatchRecord::releaseBuffer(Proxy::Buffer* buffer)
3598 {
3599 if (buffer->mFrameCount <= mUnconsumedFrames) {
3600 mUnconsumedFrames -= buffer->mFrameCount;
3601 } else {
3602 ALOGW("Write side has consumed more frames than we had: %zu > %zu",
3603 buffer->mFrameCount, mUnconsumedFrames);
3604 mUnconsumedFrames = 0;
3605 }
3606 PatchRecord::releaseBuffer(buffer);
3607 }
3608
3609 // AudioBufferProvider and Source methods are called on RecordThread
3610 // 'read' emulates actual audio data with 0's. This is OK because 'getNextBuffer'
3611 // and 'releaseBuffer' are stubbed out and ignore their input.
3612 // It's not possible to retrieve actual data here without blocking 'obtainBuffer'
3613 // until we copy it.
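// The handshake with obtainBuffer() uses readMutex()/mReadCV: obtainBuffer() records
// mReadBytes (or mReadError) and notifies, and read() below blocks until one of them is
// set, then consumes up to 'bytes' of the recorded amount.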
3614 status_t PassthruPatchRecord::read(
3615 void* buffer, size_t bytes, size_t* read)
3616 {
3617 bytes = std::min(bytes, mFrameCount * mFrameSize);
3618 {
3619 audio_utils::unique_lock lock(readMutex());
3620 mReadCV.wait(lock, [&]{ return mReadError != NO_ERROR || mReadBytes != 0; });
3621 if (mReadError != NO_ERROR) {
3622 mLastReadFrames = 0;
3623 return mReadError;
3624 }
3625 *read = std::min(bytes, mReadBytes);
3626 mReadBytes -= *read;
3627 }
3628 mLastReadFrames = *read / mFrameSize;
3629 memset(buffer, 0, *read);
3630 return 0;
3631 }
3632
3633 status_t PassthruPatchRecord::getCapturePosition(
3634 int64_t* frames, int64_t* time)
3635 {
3636 sp<IAfThreadBase> thread;
3637 sp<StreamInHalInterface> stream = obtainStream(&thread);
3638 return stream ? stream->getCapturePosition(frames, time) : NO_INIT;
3639 }
3640
3641 status_t PassthruPatchRecord::standby()
3642 {
3643 // RecordThread issues 'standby' command in two major cases:
3644 // 1. Error on read--this case is handled in 'obtainBuffer'.
3645 // 2. Track is stopping--as PassthruPatchRecord assumes continuous
3646 // output, this can only happen when the software patch
3647 // is being torn down. In this case, the RecordThread
3648 // will terminate and close the HAL stream.
3649 return 0;
3650 }
3651
3652 // As the buffer gets filled in obtainBuffer, here we only simulate data consumption.
3653 status_t PassthruPatchRecord::getNextBuffer(
3654 AudioBufferProvider::Buffer* buffer)
3655 {
3656 buffer->frameCount = mLastReadFrames;
3657 buffer->raw = buffer->frameCount != 0 ? mStubBuffer.get() : nullptr;
3658 return NO_ERROR;
3659 }
3660
3661 void PassthruPatchRecord::releaseBuffer(
3662 AudioBufferProvider::Buffer* buffer)
3663 {
3664 buffer->frameCount = 0;
3665 buffer->raw = nullptr;
3666 }
3667
3668 // ----------------------------------------------------------------------------
3669 #undef LOG_TAG
3670 #define LOG_TAG "AF::MmapTrack"
3671
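// MMAP tracks exchange audio directly through a HAL-provided shared memory buffer, so
// TrackBase is constructed with ALLOC_NONE and a zero frame count, and the
// AudioBufferProvider/ExtendedAudioBufferProvider methods below are stubs.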
3672 /* static */
3673 sp<IAfMmapTrack> IAfMmapTrack::create(IAfThreadBase* thread,
3674 const audio_attributes_t& attr,
3675 uint32_t sampleRate,
3676 audio_format_t format,
3677 audio_channel_mask_t channelMask,
3678 audio_session_t sessionId,
3679 bool isOut,
3680 const android::content::AttributionSourceState& attributionSource,
3681 pid_t creatorPid,
3682 audio_port_handle_t portId,
3683 float volume,
3684 bool muted)
3685 {
3686 return sp<MmapTrack>::make(
3687 thread,
3688 attr,
3689 sampleRate,
3690 format,
3691 channelMask,
3692 sessionId,
3693 isOut,
3694 attributionSource,
3695 creatorPid,
3696 portId,
3697 volume,
3698 muted);
3699 }
3700
3701 MmapTrack::MmapTrack(IAfThreadBase* thread,
3702 const audio_attributes_t& attr,
3703 uint32_t sampleRate,
3704 audio_format_t format,
3705 audio_channel_mask_t channelMask,
3706 audio_session_t sessionId,
3707 bool isOut,
3708 const AttributionSourceState& attributionSource,
3709 pid_t creatorPid,
3710 audio_port_handle_t portId,
3711 float volume,
3712 bool muted)
3713 : TrackBase(thread, NULL, attr, sampleRate, format,
3714 channelMask, (size_t)0 /* frameCount */,
3715 nullptr /* buffer */, (size_t)0 /* bufferSize */,
3716 sessionId, creatorPid,
3717 VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid)),
3718 isOut,
3719 ALLOC_NONE,
3720 TYPE_DEFAULT, portId,
3721 std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_MMAP) + std::to_string(portId)),
3722 mPid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.pid))),
3723 mUid(VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid))),
3724 mSilenced(false), mSilencedNotified(false), mVolume(volume)
3725 {
3726 mMutedFromPort = muted;
3727 // Once this item is logged by the server, the client can add properties.
3728 mTrackMetrics.logConstructor(creatorPid, uid(), id());
3729 if (isOut && (attr.usage == AUDIO_USAGE_CALL_ASSISTANT
3730 || attr.usage == AUDIO_USAGE_VIRTUAL_SOURCE)) {
3731 // Audio patch and call assistant volume are always max
3732 mVolume = 1.0f;
3733 mMutedFromPort = false;
3734 }
3735 }
3736
3737 MmapTrack::~MmapTrack()
3738 {
3739 }
3740
3741 status_t MmapTrack::initCheck() const
3742 {
3743 return NO_ERROR;
3744 }
3745
3746 status_t MmapTrack::start(AudioSystem::sync_event_t event __unused,
3747 audio_session_t triggerSession __unused)
3748 {
3749 if (ATRACE_ENABLED()) [[unlikely]] {
3750 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
3751 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_START)
3752 .toTrace().c_str());
3753 }
3754 return NO_ERROR;
3755 }
3756
3757 void MmapTrack::stop()
3758 {
3759 if (ATRACE_ENABLED()) [[unlikely]] {
3760 ATRACE_INSTANT_FOR_TRACK(mTraceActionId.c_str(), audio_utils::trace::Object{}
3761 .set(AUDIO_TRACE_OBJECT_KEY_EVENT, AUDIO_TRACE_EVENT_STOP)
3762 .toTrace().c_str());
3763 }
3764 }
3765
3766 // AudioBufferProvider interface
3767 status_t MmapTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer)
3768 {
3769 buffer->frameCount = 0;
3770 buffer->raw = nullptr;
3771 return INVALID_OPERATION;
3772 }
3773
3774 // ExtendedAudioBufferProvider interface
3775 size_t MmapTrack::framesReady() const {
3776 return 0;
3777 }
3778
3779 int64_t MmapTrack::framesReleased() const
3780 {
3781 return 0;
3782 }
3783
3784 void MmapTrack::onTimestamp(const ExtendedTimestamp& timestamp __unused)
3785 {
3786 }
3787
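// Forwards port mute state changes to the audio service (IAudioManager) as a
// PLAYER_UPDATE_MUTED port event; mMuteState is only updated once the event has been
// delivered successfully.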
3788 void MmapTrack::processMuteEvent_l(const sp<IAudioManager>& audioManager, mute_state_t muteState)
3789 {
3790 if (mMuteState == muteState) {
3791 // mute state did not change, do nothing
3792 return;
3793 }
3794
3795 status_t result = UNKNOWN_ERROR;
3796 if (audioManager && mPortId != AUDIO_PORT_HANDLE_NONE) {
3797 if (mMuteEventExtras == nullptr) {
3798 mMuteEventExtras = std::make_unique<os::PersistableBundle>();
3799 }
3800 mMuteEventExtras->putInt(String16(kExtraPlayerEventMuteKey),
3801 static_cast<int>(muteState));
3802
3803 result = audioManager->portEvent(mPortId,
3804 PLAYER_UPDATE_MUTED,
3805 mMuteEventExtras);
3806 }
3807
3808 if (result == OK) {
3809 ALOGI("%s(%d): processed mute state for port ID %d from %d to %d", __func__, id(), mPortId,
3810 static_cast<int>(mMuteState), static_cast<int>(muteState));
3811 mMuteState = muteState;
3812 } else {
3813 ALOGW("%s(%d): cannot process mute state for port ID %d, status error %d",
3814 __func__,
3815 id(),
3816 mPortId,
3817 result);
3818 }
3819 }
3820
3821 void MmapTrack::appendDumpHeader(String8& result) const
3822 {
3823 const auto res = IAfMmapTrack::getLogHeader();
3824 result.append(res.data(), res.size());
3825 }
3826
3827 void MmapTrack::appendDump(String8& result, bool active __unused) const
3828 {
3829 result.appendFormat("%7u/%7u %7u %7u %08X %08X %6u 0x%03X ",
3830 mPid,
3831 mUid,
3832 mSessionId,
3833 mPortId,
3834 mFormat,
3835 mChannelMask,
3836 mSampleRate,
3837 mAttr.flags);
3838 if (isOut()) {
3839 result.appendFormat("%4x %2x", mAttr.usage, mAttr.content_type);
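// mVolume is a linear gain; report it in dB (20 * log10 of the linear value).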
3840 result.appendFormat("%11.2g", 20.0 * log10(mVolume));
3841 result.appendFormat("%12s", mMutedFromPort ? "true" : "false");
3842 } else {
3843 result.appendFormat("%7x", mAttr.source);
3844 }
3845 result.append("\n");
3846 }
3847
3848 } // namespace android
3849