/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>
#include <pwd.h>
#include <sys/types.h>

#define LOG_TAG "BufferQueueConsumer"
#define ATRACE_TAG ATRACE_TAG_GRAPHICS
//#define LOG_NDEBUG 0

#if DEBUG_ONLY_CODE
#define VALIDATE_CONSISTENCY() do { mCore->validateConsistencyLocked(); } while (0)
#else
#define VALIDATE_CONSISTENCY()
#endif
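// VALIDATE_CONSISTENCY() expands to nothing unless DEBUG_ONLY_CODE is enabled,
// so the consistency checks sprinkled through this file cost nothing in
// production builds.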

#define EGL_EGLEXT_PROTOTYPES
#include <EGL/egl.h>
#include <EGL/eglext.h>

#include <gui/BufferItem.h>
#include <gui/BufferQueueConsumer.h>
#include <gui/BufferQueueCore.h>
#include <gui/IConsumerListener.h>
#include <gui/IProducerListener.h>
#include <gui/TraceUtils.h>

#include <private/gui/BufferQueueThreadState.h>
#if !defined(__ANDROID_VNDK__) && !defined(NO_BINDER)
#include <binder/PermissionCache.h>
#endif

#include <system/window.h>

#include <com_android_graphics_libgui_flags.h>

namespace android {

// Macros for including BufferQueueCore information in log messages
#define BQ_LOGV(x, ...) \
    ALOGV("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)
#define BQ_LOGD(x, ...) \
    ALOGD("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)
#define BQ_LOGI(x, ...) \
    ALOGI("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)
#define BQ_LOGW(x, ...) \
    ALOGW("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)
#define BQ_LOGE(x, ...) \
    ALOGE("[%s](id:%" PRIx64 ",api:%d,p:%d,c:%" PRIu64 ") " x, mConsumerName.c_str(), \
          mCore->mUniqueId, mCore->mConnectedApi, mCore->mConnectedPid, (mCore->mUniqueId) >> 32, \
          ##__VA_ARGS__)
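// The bracketed prefix identifies the consumer name, the queue's unique id, the
// connected producer API and pid ("p:"), and "c:", which prints mUniqueId >> 32
// (in practice the pid of the process that created the queue). Hypothetical
// example: BQ_LOGV("acquireBuffer: slot=%d", slot) might produce
//   [SurfaceView](id:1a2b00000001,api:1,p:1234,c:6699) acquireBuffer: slot=7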

ConsumerListener::~ConsumerListener() = default;

BufferQueueConsumer::BufferQueueConsumer(const sp<BufferQueueCore>& core) :
    mCore(core),
    mSlots(core->mSlots),
    mConsumerName() {}

BufferQueueConsumer::~BufferQueueConsumer() {}

status_t BufferQueueConsumer::acquireBuffer(BufferItem* outBuffer,
        nsecs_t expectedPresent, uint64_t maxFrameNumber) {
    ATRACE_CALL();

    int numDroppedBuffers = 0;
    sp<IProducerListener> listener;
    {
        std::unique_lock<std::mutex> lock(mCore->mMutex);

        // Check that the consumer doesn't currently have the maximum number of
        // buffers acquired. We allow the max buffer count to be exceeded by one
        // buffer so that the consumer can successfully set up the newly acquired
        // buffer before releasing the old one.
        int numAcquiredBuffers = 0;
        for (int s : mCore->mActiveBuffers) {
            if (mSlots[s].mBufferState.isAcquired()) {
                ++numAcquiredBuffers;
            }
        }
        const bool acquireNonDroppableBuffer = mCore->mAllowExtraAcquire &&
                numAcquiredBuffers == mCore->mMaxAcquiredBufferCount + 1;
        if (numAcquiredBuffers >= mCore->mMaxAcquiredBufferCount + 1 &&
                !acquireNonDroppableBuffer) {
            BQ_LOGE("acquireBuffer: max acquired buffer count reached: %d (max %d)",
                    numAcquiredBuffers, mCore->mMaxAcquiredBufferCount);
            return INVALID_OPERATION;
        }

        bool sharedBufferAvailable = mCore->mSharedBufferMode &&
                mCore->mAutoRefresh && mCore->mSharedBufferSlot !=
                BufferQueueCore::INVALID_BUFFER_SLOT;

        // In asynchronous mode the list is guaranteed to be one buffer deep,
        // while in synchronous mode we use the oldest buffer.
        if (mCore->mQueue.empty() && !sharedBufferAvailable) {
            return NO_BUFFER_AVAILABLE;
        }

        BufferQueueCore::Fifo::iterator front(mCore->mQueue.begin());

        // If expectedPresent is specified, we may not want to return a buffer yet.
        // If it's specified and there's more than one buffer queued, we may want
        // to drop a buffer.
        // Skip this if we're in shared buffer mode and the queue is empty,
        // since in that case we'll just return the shared buffer.
        if (expectedPresent != 0 && !mCore->mQueue.empty()) {
            // The 'expectedPresent' argument indicates when the buffer is expected
            // to be presented on-screen. If the buffer's desired present time is
            // earlier (less) than expectedPresent -- meaning it will be displayed
            // on time or possibly late if we show it as soon as possible -- we
            // acquire and return it. If we don't want to display it until after the
            // expectedPresent time, we return PRESENT_LATER without acquiring it.
            //
            // To be safe, we don't defer acquisition if expectedPresent is more
            // than one second in the future beyond the desired present time
            // (i.e., we'd be holding the buffer for a long time).
            //
            // NOTE: Code assumes monotonic time values from the system clock
            // are positive.

            // Start by checking to see if we can drop frames. We skip this check if
            // the timestamps are being auto-generated by Surface. If the app isn't
            // generating timestamps explicitly, it probably doesn't want frames to
            // be discarded based on them.
            while (mCore->mQueue.size() > 1 && !mCore->mQueue[0].mIsAutoTimestamp) {
                const BufferItem& bufferItem(mCore->mQueue[1]);

                // If dropping entry[0] would leave us with a buffer that the
                // consumer is not yet ready for, don't drop it.
                if (maxFrameNumber && bufferItem.mFrameNumber > maxFrameNumber) {
                    break;
                }

                // If entry[1] is timely, drop entry[0] (and repeat). We apply an
                // additional criterion here: we only drop the earlier buffer if our
                // desiredPresent falls within +/- 1 second of the expected present.
                // Otherwise, bogus desiredPresent times (e.g., 0 or a small
                // relative timestamp), which normally mean "ignore the timestamp
                // and acquire immediately", would cause us to drop frames.
                //
                // We may want to add an additional criterion: don't drop the
                // earlier buffer if entry[1]'s fence hasn't signaled yet.
                nsecs_t desiredPresent = bufferItem.mTimestamp;
                if (desiredPresent < expectedPresent - MAX_REASONABLE_NSEC ||
                        desiredPresent > expectedPresent) {
                    // This buffer is set to display in the near future, or
                    // desiredPresent is garbage. Either way we don't want to drop
                    // the previous buffer just to get this on the screen sooner.
                    BQ_LOGV("acquireBuffer: nodrop desire=%" PRId64 " expect=%"
                            PRId64 " (%" PRId64 ") now=%" PRId64,
                            desiredPresent, expectedPresent,
                            desiredPresent - expectedPresent,
                            systemTime(CLOCK_MONOTONIC));
                    break;
                }

                BQ_LOGV("acquireBuffer: drop desire=%" PRId64 " expect=%" PRId64
                        " size=%zu",
                        desiredPresent, expectedPresent, mCore->mQueue.size());

                if (!front->mIsStale) {
                    // Front buffer is still in mSlots, so mark the slot as free
                    mSlots[front->mSlot].mBufferState.freeQueued();

                    // After leaving shared buffer mode, the shared buffer will
                    // still be around. Mark it as no longer shared if this
                    // operation causes it to be free.
                    if (!mCore->mSharedBufferMode &&
                            mSlots[front->mSlot].mBufferState.isFree()) {
                        mSlots[front->mSlot].mBufferState.mShared = false;
                    }

                    // Don't put the shared buffer on the free list
                    if (!mSlots[front->mSlot].mBufferState.isShared()) {
                        mCore->mActiveBuffers.erase(front->mSlot);
                        mCore->mFreeBuffers.push_back(front->mSlot);
                    }

                    if (mCore->mBufferReleasedCbEnabled) {
                        listener = mCore->mConnectedProducerListener;
                    }
                    ++numDroppedBuffers;
                }

                mCore->mQueue.erase(front);
                front = mCore->mQueue.begin();
            }
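            // Any buffers dropped above are reported to the producer only after
            // mMutex is released, via the onBufferReleased() calls at the end of
            // this method (one call per dropped buffer).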

            // See if the front buffer is ready to be acquired
            nsecs_t desiredPresent = front->mTimestamp;
            bool bufferIsDue = desiredPresent <= expectedPresent ||
                    desiredPresent > expectedPresent + MAX_REASONABLE_NSEC;
            bool consumerIsReady = maxFrameNumber > 0 ?
                    front->mFrameNumber <= maxFrameNumber : true;
            if (!bufferIsDue || !consumerIsReady) {
                BQ_LOGV("acquireBuffer: defer desire=%" PRId64 " expect=%" PRId64
                        " (%" PRId64 ") now=%" PRId64 " frame=%" PRIu64
                        " consumer=%" PRIu64,
                        desiredPresent, expectedPresent,
                        desiredPresent - expectedPresent,
                        systemTime(CLOCK_MONOTONIC),
                        front->mFrameNumber, maxFrameNumber);
                ATRACE_NAME("PRESENT_LATER");
                return PRESENT_LATER;
            }

            BQ_LOGV("acquireBuffer: accept desire=%" PRId64 " expect=%" PRId64 " "
                    "(%" PRId64 ") now=%" PRId64, desiredPresent, expectedPresent,
                    desiredPresent - expectedPresent,
                    systemTime(CLOCK_MONOTONIC));
        }

        int slot = BufferQueueCore::INVALID_BUFFER_SLOT;

        if (sharedBufferAvailable && mCore->mQueue.empty()) {
            // make sure the buffer has finished allocating before acquiring it
            mCore->waitWhileAllocatingLocked(lock);

            slot = mCore->mSharedBufferSlot;

            // Recreate the BufferItem for the shared buffer from the data that
            // was cached when it was last queued.
            outBuffer->mGraphicBuffer = mSlots[slot].mGraphicBuffer;
            outBuffer->mFence = Fence::NO_FENCE;
            outBuffer->mFenceTime = FenceTime::NO_FENCE;
            outBuffer->mCrop = mCore->mSharedBufferCache.crop;
            outBuffer->mTransform = mCore->mSharedBufferCache.transform &
                    ~static_cast<uint32_t>(
                    NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY);
            outBuffer->mScalingMode = mCore->mSharedBufferCache.scalingMode;
            outBuffer->mDataSpace = mCore->mSharedBufferCache.dataspace;
            outBuffer->mFrameNumber = mCore->mFrameCounter;
            outBuffer->mSlot = slot;
            outBuffer->mAcquireCalled = mSlots[slot].mAcquireCalled;
            outBuffer->mTransformToDisplayInverse =
                    (mCore->mSharedBufferCache.transform &
                    NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY) != 0;
            outBuffer->mSurfaceDamage = Region::INVALID_REGION;
            outBuffer->mQueuedBuffer = false;
            outBuffer->mIsStale = false;
            outBuffer->mAutoRefresh = mCore->mSharedBufferMode &&
                    mCore->mAutoRefresh;
        } else if (acquireNonDroppableBuffer && front->mIsDroppable) {
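            // The extra acquire permitted by mAllowExtraAcquire is reserved for a
            // front buffer that cannot be dropped; if the front buffer is
            // droppable, report that nothing is available rather than exceeding
            // the acquired-buffer limit for it.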
            BQ_LOGV("acquireBuffer: extra acquire is only allowed for a non-droppable front buffer");
            return NO_BUFFER_AVAILABLE;
        } else {
            slot = front->mSlot;
            *outBuffer = *front;
        }

        ATRACE_BUFFER_INDEX(slot);

        BQ_LOGV("acquireBuffer: acquiring { slot=%d/%" PRIu64 " buffer=%p }",
                slot, outBuffer->mFrameNumber, outBuffer->mGraphicBuffer->handle);

        if (!outBuffer->mIsStale) {
            mSlots[slot].mAcquireCalled = true;
            // Don't decrease the queue count if the BufferItem wasn't
            // previously in the queue. This happens in shared buffer mode when
            // the queue is empty and the BufferItem is created above.
            if (mCore->mQueue.empty()) {
                mSlots[slot].mBufferState.acquireNotInQueue();
            } else {
                mSlots[slot].mBufferState.acquire();
            }
            mSlots[slot].mFence = Fence::NO_FENCE;
        }

        // If the buffer has previously been acquired by the consumer, set
        // mGraphicBuffer to NULL to avoid unnecessarily remapping this buffer
        // on the consumer side
        if (outBuffer->mAcquireCalled) {
            outBuffer->mGraphicBuffer = nullptr;
        }

        mCore->mQueue.erase(front);

        // We might have freed a slot while dropping old buffers, or the producer
        // may be blocked waiting for the number of buffers in the queue to
        // decrease.
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        mCore->notifyBufferReleased();
#else
        mCore->mDequeueCondition.notify_all();
#endif

        ATRACE_INT(mCore->mConsumerName.c_str(), static_cast<int32_t>(mCore->mQueue.size()));
#ifndef NO_BINDER
        mCore->mOccupancyTracker.registerOccupancyChange(mCore->mQueue.size());
#endif
        VALIDATE_CONSISTENCY();
    }

    if (listener != nullptr) {
        for (int i = 0; i < numDroppedBuffers; ++i) {
            listener->onBufferReleased();
        }
    }

    return NO_ERROR;
}

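// A rough sketch of the consumer-side flow that acquireBuffer()/releaseBuffer()
// support. In practice this is typically driven by ConsumerBase and its
// subclasses rather than called directly, and the names other than
// acquireBuffer/releaseBuffer are illustrative only:
//
//   BufferItem item;
//   if (consumer->acquireBuffer(&item, 0 /* expectedPresent */, 0 /* maxFrameNumber */) ==
//           NO_ERROR) {
//       processBuffer(item);  // hypothetical consumer work
//       // Real consumers pass a fence for their own GPU reads instead of NO_FENCE.
//       consumer->releaseBuffer(item.mSlot, item.mFrameNumber, Fence::NO_FENCE,
//                               EGL_NO_DISPLAY, EGL_NO_SYNC_KHR);
//   }
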
status_t BufferQueueConsumer::detachBuffer(int slot) {
    ATRACE_CALL();
    ATRACE_BUFFER_INDEX(slot);
    BQ_LOGV("detachBuffer: slot %d", slot);
    sp<IProducerListener> listener;
    {
        std::lock_guard<std::mutex> lock(mCore->mMutex);

        if (mCore->mIsAbandoned) {
            BQ_LOGE("detachBuffer: BufferQueue has been abandoned");
            return NO_INIT;
        }

        if (mCore->mSharedBufferMode || slot == mCore->mSharedBufferSlot) {
            BQ_LOGE("detachBuffer: detachBuffer not allowed in shared buffer mode");
            return BAD_VALUE;
        }

        if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS) {
            BQ_LOGE("detachBuffer: slot index %d out of range [0, %d)",
                    slot, BufferQueueDefs::NUM_BUFFER_SLOTS);
            return BAD_VALUE;
        } else if (!mSlots[slot].mBufferState.isAcquired()) {
            BQ_LOGE("detachBuffer: slot %d is not owned by the consumer "
                    "(state = %s)", slot, mSlots[slot].mBufferState.string());
            return BAD_VALUE;
        }
        if (mCore->mBufferReleasedCbEnabled) {
            listener = mCore->mConnectedProducerListener;
        }

        mSlots[slot].mBufferState.detachConsumer();
        mCore->mActiveBuffers.erase(slot);
        mCore->mFreeSlots.insert(slot);
        mCore->clearBufferSlotLocked(slot);
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        mCore->notifyBufferReleased();
#else
        mCore->mDequeueCondition.notify_all();
#endif

        VALIDATE_CONSISTENCY();
    }

    if (listener) {
        listener->onBufferDetached(slot);
    }
    return NO_ERROR;
}

status_t BufferQueueConsumer::attachBuffer(int* outSlot,
        const sp<android::GraphicBuffer>& buffer) {
    ATRACE_CALL();

    if (outSlot == nullptr) {
        BQ_LOGE("attachBuffer: outSlot must not be NULL");
        return BAD_VALUE;
    } else if (buffer == nullptr) {
        BQ_LOGE("attachBuffer: cannot attach NULL buffer");
        return BAD_VALUE;
    }

    sp<IProducerListener> listener;
    {
        std::lock_guard<std::mutex> lock(mCore->mMutex);

        if (mCore->mSharedBufferMode) {
            BQ_LOGE("attachBuffer: cannot attach a buffer in shared buffer mode");
            return BAD_VALUE;
        }

        // Make sure we don't have too many acquired buffers
        int numAcquiredBuffers = 0;
        for (int s : mCore->mActiveBuffers) {
            if (mSlots[s].mBufferState.isAcquired()) {
                ++numAcquiredBuffers;
            }
        }

        if (numAcquiredBuffers >= mCore->mMaxAcquiredBufferCount + 1) {
            BQ_LOGE("attachBuffer: max acquired buffer count reached: %d "
                    "(max %d)", numAcquiredBuffers,
                    mCore->mMaxAcquiredBufferCount);
            return INVALID_OPERATION;
        }

        if (buffer->getGenerationNumber() != mCore->mGenerationNumber) {
            BQ_LOGE("attachBuffer: generation number mismatch [buffer %u] "
                    "[queue %u]", buffer->getGenerationNumber(),
                    mCore->mGenerationNumber);
            return BAD_VALUE;
        }

        // Find a free slot to put the buffer into
        int found = BufferQueueCore::INVALID_BUFFER_SLOT;
        if (!mCore->mFreeSlots.empty()) {
            auto slot = mCore->mFreeSlots.begin();
            found = *slot;
            mCore->mFreeSlots.erase(slot);
        } else if (!mCore->mFreeBuffers.empty()) {
            found = mCore->mFreeBuffers.front();
            mCore->mFreeBuffers.remove(found);
        }
        if (found == BufferQueueCore::INVALID_BUFFER_SLOT) {
            BQ_LOGE("attachBuffer: could not find free buffer slot");
            return NO_MEMORY;
        }

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_CONSUMER_ATTACH_CALLBACK)
        if (mCore->mBufferAttachedCbEnabled) {
            listener = mCore->mConnectedProducerListener;
        }
#endif

        mCore->mActiveBuffers.insert(found);
        *outSlot = found;
        ATRACE_BUFFER_INDEX(*outSlot);
        BQ_LOGV("attachBuffer: returning slot %d", *outSlot);

        mSlots[*outSlot].mGraphicBuffer = buffer;
        mSlots[*outSlot].mBufferState.attachConsumer();
        mSlots[*outSlot].mNeedsReallocation = true;
        mSlots[*outSlot].mFence = Fence::NO_FENCE;
        mSlots[*outSlot].mFrameNumber = 0;

        // mAcquireCalled tells BufferQueue that it doesn't need to send a valid
        // GraphicBuffer pointer on the next acquireBuffer call, which decreases
        // Binder traffic by not un/flattening the GraphicBuffer. However, it
        // requires that the consumer maintain a cached copy of the slot <--> buffer
        // mappings, which is why the consumer doesn't need the valid pointer on
        // acquire.
        //
        // The StreamSplitter is one of the primary users of the attach/detach
        // logic, and while it is running, all buffers it acquires are immediately
        // detached, and all buffers it eventually releases are ones that were
        // attached (as opposed to having been obtained from acquireBuffer), so it
        // doesn't make sense to maintain the slot/buffer mappings, which would
        // become invalid for every buffer during detach/attach. By setting this to
        // false, the valid GraphicBuffer pointer will always be sent with acquire
        // for attached buffers.
        mSlots[*outSlot].mAcquireCalled = false;

        VALIDATE_CONSISTENCY();
    }

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_CONSUMER_ATTACH_CALLBACK)
    if (listener != nullptr) {
        listener->onBufferAttached();
    }
#endif

    return NO_ERROR;
}

status_t BufferQueueConsumer::releaseBuffer(int slot, uint64_t frameNumber,
        const sp<Fence>& releaseFence, EGLDisplay eglDisplay,
        EGLSyncKHR eglFence) {
    ATRACE_CALL();
    ATRACE_BUFFER_INDEX(slot);

    if (slot < 0 || slot >= BufferQueueDefs::NUM_BUFFER_SLOTS ||
            releaseFence == nullptr) {
        BQ_LOGE("releaseBuffer: slot %d out of range or fence %p NULL", slot,
                releaseFence.get());
        return BAD_VALUE;
    }

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_GL_FENCE_CLEANUP)
    if (eglFence != EGL_NO_SYNC_KHR) {
        // Most platforms use native fences, so it's unlikely that we'll ever have to
        // process an eglFence. Ideally we can remove this code eventually. In the meantime,
        // do our best to wait for it so the buffer stays valid; otherwise return an error
        // to the caller.
        //
        // Pass EGL_SYNC_FLUSH_COMMANDS_BIT_KHR so that we don't wait forever on a fence
        // whose commands haven't been flushed to the GPU yet.
        EGLint result = eglClientWaitSyncKHR(eglDisplay, eglFence, EGL_SYNC_FLUSH_COMMANDS_BIT_KHR,
                                             1000000000);
        if (result == EGL_FALSE) {
            BQ_LOGE("releaseBuffer: error %#x waiting for fence", eglGetError());
            return UNKNOWN_ERROR;
        } else if (result == EGL_TIMEOUT_EXPIRED_KHR) {
            BQ_LOGE("releaseBuffer: timeout waiting for fence");
            return UNKNOWN_ERROR;
        }
        eglDestroySyncKHR(eglDisplay, eglFence);
    }
#endif

    sp<IProducerListener> listener;
    { // Autolock scope
        std::lock_guard<std::mutex> lock(mCore->mMutex);

        // If the frame number has changed because the buffer has been reallocated,
        // we can ignore this releaseBuffer for the old buffer.
        // Ignore this for the shared buffer where the frame number can easily
        // get out of sync due to the buffer being queued and acquired at the
        // same time.
        if (frameNumber != mSlots[slot].mFrameNumber &&
                !mSlots[slot].mBufferState.isShared()) {
            return STALE_BUFFER_SLOT;
        }

        if (!mSlots[slot].mBufferState.isAcquired()) {
            BQ_LOGE("releaseBuffer: attempted to release buffer slot %d "
                    "but its state was %s", slot,
                    mSlots[slot].mBufferState.string());
            return BAD_VALUE;
        }

#if !COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BQ_GL_FENCE_CLEANUP)
        mSlots[slot].mEglDisplay = eglDisplay;
        mSlots[slot].mEglFence = eglFence;
#endif
        mSlots[slot].mFence = releaseFence;
        mSlots[slot].mBufferState.release();

        // After leaving shared buffer mode, the shared buffer will
        // still be around. Mark it as no longer shared if this
        // operation causes it to be free.
        if (!mCore->mSharedBufferMode && mSlots[slot].mBufferState.isFree()) {
            mSlots[slot].mBufferState.mShared = false;
        }
        // Don't put the shared buffer on the free list.
        if (!mSlots[slot].mBufferState.isShared()) {
            mCore->mActiveBuffers.erase(slot);
            mCore->mFreeBuffers.push_back(slot);
        }

        if (mCore->mBufferReleasedCbEnabled) {
            listener = mCore->mConnectedProducerListener;
        }
        BQ_LOGV("releaseBuffer: releasing slot %d", slot);

#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
        mCore->notifyBufferReleased();
#else
        mCore->mDequeueCondition.notify_all();
#endif

        VALIDATE_CONSISTENCY();
    } // Autolock scope

    // Call back without lock held
    if (listener != nullptr) {
        listener->onBufferReleased();
    }

    return NO_ERROR;
}

status_t BufferQueueConsumer::connect(
        const sp<IConsumerListener>& consumerListener, bool controlledByApp) {
    ATRACE_CALL();

    if (consumerListener == nullptr) {
        BQ_LOGE("connect: consumerListener may not be NULL");
        return BAD_VALUE;
    }

    BQ_LOGV("connect: controlledByApp=%s",
            controlledByApp ? "true" : "false");

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mIsAbandoned) {
        BQ_LOGE("connect: BufferQueue has been abandoned");
        return NO_INIT;
    }

    mCore->mConsumerListener = consumerListener;
    mCore->mConsumerControlledByApp = controlledByApp;

    return NO_ERROR;
}

status_t BufferQueueConsumer::disconnect() {
    ATRACE_CALL();

    BQ_LOGV("disconnect");

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mConsumerListener == nullptr) {
        BQ_LOGE("disconnect: no consumer is connected");
        return BAD_VALUE;
    }

    mCore->mIsAbandoned = true;
    mCore->mConsumerListener = nullptr;
    mCore->mQueue.clear();
    mCore->freeAllBuffersLocked();
    mCore->mSharedBufferSlot = BufferQueueCore::INVALID_BUFFER_SLOT;
#if COM_ANDROID_GRAPHICS_LIBGUI_FLAGS(BUFFER_RELEASE_CHANNEL)
    mCore->notifyBufferReleased();
#else
    mCore->mDequeueCondition.notify_all();
#endif
    return NO_ERROR;
}

status_t BufferQueueConsumer::getReleasedBuffers(uint64_t *outSlotMask) {
    ATRACE_CALL();

    if (outSlotMask == nullptr) {
        BQ_LOGE("getReleasedBuffers: outSlotMask may not be NULL");
        return BAD_VALUE;
    }

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mIsAbandoned) {
        BQ_LOGE("getReleasedBuffers: BufferQueue has been abandoned");
        return NO_INIT;
    }

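    // Build a mask with one bit per slot; a set bit indicates a slot whose cached
    // buffer contents the consumer can discard. (ConsumerBase::onBuffersReleased is
    // the typical caller, freeing its cached slots based on this mask.)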
    uint64_t mask = 0;
    for (int s = 0; s < BufferQueueDefs::NUM_BUFFER_SLOTS; ++s) {
        if (!mSlots[s].mAcquireCalled) {
            mask |= (1ULL << s);
        }
    }

    // Remove from the mask queued buffers for which acquire has been called,
    // since the consumer will not receive their buffer addresses and so must
    // retain their cached information
    BufferQueueCore::Fifo::iterator current(mCore->mQueue.begin());
    while (current != mCore->mQueue.end()) {
        if (current->mAcquireCalled) {
            mask &= ~(1ULL << current->mSlot);
        }
        ++current;
    }

    BQ_LOGV("getReleasedBuffers: returning mask %#" PRIx64, mask);
    *outSlotMask = mask;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setDefaultBufferSize(uint32_t width,
        uint32_t height) {
    ATRACE_CALL();

    if (width == 0 || height == 0) {
        BQ_LOGV("setDefaultBufferSize: dimensions cannot be 0 (width=%u "
                "height=%u)", width, height);
        return BAD_VALUE;
    }

    BQ_LOGV("setDefaultBufferSize: width=%u height=%u", width, height);

    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mDefaultWidth = width;
    mCore->mDefaultHeight = height;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setMaxBufferCount(int bufferCount) {
    ATRACE_CALL();

    if (bufferCount < 1 || bufferCount > BufferQueueDefs::NUM_BUFFER_SLOTS) {
        BQ_LOGE("setMaxBufferCount: invalid count %d", bufferCount);
        return BAD_VALUE;
    }

    std::lock_guard<std::mutex> lock(mCore->mMutex);

    if (mCore->mConnectedApi != BufferQueueCore::NO_CONNECTED_API) {
        BQ_LOGE("setMaxBufferCount: producer is already connected");
        return INVALID_OPERATION;
    }

    if (bufferCount < mCore->mMaxAcquiredBufferCount) {
        BQ_LOGE("setMaxBufferCount: invalid buffer count (%d) less than "
                "mMaxAcquiredBufferCount (%d)", bufferCount,
                mCore->mMaxAcquiredBufferCount);
        return BAD_VALUE;
    }

    int delta = mCore->getMaxBufferCountLocked(mCore->mAsyncMode,
            mCore->mDequeueBufferCannotBlock, bufferCount) -
            mCore->getMaxBufferCountLocked();
    if (!mCore->adjustAvailableSlotsLocked(delta)) {
        BQ_LOGE("setMaxBufferCount: BufferQueue failed to adjust the number of "
                "available slots. Delta = %d", delta);
        return BAD_VALUE;
    }

    mCore->mMaxBufferCount = bufferCount;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setMaxAcquiredBufferCount(
        int maxAcquiredBuffers) {
    ATRACE_FORMAT("%s(%d)", __func__, maxAcquiredBuffers);

    if (maxAcquiredBuffers < 1 ||
            maxAcquiredBuffers > BufferQueueCore::MAX_MAX_ACQUIRED_BUFFERS) {
        BQ_LOGE("setMaxAcquiredBufferCount: invalid count %d",
                maxAcquiredBuffers);
        return BAD_VALUE;
    }

    sp<IConsumerListener> listener;
    { // Autolock scope
        std::unique_lock<std::mutex> lock(mCore->mMutex);
        mCore->waitWhileAllocatingLocked(lock);

        if (mCore->mIsAbandoned) {
            BQ_LOGE("setMaxAcquiredBufferCount: consumer is abandoned");
            return NO_INIT;
        }

        if (maxAcquiredBuffers == mCore->mMaxAcquiredBufferCount) {
            return NO_ERROR;
        }

        // The new maxAcquiredBuffers count should not be violated by the number
        // of currently acquired buffers
        int acquiredCount = 0;
        for (int slot : mCore->mActiveBuffers) {
            if (mSlots[slot].mBufferState.isAcquired()) {
                acquiredCount++;
            }
        }
        if (acquiredCount > maxAcquiredBuffers) {
            BQ_LOGE("setMaxAcquiredBufferCount: the requested maxAcquiredBuffer "
                    "count (%d) is less than the current acquired buffer count (%d)",
                    maxAcquiredBuffers, acquiredCount);
            return BAD_VALUE;
        }

        if ((maxAcquiredBuffers + mCore->mMaxDequeuedBufferCount +
                (mCore->mAsyncMode || mCore->mDequeueBufferCannotBlock ? 1 : 0))
                > mCore->mMaxBufferCount) {
            BQ_LOGE("setMaxAcquiredBufferCount: %d acquired buffers would "
                    "exceed the maxBufferCount (%d) (maxDequeued %d async %d)",
                    maxAcquiredBuffers, mCore->mMaxBufferCount,
                    mCore->mMaxDequeuedBufferCount, mCore->mAsyncMode ||
                    mCore->mDequeueBufferCannotBlock);
            return BAD_VALUE;
        }

        int delta = maxAcquiredBuffers - mCore->mMaxAcquiredBufferCount;
        if (!mCore->adjustAvailableSlotsLocked(delta)) {
            return BAD_VALUE;
        }

        BQ_LOGV("setMaxAcquiredBufferCount: %d", maxAcquiredBuffers);
        mCore->mMaxAcquiredBufferCount = maxAcquiredBuffers;
        VALIDATE_CONSISTENCY();
        if (delta < 0 && mCore->mBufferReleasedCbEnabled) {
            listener = mCore->mConsumerListener;
        }
    }
    // Call back without lock held
    if (listener != nullptr) {
        listener->onBuffersReleased();
    }

    return NO_ERROR;
}

status_t BufferQueueConsumer::setConsumerName(const String8& name) {
    ATRACE_CALL();
    BQ_LOGV("setConsumerName: '%s'", name.c_str());
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mConsumerName = name;
    mConsumerName = name;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setDefaultBufferFormat(PixelFormat defaultFormat) {
    ATRACE_CALL();
    BQ_LOGV("setDefaultBufferFormat: %u", defaultFormat);
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mDefaultBufferFormat = defaultFormat;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setDefaultBufferDataSpace(
        android_dataspace defaultDataSpace) {
    ATRACE_CALL();
    BQ_LOGV("setDefaultBufferDataSpace: %u", defaultDataSpace);
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mDefaultBufferDataSpace = defaultDataSpace;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setConsumerUsageBits(uint64_t usage) {
    ATRACE_CALL();
    BQ_LOGV("setConsumerUsageBits: %#" PRIx64, usage);
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mConsumerUsageBits = usage;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setConsumerIsProtected(bool isProtected) {
    ATRACE_CALL();
    BQ_LOGV("setConsumerIsProtected: %s", isProtected ? "true" : "false");
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mConsumerIsProtected = isProtected;
    return NO_ERROR;
}

status_t BufferQueueConsumer::setTransformHint(uint32_t hint) {
    ATRACE_CALL();
    BQ_LOGV("setTransformHint: %#x", hint);
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mTransformHint = hint;
    return NO_ERROR;
}

status_t BufferQueueConsumer::getSidebandStream(sp<NativeHandle>* outStream) const {
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    *outStream = mCore->mSidebandStream;
    return NO_ERROR;
}

status_t BufferQueueConsumer::getOccupancyHistory(bool forceFlush,
        std::vector<OccupancyTracker::Segment>* outHistory) {
    std::lock_guard<std::mutex> lock(mCore->mMutex);
#ifndef NO_BINDER
    *outHistory = mCore->mOccupancyTracker.getSegmentHistory(forceFlush);
#else
    (void)forceFlush;
    outHistory->clear();
#endif
    return NO_ERROR;
}

status_t BufferQueueConsumer::discardFreeBuffers() {
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->discardFreeBuffersLocked();
    return NO_ERROR;
}

status_t BufferQueueConsumer::dumpState(const String8& prefix, String8* outResult) const {
    struct passwd* pwd = getpwnam("shell");
    uid_t shellUid = pwd ? pwd->pw_uid : 0;
    if (!shellUid) {
        int savedErrno = errno;
        BQ_LOGE("Cannot get AID_SHELL");
        return savedErrno ? -savedErrno : UNKNOWN_ERROR;
    }

    bool denied = false;
    const uid_t uid = BufferQueueThreadState::getCallingUid();
#if !defined(__ANDROID_VNDK__) && !defined(NO_BINDER)
    // permission check can't be done for vendors as vendors have no access to
    // the PermissionController.
    const pid_t pid = BufferQueueThreadState::getCallingPid();
    if ((uid != shellUid) &&
        !PermissionCache::checkPermission(String16("android.permission.DUMP"), pid, uid)) {
        outResult->appendFormat("Permission Denial: can't dump BufferQueueConsumer "
                                "from pid=%d, uid=%d\n",
                                pid, uid);
        denied = true;
    }
#else
    if (uid != shellUid) {
        denied = true;
    }
#endif
    if (denied) {
        android_errorWriteWithInfoLog(0x534e4554, "27046057",
                static_cast<int32_t>(uid), nullptr, 0);
        return PERMISSION_DENIED;
    }

    mCore->dumpState(prefix, outResult);
    return NO_ERROR;
}

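// Controls the extra-acquire path in acquireBuffer(): when enabled, one buffer
// beyond mMaxAcquiredBufferCount may be acquired, but only if the front buffer
// is not droppable (see acquireNonDroppableBuffer above).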
void BufferQueueConsumer::setAllowExtraAcquire(bool allow) {
    std::lock_guard<std::mutex> lock(mCore->mMutex);
    mCore->mAllowExtraAcquire = allow;
}

} // namespace android