/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "hw-Parcel"
//#define LOG_NDEBUG 0

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <unistd.h>

#include <hwbinder/Binder.h>
#include <hwbinder/BpHwBinder.h>
#include <hwbinder/IPCThreadState.h>
#include <hwbinder/Parcel.h>
#include <hwbinder/ProcessState.h>

#include <cutils/ashmem.h>
#include <utils/Log.h>
#include <utils/misc.h>
#include <utils/String8.h>
#include <utils/String16.h>

#include "binder_kernel.h"
#include <hwbinder/Static.h>
#include "TextOutput.h"
#include "Utils.h"

#include <atomic>

#define LOG_REFS(...)
//#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#define LOG_ALLOC(...)
//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#define LOG_BUFFER(...)
// #define LOG_BUFFER(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)

// ---------------------------------------------------------------------------

// This macro should never be used at runtime, as too large a value
// of s could cause an integer overflow. Instead, you should always
// use the wrapper function pad_size().
#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
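// For illustration: PAD_SIZE_UNSAFE(1) == 4 and PAD_SIZE_UNSAFE(5) == 8,
// while values that are already multiples of 4 are returned unchanged.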

static size_t pad_size(size_t s) {
    if (s > (std::numeric_limits<size_t>::max() - 3)) {
        LOG_ALWAYS_FATAL("pad size too big %zu", s);
    }
    return PAD_SIZE_UNSAFE(s);
}

// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
#define STRICT_MODE_PENALTY_GATHER (0x40 << 16)

namespace android {
namespace hardware {

static std::atomic<size_t> gParcelGlobalAllocCount;
static std::atomic<size_t> gParcelGlobalAllocSize;

static size_t gMaxFds = 0;

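// Takes a reference on the binder described by a flattened object, on behalf
// of |who|: strong references for BINDER_TYPE_BINDER/HANDLE, weak references
// for the WEAK_* variants.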
void acquire_binder_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %llu", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != nullptr) b.get_refs()->incWeak(who);
            return;
        }
    }

    ALOGD("Invalid object type 0x%08x", obj.hdr.type);
}

void acquire_object(const sp<ProcessState>& proc, const binder_object_header& obj,
        const void *who) {
    switch (obj.type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER:
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE: {
            const flat_binder_object& fbo = reinterpret_cast<const flat_binder_object&>(obj);
            acquire_binder_object(proc, fbo, who);
            break;
        }
    }
}

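// Drops the reference previously taken by acquire_binder_object() /
// acquire_object(). For BINDER_TYPE_FD objects that we own (cookie != 0)
// this also closes the file descriptor.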
void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %llu", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != nullptr) b.get_refs()->decWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            if (obj.cookie != 0) { // owned
                close(obj.handle);
            }
            return;
        }
        case BINDER_TYPE_PTR: {
            // The relevant buffer is part of the transaction buffer and will be freed that way
            return;
        }
        case BINDER_TYPE_FDA: {
            // The enclosed file descriptors are closed in the kernel
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x", obj.hdr.type);
}

inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat);
}

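// Flattens |binder| into a flat_binder_object for the kernel: local binders
// become BINDER_TYPE_BINDER carrying the weak-ref and cookie pointers, remote
// binders become BINDER_TYPE_HANDLE carrying the proxy's handle, and a null
// binder is written as BINDER_TYPE_BINDER with binder == cookie == 0.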
status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj = {};

    if (binder != nullptr) {
        BHwBinder *local = binder->localBinder();
        if (!local) {
            BpHwBinder *proxy = binder->remoteBinder();
            if (proxy == nullptr) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.hdr.type = BINDER_TYPE_HANDLE;
            obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // Get policy and convert it
            int policy = local->getMinSchedulingPolicy();
            int priority = local->getMinSchedulingPriority();

            obj.flags = priority & FLAT_BINDER_FLAG_PRIORITY_MASK;
            obj.flags |= FLAT_BINDER_FLAG_ACCEPTS_FDS | FLAT_BINDER_FLAG_INHERIT_RT;
            obj.flags |= (policy & 3) << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
            if (local->isRequestingSid()) {
                obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
            }
            obj.hdr.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}

inline static status_t finish_unflatten_binder(
    BpHwBinder* /*proxy*/, const flat_binder_object& /*flat*/,
    const Parcel& /*in*/)
{
    return NO_ERROR;
}

status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject<flat_binder_object>();

    if (flat) {
        switch (flat->hdr.type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(nullptr, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpHwBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

// ---------------------------------------------------------------------------

Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}

Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}

size_t Parcel::getGlobalAllocSize() {
    return gParcelGlobalAllocSize.load();
}

size_t Parcel::getGlobalAllocCount() {
    return gParcelGlobalAllocCount.load();
}

const uint8_t* Parcel::data() const
{
    return mData;
}

size_t Parcel::dataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

size_t Parcel::dataAvail() const
{
    size_t result = dataSize() - dataPosition();
    if (result > INT32_MAX) {
        LOG_ALWAYS_FATAL("result too big: %zu", result);
    }
    return result;
}

size_t Parcel::dataPosition() const
{
    return mDataPos;
}

size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}

status_t Parcel::setDataSize(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t err;
    err = continueWrite(size);
    if (err == NO_ERROR) {
        mDataSize = size;
        ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
    }
    return err;
}

void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        LOG_ALWAYS_FATAL("pos too big: %zu", pos);
    }

    mDataPos = pos;
    mNextObjectHint = 0;
}

status_t Parcel::setDataCapacity(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (size > mDataCapacity) return continueWrite(size);
    return NO_ERROR;
}

void Parcel::markSensitive() const
{
    mDeallocZero = true;
}

// Write RPC headers. (previously just the interface token)
status_t Parcel::writeInterfaceToken(const char* interface)
{
    // currently the interface identification token is just its name as a string
    return writeCString(interface);
}

bool Parcel::enforceInterface(const char* interface) const
{
    const char* str = readCString();
    if (str != nullptr && strcmp(str, interface) == 0) {
        return true;
    } else {
        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
              interface, (str ? str : "<empty string>"));
        return false;
    }
}

const binder_size_t* Parcel::objects() const
{
    return mObjects;
}

size_t Parcel::objectsCount() const
{
    return mObjectsSize;
}

status_t Parcel::errorCheck() const
{
    return mError;
}

void Parcel::setError(status_t err)
{
    mError = err;
}

status_t Parcel::finishWrite(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    //printf("Finish write of %d\n", len);
    mDataPos += len;
    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
    if (mDataPos > mDataSize) {
        mDataSize = mDataPos;
        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
    }
    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
    return NO_ERROR;
}

status_t Parcel::write(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    void* const d = writeInplace(len);
    if (d) {
        memcpy(d, data, len);
        return NO_ERROR;
    }
    return mError;
}

void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    const size_t padded = pad_size(len);

    // validate for integer overflow
    if (mDataPos+padded < mDataPos) {
        return nullptr;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        //printf("Writing %ld bytes, padded to %ld\n", len, padded);
        uint8_t* const data = mData+mDataPos;

        // Need to pad at end?
        if (padded != len) {
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
            //    *reinterpret_cast<void**>(data+padded-4));
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return nullptr;
}

status_t Parcel::writeInt8(int8_t val)
{
    return write(&val, sizeof(val));
}

status_t Parcel::writeUint8(uint8_t val)
{
    return write(&val, sizeof(val));
}

status_t Parcel::writeInt16(int16_t val)
{
    return write(&val, sizeof(val));
}

status_t Parcel::writeUint16(uint16_t val)
{
    return write(&val, sizeof(val));
}

status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint32(uint32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeBool(bool val)
{
    return writeInt8(int8_t(val));
}

status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint64(uint64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}

#if defined(__mips__) && defined(__mips_hard_float)

status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif

status_t Parcel::writeCString(const char* str)
{
    return write(str, strlen(str)+1);
}

status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
{
    if (!str) {
        return writeInt32(-1);
    }

    return writeString16(*str);
}

status_t Parcel::writeString16(const String16& str)
{
    return writeString16(str.c_str(), str.size());
}

status_t Parcel::writeString16(const char16_t* str, size_t len)
{
    if (str == nullptr) return writeInt32(-1);

    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        len *= sizeof(char16_t);
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char16_t*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

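// Writes a kernel binder object at the current position and, when the kernel
// needs to see it (non-null binders, FDs, FD arrays, non-null buffers),
// records its offset in mObjects and takes references via
// acquire_binder_object(). Grows the data buffer and the offset array on
// demand before retrying the write.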
template <typename T>
status_t Parcel::writeObject(const T& val)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<T*>(mData+mDataPos) = val;

        const binder_object_header* hdr = reinterpret_cast<binder_object_header*>(mData+mDataPos);
        switch (hdr->type) {
            case BINDER_TYPE_BINDER:
            case BINDER_TYPE_WEAK_BINDER:
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE: {
                const flat_binder_object *fbo = reinterpret_cast<const flat_binder_object*>(hdr);
                if (fbo->binder != 0) {
                    mObjects[mObjectsSize++] = mDataPos;
                    acquire_binder_object(ProcessState::self(), *fbo, this);
                }
                break;
            }
            case BINDER_TYPE_FD: {
                // remember if it's a file descriptor
                if (!mAllowFds) {
                    // fail before modifying our object index
                    return FDS_NOT_ALLOWED;
                }
                mHasFds = mFdsKnown = true;
                mObjects[mObjectsSize++] = mDataPos;
                break;
            }
            case BINDER_TYPE_FDA:
                mObjects[mObjectsSize++] = mDataPos;
                break;
            case BINDER_TYPE_PTR: {
                const binder_buffer_object *buffer_obj = reinterpret_cast<
                    const binder_buffer_object*>(hdr);
                if ((void *)buffer_obj->buffer != nullptr) {
                    mObjects[mObjectsSize++] = mDataPos;
                }
                break;
            }
            default: {
                ALOGE("writeObject: unknown type %d", hdr->type);
                break;
            }
        }
        return finishWrite(sizeof(val));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        if (mObjectsSize > SIZE_MAX - 2) return NO_MEMORY; // overflow
        if (mObjectsSize + 2 > SIZE_MAX / 3) return NO_MEMORY; // overflow
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == nullptr) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}

template status_t Parcel::writeObject<flat_binder_object>(const flat_binder_object& val);
template status_t Parcel::writeObject<binder_fd_object>(const binder_fd_object& val);
template status_t Parcel::writeObject<binder_buffer_object>(const binder_buffer_object& val);
template status_t Parcel::writeObject<binder_fd_array_object>(const binder_fd_array_object& val);

bool Parcel::validateBufferChild(size_t child_buffer_handle,
                                 size_t child_offset) const {
    if (child_buffer_handle >= mObjectsSize)
        return false;
    binder_buffer_object *child = reinterpret_cast<binder_buffer_object*>
            (mData + mObjects[child_buffer_handle]);
    if (child->hdr.type != BINDER_TYPE_PTR || child_offset > child->length) {
        // Child object not a buffer, or not large enough
        LOG_BUFFER("writeEmbeddedReference found weird child. "
                   "child_offset = %zu, child->length = %zu",
                   child_offset, (size_t)child->length);
        return false;
    }
    return true;
}

bool Parcel::validateBufferParent(size_t parent_buffer_handle,
                                  size_t parent_offset) const {
    if (parent_buffer_handle >= mObjectsSize)
        return false;
    binder_buffer_object *parent = reinterpret_cast<binder_buffer_object*>
            (mData + mObjects[parent_buffer_handle]);
    if (parent->hdr.type != BINDER_TYPE_PTR ||
            sizeof(binder_uintptr_t) > parent->length ||
            parent_offset > parent->length - sizeof(binder_uintptr_t)) {
        // Parent object not a buffer, or not large enough
        return false;
    }
    return true;
}
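
// Writes a BINDER_TYPE_PTR object describing |buffer|, whose address is
// embedded inside an already-written parent buffer at |parent_offset|.
// On success, *handle receives the index of the new object in mObjects.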
status_t Parcel::writeEmbeddedBuffer(
        const void *buffer, size_t length, size_t *handle,
        size_t parent_buffer_handle, size_t parent_offset) {
    LOG_BUFFER("writeEmbeddedBuffer(%p, %zu, parent = (%zu, %zu)) -> %zu",
        buffer, length, parent_buffer_handle,
        parent_offset, mObjectsSize);
    if (!validateBufferParent(parent_buffer_handle, parent_offset))
        return BAD_VALUE;
    binder_buffer_object obj = {
        .hdr = { .type = BINDER_TYPE_PTR },
        .flags = BINDER_BUFFER_FLAG_HAS_PARENT,
        .buffer = reinterpret_cast<binder_uintptr_t>(buffer),
        .length = length,
        .parent = parent_buffer_handle,
        .parent_offset = parent_offset,
    };
    if (handle != nullptr) {
        // We use an index into mObjects as a handle
        *handle = mObjectsSize;
    }
    return writeObject(obj);
}

status_t Parcel::writeBuffer(const void *buffer, size_t length, size_t *handle)
{
    LOG_BUFFER("writeBuffer(%p, %zu) -> %zu",
        buffer, length, mObjectsSize);
    binder_buffer_object obj {
        .hdr = { .type = BINDER_TYPE_PTR },
        .flags = 0,
        .buffer = reinterpret_cast<binder_uintptr_t>(buffer),
        .length = length,
    };
    if (handle != nullptr) {
        // We use an index into mObjects as a handle
        *handle = mObjectsSize;
    }
    return writeObject(obj);
}

void Parcel::clearCache() const {
    LOG_BUFFER("clearing cache.");
    mBufCachePos = 0;
    mBufCache.clear();
}

void Parcel::updateCache() const {
    if (mBufCachePos == mObjectsSize)
        return;
    LOG_BUFFER("updating cache from %zu to %zu", mBufCachePos, mObjectsSize);
    for (size_t i = mBufCachePos; i < mObjectsSize; i++) {
        binder_size_t dataPos = mObjects[i];
        binder_buffer_object *obj =
            reinterpret_cast<binder_buffer_object*>(mData+dataPos);
        if (obj->hdr.type != BINDER_TYPE_PTR)
            continue;
        BufferInfo ifo;
        ifo.index = i;
        ifo.buffer = obj->buffer;
        ifo.bufend = obj->buffer + obj->length;
        mBufCache.push_back(ifo);
    }
    mBufCachePos = mObjectsSize;
}

/* O(n) (n=#buffers) to find a buffer that contains the given addr */
status_t Parcel::findBuffer(const void *ptr, size_t length, bool *found,
                            size_t *handle, size_t *offset) const {
    if (found == nullptr)
        return UNKNOWN_ERROR;
    updateCache();
    binder_uintptr_t ptrVal = reinterpret_cast<binder_uintptr_t>(ptr);
    // true if the pointer is in some buffer, but the length is too big
    // so that ptr + length doesn't fit into the buffer.
    bool suspectRejectBadPointer = false;
    LOG_BUFFER("findBuffer examining %zu objects.", mObjectsSize);
    for (auto entry = mBufCache.rbegin(); entry != mBufCache.rend(); ++entry) {
        if (entry->buffer <= ptrVal && ptrVal < entry->bufend) {
            // might have found it.
            if (ptrVal + length <= entry->bufend) {
                *found = true;
                if (handle != nullptr) *handle = entry->index;
                if (offset != nullptr) *offset = ptrVal - entry->buffer;
                LOG_BUFFER("    findBuffer has a match at %zu!", entry->index);
                return OK;
            } else {
                suspectRejectBadPointer = true;
            }
        }
    }
    LOG_BUFFER("findBuffer did not find for ptr = %p.", ptr);
    *found = false;
    return suspectRejectBadPointer ? BAD_VALUE : OK;
}

/* findBuffer with the assumption that ptr = .buffer (so it points to top
 * of the buffer, aka offset 0).
 */
status_t Parcel::quickFindBuffer(const void *ptr, size_t *handle) const {
    updateCache();
    binder_uintptr_t ptrVal = reinterpret_cast<binder_uintptr_t>(ptr);
    LOG_BUFFER("quickFindBuffer examining %zu objects.", mObjectsSize);
    for (auto entry = mBufCache.rbegin(); entry != mBufCache.rend(); ++entry) {
        if (entry->buffer == ptrVal) {
            if (handle != nullptr) *handle = entry->index;
            return OK;
        }
    }
    LOG_BUFFER("quickFindBuffer did not find for ptr = %p.", ptr);
    return NO_INIT;
}

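// Serializes a native_handle_t without duplicating its file descriptors:
// first a uint64 with the handle's size in bytes (0 for a null handle), then
// the handle itself as a (possibly embedded) buffer object, and finally a
// BINDER_TYPE_FDA object covering the fds stored in that buffer.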
status_t Parcel::writeNativeHandleNoDup(const native_handle_t *handle,
                                        bool embedded,
                                        size_t parent_buffer_handle,
                                        size_t parent_offset)
{
    size_t buffer_handle;
    status_t status = OK;

    if (handle == nullptr) {
        status = writeUint64(0);
        return status;
    }

    size_t native_handle_size = sizeof(native_handle_t)
        + handle->numFds * sizeof(int) + handle->numInts * sizeof(int);
    writeUint64(native_handle_size);

    if (embedded) {
        status = writeEmbeddedBuffer((void*) handle,
                native_handle_size, &buffer_handle,
                parent_buffer_handle, parent_offset);
    } else {
        status = writeBuffer((void*) handle, native_handle_size, &buffer_handle);
    }

    if (status != OK) {
        return status;
    }

    struct binder_fd_array_object fd_array {
        .hdr = { .type = BINDER_TYPE_FDA },
        .num_fds = static_cast<binder_size_t>(handle->numFds),
        .parent = buffer_handle,
        .parent_offset = offsetof(native_handle_t, data),
    };

    return writeObject(fd_array);
}

status_t Parcel::writeNativeHandleNoDup(const native_handle_t *handle)
{
    return writeNativeHandleNoDup(handle, false /* embedded */);
}

status_t Parcel::writeEmbeddedNativeHandle(const native_handle_t *handle,
                                           size_t parent_buffer_handle,
                                           size_t parent_offset)
{
    return writeNativeHandleNoDup(handle, true /* embedded */,
                                  parent_buffer_handle, parent_offset);
}

status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}

const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const void* data = mData+mDataPos;
        mDataPos += pad_size(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return nullptr;
}

template<class T>
status_t Parcel::readAligned(T *pArg) const {
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(T)) <= mDataSize) {
        const void* data = mData+mDataPos;
        mDataPos += sizeof(T);
        *pArg = *reinterpret_cast<const T*>(data);
        return NO_ERROR;
    } else {
        return NOT_ENOUGH_DATA;
    }
}

template<class T>
T Parcel::readAligned() const {
    T result;
    if (readAligned(&result) != NO_ERROR) {
        result = 0;
    }

    return result;
}

template<class T>
status_t Parcel::writeAligned(T val) {
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));

    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
restart_write:
        *reinterpret_cast<T*>(mData+mDataPos) = val;
        return finishWrite(sizeof(val));
    }

    status_t err = growData(sizeof(val));
    if (err == NO_ERROR) goto restart_write;
    return err;
}

status_t Parcel::readInt8(int8_t *pArg) const
{
    return read(pArg, sizeof(*pArg));
}

status_t Parcel::readUint8(uint8_t *pArg) const
{
    return read(pArg, sizeof(*pArg));
}

status_t Parcel::readInt16(int16_t *pArg) const
{
    return read(pArg, sizeof(*pArg));
}

status_t Parcel::readUint16(uint16_t *pArg) const
{
    return read(pArg, sizeof(*pArg));
}

status_t Parcel::readInt32(int32_t *pArg) const
{
    return readAligned(pArg);
}

int32_t Parcel::readInt32() const
{
    return readAligned<int32_t>();
}

status_t Parcel::readUint32(uint32_t *pArg) const
{
    return readAligned(pArg);
}

uint32_t Parcel::readUint32() const
{
    return readAligned<uint32_t>();
}

status_t Parcel::readInt64(int64_t *pArg) const
{
    return readAligned(pArg);
}

int64_t Parcel::readInt64() const
{
    return readAligned<int64_t>();
}

status_t Parcel::readUint64(uint64_t *pArg) const
{
    return readAligned(pArg);
}

uint64_t Parcel::readUint64() const
{
    return readAligned<uint64_t>();
}

status_t Parcel::readPointer(uintptr_t *pArg) const
{
    status_t ret;
    binder_uintptr_t ptr;
    ret = readAligned(&ptr);
    if (!ret)
        *pArg = ptr;
    return ret;
}

uintptr_t Parcel::readPointer() const
{
    return readAligned<binder_uintptr_t>();
}

status_t Parcel::readFloat(float *pArg) const
{
    return readAligned(pArg);
}

float Parcel::readFloat() const
{
    return readAligned<float>();
}

#if defined(__mips__) && defined(__mips_hard_float)

status_t Parcel::readDouble(double *pArg) const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}

double Parcel::readDouble() const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}

#else

status_t Parcel::readDouble(double *pArg) const
{
    return readAligned(pArg);
}

double Parcel::readDouble() const
{
    return readAligned<double>();
}

#endif

status_t Parcel::readBool(bool *pArg) const
{
    int8_t tmp;
    status_t ret = readInt8(&tmp);
    *pArg = (tmp != 0);
    return ret;
}

bool Parcel::readBool() const
{
    int8_t tmp;
    status_t err = readInt8(&tmp);

    if (err != OK) {
        return 0;
    }

    return tmp != 0;
}

const char* Parcel::readCString() const
{
    if (mDataPos < mDataSize) {
        const size_t avail = mDataSize-mDataPos;
        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
        // is the string's trailing NUL within the parcel's valid bounds?
        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
        if (eos) {
            const size_t len = eos - str;
            mDataPos += pad_size(len+1);
            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
            return str;
        }
    }
    return nullptr;
}

String16 Parcel::readString16() const
{
    size_t len;
    const char16_t* str = readString16Inplace(&len);
    if (str) return String16(str, len);
    ALOGE("Reading a NULL string not supported here.");
    return String16();
}

status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
{
    const int32_t start = dataPosition();
    int32_t size;
    status_t status = readInt32(&size);
    pArg->reset();

    if (status != OK || size < 0) {
        return status;
    }

    setDataPosition(start);
    pArg->reset(new (std::nothrow) String16());

    status = readString16(pArg->get());

    if (status != OK) {
        pArg->reset();
    }

    return status;
}

status_t Parcel::readString16(String16* pArg) const
{
    size_t len;
    const char16_t* str = readString16Inplace(&len);
    if (str) {
        pArg->setTo(str, len);
        return 0;
    } else {
        *pArg = String16();
        return UNEXPECTED_NULL;
    }
}

const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != nullptr) {
            return str;
        }
    }
    *outLen = 0;
    return nullptr;
}

status_t Parcel::readStrongBinder(sp<IBinder>* val) const
{
    status_t status = readNullableStrongBinder(val);
    if (status == OK && !val->get()) {
        status = UNEXPECTED_NULL;
    }
    return status;
}

status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
{
    return unflatten_binder(ProcessState::self(), *this, val);
}

sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    // Note that a lot of code in Android reads binders by hand with this
    // method, and that code has historically been ok with getting nullptr
    // back (while ignoring error codes).
    readNullableStrongBinder(&val);
    return val;
}

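// Reads an object header at the current data position. Objects that the
// kernel tracks (non-null binders, FDs, FD arrays and non-null buffers) must
// also appear at a matching offset in mObjects; mNextObjectHint caches the
// last lookup position so sequential reads stay cheap.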
template<typename T>
const T* Parcel::readObject(size_t *objects_offset) const
{
    const size_t DPOS = mDataPos;
    if (objects_offset != nullptr) {
        *objects_offset = 0;
    }

    if ((DPOS+sizeof(T)) <= mDataSize) {
        const T* obj = reinterpret_cast<const T*>(mData+DPOS);
        mDataPos = DPOS + sizeof(T);
        const binder_object_header *hdr = reinterpret_cast<const binder_object_header*>(obj);
        switch (hdr->type) {
            case BINDER_TYPE_BINDER:
            case BINDER_TYPE_WEAK_BINDER:
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE: {
                const flat_binder_object *flat_obj =
                    reinterpret_cast<const flat_binder_object*>(hdr);
                if (flat_obj->cookie == 0 && flat_obj->binder == 0) {
                    // When transferring a NULL binder object, we don't write it into
                    // the object list, so we don't want to check for it when
                    // reading.
                    ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                    return obj;
                }
                break;
            }
            case BINDER_TYPE_FD:
            case BINDER_TYPE_FDA:
                // fd (-arrays) must always appear in the meta-data list (eg touched by the kernel)
                break;
            case BINDER_TYPE_PTR: {
                const binder_buffer_object *buffer_obj =
                    reinterpret_cast<const binder_buffer_object*>(hdr);
                if ((void *)buffer_obj->buffer == nullptr) {
                    // null pointers can be returned directly - they're not written in the
                    // object list. All non-null buffers must appear in the objects list.
                    return obj;
                }
                break;
            }
        }
        // Ensure that this object is valid...
        binder_size_t* const OBJS = mObjects;
        const size_t N = mObjectsSize;
        size_t opos = mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                  this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                      this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                if (objects_offset != nullptr) {
                    *objects_offset = opos;
                }
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                      this, DPOS, opos);
                mNextObjectHint = opos+1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                if (objects_offset != nullptr) {
                    *objects_offset = opos;
                }
                return obj;
            }
        }
        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
              this, DPOS);
    }
    return nullptr;
}

template const flat_binder_object* Parcel::readObject<flat_binder_object>(size_t *objects_offset) const;

template const binder_fd_object* Parcel::readObject<binder_fd_object>(size_t *objects_offset) const;

template const binder_buffer_object* Parcel::readObject<binder_buffer_object>(size_t *objects_offset) const;

template const binder_fd_array_object* Parcel::readObject<binder_fd_array_object>(size_t *objects_offset) const;

bool Parcel::verifyBufferObject(const binder_buffer_object *buffer_obj,
                                size_t size, uint32_t flags, size_t parent,
                                size_t parentOffset) const {
    if (buffer_obj->length != size) {
        ALOGE("Buffer length %" PRIu64 " does not match expected size %zu.",
              static_cast<uint64_t>(buffer_obj->length), size);
        return false;
    }

    if (buffer_obj->flags != flags) {
        ALOGE("Buffer flags 0x%02X do not match expected flags 0x%02X.", buffer_obj->flags, flags);
        return false;
    }

    if (flags & BINDER_BUFFER_FLAG_HAS_PARENT) {
        if (buffer_obj->parent != parent) {
            ALOGE("Buffer parent %" PRIu64 " does not match expected parent %zu.",
                  static_cast<uint64_t>(buffer_obj->parent), parent);
            return false;
        }
        if (buffer_obj->parent_offset != parentOffset) {
            ALOGE("Buffer parent offset %" PRIu64 " does not match expected offset %zu.",
                  static_cast<uint64_t>(buffer_obj->parent_offset), parentOffset);
            return false;
        }

        // checked by kernel driver, but needed for fuzzer
        if (parent >= mObjectsSize) {
            ALOGE("Parent index %zu but only have %zu objects", parent, mObjectsSize);
            return false;
        }

        binder_buffer_object *parentBuffer =
            reinterpret_cast<binder_buffer_object*>(mData + mObjects[parent]);
        void* bufferInParent = *reinterpret_cast<void**>(
            reinterpret_cast<uint8_t*>(parentBuffer->buffer) + parentOffset);
        void* childBuffer = reinterpret_cast<void*>(buffer_obj->buffer);

        if (bufferInParent != childBuffer) {
            ALOGE("Buffer in parent %p differs from embedded buffer %p",
                  bufferInParent, childBuffer);
            android_errorWriteLog(0x534e4554, "179289794");
            return false;
        }
    }

    return true;
}

status_t Parcel::readBuffer(size_t buffer_size, size_t *buffer_handle,
                            uint32_t flags, size_t parent, size_t parentOffset,
                            const void **buffer_out) const {

    const binder_buffer_object* buffer_obj = readObject<binder_buffer_object>(buffer_handle);

    if (buffer_obj == nullptr || buffer_obj->hdr.type != BINDER_TYPE_PTR) {
        return BAD_VALUE;
    }

    if (!verifyBufferObject(buffer_obj, buffer_size, flags, parent, parentOffset)) {
        return BAD_VALUE;
    }

    // in read side, always use .buffer and .length.
    *buffer_out = reinterpret_cast<void*>(buffer_obj->buffer);

    return OK;
}

status_t Parcel::readNullableBuffer(size_t buffer_size, size_t *buffer_handle,
                                    const void **buffer_out) const
{
    return readBuffer(buffer_size, buffer_handle,
                      0 /* flags */, 0 /* parent */, 0 /* parentOffset */,
                      buffer_out);
}

status_t Parcel::readBuffer(size_t buffer_size, size_t *buffer_handle,
                            const void **buffer_out) const
{
    status_t status = readNullableBuffer(buffer_size, buffer_handle, buffer_out);
    if (status == OK && *buffer_out == nullptr) {
        return UNEXPECTED_NULL;
    }
    return status;
}

status_t Parcel::readEmbeddedBuffer(size_t buffer_size,
                                    size_t *buffer_handle,
                                    size_t parent_buffer_handle,
                                    size_t parent_offset,
                                    const void **buffer_out) const
{
    status_t status = readNullableEmbeddedBuffer(buffer_size, buffer_handle,
                                                 parent_buffer_handle,
                                                 parent_offset, buffer_out);
    if (status == OK && *buffer_out == nullptr) {
        return UNEXPECTED_NULL;
    }
    return status;
}

status_t Parcel::readNullableEmbeddedBuffer(size_t buffer_size,
                                            size_t *buffer_handle,
                                            size_t parent_buffer_handle,
                                            size_t parent_offset,
                                            const void **buffer_out) const
{
    return readBuffer(buffer_size, buffer_handle, BINDER_BUFFER_FLAG_HAS_PARENT,
                      parent_buffer_handle, parent_offset, buffer_out);
}

status_t Parcel::readEmbeddedNativeHandle(size_t parent_buffer_handle,
                                          size_t parent_offset,
                                          const native_handle_t **handle) const
{
    status_t status = readNullableEmbeddedNativeHandle(parent_buffer_handle, parent_offset, handle);
    if (status == OK && *handle == nullptr) {
        return UNEXPECTED_NULL;
    }
    return status;
}

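// Reads back a native_handle_t written by writeNativeHandleNoDup(): reads the
// size, the handle buffer and the matching BINDER_TYPE_FDA object, and
// validates the fd/int counts and the FD array's parent and offset before
// returning the in-place handle.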
status_t Parcel::readNullableNativeHandleNoDup(const native_handle_t **handle,
                                               bool embedded,
                                               size_t parent_buffer_handle,
                                               size_t parent_offset) const
{
    uint64_t nativeHandleSize;
    status_t status = readUint64(&nativeHandleSize);
    if (status != OK) {
        return BAD_VALUE;
    }

    if (nativeHandleSize == 0) {
        // If !embedded, then parent_* vars are 0 and don't actually correspond
        // to anything. In that case, we're actually reading this data into
        // writable memory, and the handle returned from here will actually be
        // used (rather than be ignored).
        if (embedded) {
            if (!validateBufferParent(parent_buffer_handle, parent_offset)) {
                ALOGE("Buffer in parent %zu offset %zu invalid.", parent_buffer_handle, parent_offset);
                return BAD_VALUE;
            }

            binder_buffer_object *parentBuffer =
                reinterpret_cast<binder_buffer_object*>(mData + mObjects[parent_buffer_handle]);

            void* bufferInParent = *reinterpret_cast<void**>(
                reinterpret_cast<uint8_t*>(parentBuffer->buffer) + parent_offset);

            if (bufferInParent != nullptr) {
                ALOGE("Buffer in (handle) parent %p is not nullptr.", bufferInParent);
                android_errorWriteLog(0x534e4554, "179289794");
                return BAD_VALUE;
            }
        }

        *handle = nullptr;
        return status;
    }

    if (nativeHandleSize < sizeof(native_handle_t) || nativeHandleSize > std::numeric_limits<uint32_t>::max()) {
        ALOGE("Invalid native_handle_t size: %" PRIu64, nativeHandleSize);
        return BAD_VALUE;
    }

    size_t fdaParent;
    if (embedded) {
        status = readNullableEmbeddedBuffer(nativeHandleSize, &fdaParent,
                                            parent_buffer_handle, parent_offset,
                                            reinterpret_cast<const void**>(handle));
    } else {
        status = readNullableBuffer(nativeHandleSize, &fdaParent,
                                    reinterpret_cast<const void**>(handle));
    }

    if (status != OK) {
        return status;
    }

    if (*handle == nullptr) {
        // null handle already read above
        ALOGE("Expecting non-null handle buffer");
        return BAD_VALUE;
    }

    int numFds = (*handle)->numFds;
    int numInts = (*handle)->numInts;

    if (numFds < 0 || numFds > NATIVE_HANDLE_MAX_FDS) {
        ALOGE("Received native_handle with invalid number of fds.");
        return BAD_VALUE;
    }

    if (numInts < 0 || numInts > NATIVE_HANDLE_MAX_INTS) {
        ALOGE("Received native_handle with invalid number of ints.");
        return BAD_VALUE;
    }

    if (nativeHandleSize != (sizeof(native_handle_t) + ((numFds + numInts) * sizeof(int)))) {
        ALOGE("Size of native_handle doesn't match.");
        return BAD_VALUE;
    }

    const binder_fd_array_object* fd_array_obj = readObject<binder_fd_array_object>();

    if (fd_array_obj == nullptr || fd_array_obj->hdr.type != BINDER_TYPE_FDA) {
        ALOGE("Can't find file-descriptor array object.");
        return BAD_VALUE;
    }

    if (static_cast<int>(fd_array_obj->num_fds) != numFds) {
        ALOGE("Number of native handles does not match.");
        return BAD_VALUE;
    }

    if (fd_array_obj->parent != fdaParent) {
        ALOGE("Parent handle of file-descriptor array not correct.");
        return BAD_VALUE;
    }

    if (fd_array_obj->parent_offset != offsetof(native_handle_t, data)) {
        ALOGE("FD array object not properly offset in parent.");
        return BAD_VALUE;
    }

    return OK;
}

status_t Parcel::readNullableEmbeddedNativeHandle(size_t parent_buffer_handle,
                                                  size_t parent_offset,
                                                  const native_handle_t **handle) const
{
    return readNullableNativeHandleNoDup(handle, true /* embedded */, parent_buffer_handle,
                                         parent_offset);
}

status_t Parcel::readNativeHandleNoDup(const native_handle_t **handle) const
{
    status_t status = readNullableNativeHandleNoDup(handle);
    if (status == OK && *handle == nullptr) {
        return UNEXPECTED_NULL;
    }
    return status;
}

status_t Parcel::readNullableNativeHandleNoDup(const native_handle_t **handle) const
{
    return readNullableNativeHandleNoDup(handle, false /* embedded */);
}

void Parcel::closeFileDescriptors()
{
    size_t i = mObjectsSize;
    if (i > 0) {
        //ALOGI("Closing file descriptors for %zu objects...", i);
    }
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
        if (flat->hdr.type == BINDER_TYPE_FD) {
            //ALOGI("Closing fd: %ld", flat->handle);
            close(flat->handle);
        }
    }
}

uintptr_t Parcel::ipcData() const
{
    return reinterpret_cast<uintptr_t>(mData);
}

size_t Parcel::ipcDataSize() const
{
    return mDataSize > mDataPos ? mDataSize : mDataPos;
}

uintptr_t Parcel::ipcObjects() const
{
    return reinterpret_cast<uintptr_t>(mObjects);
}

size_t Parcel::ipcObjectsCount() const
{
    return mObjectsSize;
}

#define BUFFER_ALIGNMENT_BYTES 8
size_t Parcel::ipcBufferSize() const
{
    size_t totalBuffersSize = 0;
    // Add size for BINDER_TYPE_PTR
    size_t i = mObjectsSize;
    while (i > 0) {
        i--;
        const binder_buffer_object* buffer
            = reinterpret_cast<binder_buffer_object*>(mData+mObjects[i]);
        if (buffer->hdr.type == BINDER_TYPE_PTR) {
            /* The binder kernel driver requires each buffer to be 8-byte
             * aligned */
            size_t alignedSize = (buffer->length + (BUFFER_ALIGNMENT_BYTES - 1))
                & ~(BUFFER_ALIGNMENT_BYTES - 1);
            if (alignedSize > SIZE_MAX - totalBuffersSize) {
                ALOGE("ipcBuffersSize(): invalid buffer sizes.");
                return 0;
            }
            totalBuffersSize += alignedSize;
        }
    }
    return totalBuffersSize;
}

void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
    const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
{
    binder_size_t minOffset = 0;
    freeDataNoInit();
    mError = NO_ERROR;
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
    mDataPos = 0;
    ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
    mObjects = const_cast<binder_size_t*>(objects);
    mObjectsSize = mObjectsCapacity = objectsCount;
    mNextObjectHint = 0;
    clearCache();
    mOwner = relFunc;
    mOwnerCookie = relCookie;
    for (size_t i = 0; i < mObjectsSize; i++) {
        binder_size_t offset = mObjects[i];
        if (offset < minOffset) {
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
}

void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
{
    to << "Parcel(";

    if (errorCheck() != NO_ERROR) {
        const status_t err = errorCheck();
        to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
    } else if (dataSize() > 0) {
        const uint8_t* DATA = data();
        to << indent << HexDump(DATA, dataSize()) << dedent;
        const binder_size_t* OBJS = objects();
        const size_t N = objectsCount();
        for (size_t i=0; i<N; i++) {
            const flat_binder_object* flat
                = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
            if (flat->hdr.type == BINDER_TYPE_PTR) {
                const binder_buffer_object* buffer
                    = reinterpret_cast<const binder_buffer_object*>(DATA+OBJS[i]);
                HexDump bufferDump((const uint8_t*)buffer->buffer, (size_t)buffer->length);
                bufferDump.setSingleLineCutoff(0);
                to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << " (buffer size " << buffer->length << "):";
                to << indent << bufferDump << dedent;
            } else {
                to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
                   << TypeCode(flat->hdr.type & 0x7f7f7f00)
                   << " = " << flat->binder;
            }
        }
    } else {
        to << "NULL";
    }

    to << ")";
}

void Parcel::releaseObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const flat_binder_object* flat
            = reinterpret_cast<flat_binder_object*>(data+objects[i]);
        release_object(proc, *flat, this);
    }
}

void Parcel::acquireObjects()
{
    const sp<ProcessState> proc(ProcessState::self());
    size_t i = mObjectsSize;
    uint8_t* const data = mData;
    binder_size_t* const objects = mObjects;
    while (i > 0) {
        i--;
        const binder_object_header* flat
            = reinterpret_cast<binder_object_header*>(data+objects[i]);
        acquire_object(proc, *flat, this);
    }
}

void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}

void Parcel::freeDataNoInit()
{
    if (mOwner) {
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
    } else {
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            gParcelGlobalAllocSize -= mDataCapacity;
            gParcelGlobalAllocCount--;
            if (mDeallocZero) {
                zeroMemory(mData, mDataSize);
            }
            free(mData);
        }
        if (mObjects) free(mObjects);
    }
}

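// Grows the data buffer to 1.5x the size needed for |len| additional bytes,
// with explicit overflow checks before the multiplication.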
status_t Parcel::growData(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }
    if (len > SIZE_MAX - mDataSize) return NO_MEMORY; // overflow
    if (mDataSize + len > SIZE_MAX / 3) return NO_MEMORY; // overflow
    size_t newSize = ((mDataSize+len)*3)/2;
    return continueWrite(newSize);
}

static uint8_t* reallocZeroFree(uint8_t* data, size_t oldCapacity, size_t newCapacity, bool zero) {
    if (!zero) {
        return (uint8_t*)realloc(data, newCapacity);
    }
    uint8_t* newData = (uint8_t*)malloc(newCapacity);
    if (!newData) {
        return nullptr;
    }

    memcpy(newData, data, std::min(oldCapacity, newCapacity));
    zeroMemory(data, oldCapacity);
    free(data);
    return newData;
}

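// Resizes the parcel to |desired| bytes. Three cases: data owned by someone
// else (mOwner set) is copied into a fresh allocation we own; data we already
// own is realloc'd (zeroing the old block first when the parcel is marked
// sensitive); and an empty parcel gets its first allocation.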
status_t Parcel::continueWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize = mObjectsSize;
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            while (objectsSize > 0) {
                if (mObjects[objectsSize-1] < desired)
                    break;
                objectsSize--;
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // possession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = nullptr;

        if (objectsSize) {
            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // Little hack to only acquire references on objects
            // we will be keeping.
            size_t oldObjectsSize = mObjectsSize;
            mObjectsSize = objectsSize;
            acquireObjects();
            mObjectsSize = oldObjectsSize;
        }

        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
        if (objects && mObjects) {
            memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
        }
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
        mOwner = nullptr;

        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mObjects = objects;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        mObjectsSize = mObjectsCapacity = objectsSize;
        mNextObjectHint = 0;

        clearCache();
    } else if (mData) {
        if (objectsSize < mObjectsSize) {
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i=objectsSize; i<mObjectsSize; i++) {
                const flat_binder_object* flat
                    = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
                if (flat->hdr.type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    mFdsKnown = false;
                }
                release_object(proc, *flat, this);
            }

            if (objectsSize == 0) {
                free(mObjects);
                mObjects = nullptr;
            } else {
                binder_size_t* objects =
                    (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
                if (objects) {
                    mObjects = objects;
                }
            }
            mObjectsSize = objectsSize;
            mNextObjectHint = 0;

            clearCache();
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = reallocZeroFree(mData, mDataCapacity, desired, mDeallocZero);
            if (data) {
                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                          desired);
                gParcelGlobalAllocSize += desired;
                gParcelGlobalAllocSize -= mDataCapacity;
                mData = data;
                mDataCapacity = desired;
            } else {
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data. Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        if (!(mDataCapacity == 0 && mObjects == nullptr
              && mObjectsCapacity == 0)) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}

void Parcel::initState()
{
    LOG_ALLOC("Parcel %p: initState", this);
    mError = NO_ERROR;
    mData = nullptr;
    mDataSize = 0;
    mDataCapacity = 0;
    mDataPos = 0;
    ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
    ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
    mObjects = nullptr;
    mObjectsSize = 0;
    mObjectsCapacity = 0;
    mNextObjectHint = 0;
    mHasFds = false;
    mFdsKnown = true;
    mAllowFds = true;
    mDeallocZero = false;
    mOwner = nullptr;
    clearCache();

    // racing multiple init leads only to multiple identical writes
    if (gMaxFds == 0) {
        struct rlimit result;
        if (!getrlimit(RLIMIT_NOFILE, &result)) {
            gMaxFds = (size_t)result.rlim_cur;
            //ALOGI("parcel fd limit set to %zu", gMaxFds);
        } else {
            ALOGW("Unable to getrlimit: %s", strerror(errno));
            gMaxFds = 1024;
        }
    }
}

void Parcel::scanForFds() const
{
    bool hasFds = false;
    for (size_t i=0; i<mObjectsSize; i++) {
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
        if (flat->hdr.type == BINDER_TYPE_FD) {
            hasFds = true;
            break;
        }
    }
    mHasFds = hasFds;
    mFdsKnown = true;
}

} // namespace hardware
} // namespace android