1 /*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <gtest/gtest.h>
18
19 #include <elf.h>
20 #include <limits.h>
21 #include <malloc.h>
22 #include <pthread.h>
23 #include <semaphore.h>
24 #include <signal.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/auxv.h>
30 #include <sys/cdefs.h>
31 #include <sys/prctl.h>
32 #include <sys/types.h>
33 #include <sys/wait.h>
34 #include <unistd.h>
35
36 #include <algorithm>
37 #include <atomic>
38 #include <functional>
39 #include <string>
40 #include <thread>
41 #include <unordered_map>
42 #include <utility>
43 #include <vector>
44
45 #include <tinyxml2.h>
46
47 #include <android-base/file.h>
48 #include <android-base/test_utils.h>
49
50 #include "DoNotOptimize.h"
51 #include "utils.h"
52
53 #if defined(__BIONIC__)
54
55 #include "SignalUtils.h"
56 #include "dlext_private.h"
57
58 #include "platform/bionic/malloc.h"
59 #include "platform/bionic/mte.h"
60 #include "platform/bionic/reserved_signals.h"
61 #include "private/bionic_config.h"
62
63 #define HAVE_REALLOCARRAY 1
64
65 #elif defined(__GLIBC__)
66
67 #define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)
68
69 #elif defined(ANDROID_HOST_MUSL)
70
71 #define HAVE_REALLOCARRAY 1
72
73 #endif
74
// A plain allocation must succeed and report a usable size of at
// least the number of bytes requested.
TEST(malloc, malloc_std) {
  void* ptr = malloc(100);
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 100U);
  free(ptr);
}
82
// An impossible request must fail cleanly with ENOMEM rather than wrap.
TEST(malloc, malloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  void* ptr = malloc(SIZE_MAX);
  ASSERT_EQ(nullptr, ptr);
  ASSERT_ERRNO(ENOMEM);
}
89
// calloc must hand back zero-filled memory of at least the requested size.
TEST(malloc, calloc_std) {
  const size_t alloc_len = 100;
  char* ptr = static_cast<char*>(calloc(1, alloc_len));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), alloc_len);
  for (size_t i = 0; i < alloc_len; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}
101
// calloc must zero memory even when per-thread memory init is disabled.
TEST(malloc, calloc_mem_init_disabled) {
#if defined(__BIONIC__)
  // calloc should still zero memory if mem-init is disabled.
  // With jemalloc the mallopts will fail but that shouldn't affect the
  // execution of the test.
  mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
  size_t alloc_len = 100;
  char* ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
  // Fix: the original read the memory without first checking that the
  // allocation succeeded, which would be undefined behavior on failure.
  ASSERT_TRUE(ptr != nullptr);
  for (size_t i = 0; i < alloc_len; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
  mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
119
// A negative count converts to a huge size_t, so calloc must fail with ENOMEM.
TEST(malloc, calloc_illegal) {
  SKIP_WITH_HWASAN;
  errno = 0;
  void* ptr = calloc(-1, 100);
  ASSERT_EQ(nullptr, ptr);
  ASSERT_ERRNO(ENOMEM);
}
126
// Any nmemb*size product that overflows size_t must fail with ENOMEM.
TEST(malloc, calloc_overflow) {
  SKIP_WITH_HWASAN;
  const std::pair<size_t, size_t> overflow_cases[] = {
      {1, SIZE_MAX},
      {SIZE_MAX, SIZE_MAX},
      {2, SIZE_MAX},
      {SIZE_MAX, 2},
  };
  for (const auto& [nmemb, size] : overflow_cases) {
    errno = 0;
    ASSERT_EQ(nullptr, calloc(nmemb, size));
    ASSERT_ERRNO(ENOMEM);
  }
}
142
// memalign must accept arbitrary alignment values; the effective alignment
// is at least the largest power of two not exceeding the request.
TEST(malloc, memalign_multiple) {
  SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
  for (size_t i = 0; i <= 12; i++) {
    const size_t pow2 = 1U << i;
    for (size_t alignment = pow2; alignment < 2 * pow2; alignment++) {
      char* ptr = static_cast<char*>(memalign(alignment, 100));
      ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
      ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
      ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % pow2)
          << "Failed at alignment " << alignment;
      free(ptr);
    }
  }
}
157
// An impossibly large aligned allocation must fail.
TEST(malloc, memalign_overflow) {
  SKIP_WITH_HWASAN;
  void* ptr = memalign(4096, SIZE_MAX);
  ASSERT_EQ(nullptr, ptr);
}
162
// memalign must not fail merely because the alignment is not a power of two.
TEST(malloc, memalign_non_power2) {
  SKIP_WITH_HWASAN;
  for (size_t align = 0; align <= 256; align++) {
    void* ptr = memalign(align, 1024);
    ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
    free(ptr);
  }
}
172
// Memalign at each power-of-two alignment, then realloc the block several
// times (grow, grow, shrink), verifying the old contents survive each move.
TEST(malloc, memalign_realloc) {
  for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
    char* ptr = reinterpret_cast<char*>(memalign(alignment, 100));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(100U, malloc_usable_size(ptr));
    // Fix: use the unsigned uintptr_t for the modulo check; the original's
    // signed intptr_t cast was compared against an unsigned 0U.
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % alignment);
    memset(ptr, 0x23, 100);

    ptr = reinterpret_cast<char*>(realloc(ptr, 200));
    // Fix: the original asserted ptr != nullptr twice here; once is enough.
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(200U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 100; i++) {
      ASSERT_EQ(0x23, ptr[i]);
    }
    memset(ptr, 0x45, 200);

    ptr = reinterpret_cast<char*>(realloc(ptr, 300));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(300U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 200; i++) {
      ASSERT_EQ(0x45, ptr[i]);
    }
    memset(ptr, 0x67, 300);

    // Shrink: the first 250 bytes must be preserved.
    ptr = reinterpret_cast<char*>(realloc(ptr, 250));
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(250U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 250; i++) {
      ASSERT_EQ(0x67, ptr[i]);
    }
    free(ptr);
  }
}
208
// Growing a malloc'd block with realloc must preserve its contents.
TEST(malloc, malloc_realloc_larger) {
  char* ptr = static_cast<char*>(malloc(100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 100U);
  memset(ptr, 67, 100);

  ptr = static_cast<char*>(realloc(ptr, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 200U);
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}
224
// Shrinking a malloc'd block with realloc must preserve the surviving bytes.
TEST(malloc, malloc_realloc_smaller) {
  char* ptr = static_cast<char*>(malloc(200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 200U);
  memset(ptr, 67, 200);

  ptr = static_cast<char*>(realloc(ptr, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 100U);
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}
240
// A chain of shrinking and growing reallocs on a malloc'd block must keep
// the previously-written bytes intact at every step.
TEST(malloc, malloc_multiple_realloc) {
  char* ptr = static_cast<char*>(malloc(200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 200U);
  memset(ptr, 0x23, 200);

  // Shrink to 100: first 100 bytes must survive.
  ptr = static_cast<char*>(realloc(ptr, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 100U);
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  // Shrink again to 50.
  ptr = static_cast<char*>(realloc(ptr, 50));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 50U);
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  // Grow to 150: only the 50 live bytes are guaranteed.
  ptr = static_cast<char*>(realloc(ptr, 150));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 150U);
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  memset(ptr, 0x23, 150);

  // Grow to 425: the full 150 bytes must survive.
  ptr = static_cast<char*>(realloc(ptr, 425));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 425U);
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  free(ptr);
}
278
// Growing a calloc'd block must preserve its zeroed contents.
TEST(malloc, calloc_realloc_larger) {
  char* ptr = static_cast<char*>(calloc(1, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 100U);

  ptr = static_cast<char*>(realloc(ptr, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 200U);
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}
293
// Shrinking a calloc'd block must preserve the surviving zeroed bytes.
TEST(malloc, calloc_realloc_smaller) {
  char* ptr = static_cast<char*>(calloc(1, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 200U);

  ptr = static_cast<char*>(realloc(ptr, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 100U);
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}
308
// A chain of shrinking and growing reallocs on a calloc'd block must keep
// the zeroed contents intact at every step.
TEST(malloc, calloc_multiple_realloc) {
  char* ptr = static_cast<char*>(calloc(1, 200));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 200U);

  // Shrink to 100: first 100 bytes must still be zero.
  ptr = static_cast<char*>(realloc(ptr, 100));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 100U);
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  // Shrink again to 50.
  ptr = static_cast<char*>(realloc(ptr, 50));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 50U);
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  // Grow to 150: only the 50 live bytes are guaranteed to be zero.
  ptr = static_cast<char*>(realloc(ptr, 150));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 150U);
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  memset(ptr, 0, 150);

  // Grow to 425: the full zeroed 150 bytes must survive.
  ptr = static_cast<char*>(realloc(ptr, 425));
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 425U);
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}
345
// realloc to SIZE_MAX must fail with ENOMEM whether or not the input
// pointer is null, and must leave the original block usable.
TEST(malloc, realloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
  ASSERT_ERRNO(ENOMEM);

  void* ptr = malloc(100);
  ASSERT_NE(nullptr, ptr);
  errno = 0;
  ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
  ASSERT_ERRNO(ENOMEM);
  free(ptr);
}
358
359 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
360 extern "C" void* pvalloc(size_t);
361 extern "C" void* valloc(size_t);
362 #endif
363
TEST(malloc,pvalloc_std)364 TEST(malloc, pvalloc_std) {
365 #if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
366 size_t pagesize = sysconf(_SC_PAGESIZE);
367 void* ptr = pvalloc(100);
368 ASSERT_TRUE(ptr != nullptr);
369 ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
370 ASSERT_LE(pagesize, malloc_usable_size(ptr));
371 free(ptr);
372 #else
373 GTEST_SKIP() << "pvalloc not supported.";
374 #endif
375 }
376
// pvalloc must fail on a size that cannot be rounded up to pages.
TEST(malloc, pvalloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  void* ptr = pvalloc(SIZE_MAX);
  ASSERT_EQ(nullptr, ptr);
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}
384
// valloc returns page-aligned memory of the requested size.
TEST(malloc, valloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  const size_t pagesize = sysconf(_SC_PAGESIZE);
  void* ptr = valloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) & (pagesize - 1));
  free(ptr);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}
396
// valloc must fail on an impossibly large size.
TEST(malloc, valloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  void* ptr = valloc(SIZE_MAX);
  ASSERT_EQ(nullptr, ptr);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}
404
// Sanity-check the malloc_info(3) XML output for each known allocator
// (jemalloc, scudo, debug-malloc).
TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  // Fix: XMLElement::Attribute() returns nullptr when the attribute is
  // missing; constructing std::string from nullptr is undefined behavior.
  const char* version_attr = root->Attribute("version");
  ASSERT_TRUE(version_attr != nullptr);
  std::string version(version_attr);
  if (version == "jemalloc-1") {
    // jemalloc: a list of <heap> elements, each with totals and per-bin stats.
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    // scudo: a flat list of <alloc size=... count=...> elements.
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
471
// The per-heap totals reported by malloc_info(3) should be consistent with
// mallinfo().uordblks sampled immediately before and after the call.
TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  // Fix: XMLElement::Attribute() returns nullptr when the attribute is
  // missing; constructing std::string from nullptr is undefined behavior.
  const char* version_attr = root->Attribute("version");
  ASSERT_TRUE(version_attr != nullptr);
  std::string version(version_attr);
  if (version == "jemalloc-1") {
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}
539
// Verify calloc returns zeroed memory even when it reuses a chunk whose
// previous contents were deliberately dirtied.
TEST(malloc, calloc_usable_size) {
  for (size_t size = 1; size <= 2048; size++) {
    // Dirty an allocation of this size so an allocator that reuses the
    // chunk without zeroing would hand back non-zero bytes.
    void* pointer = malloc(size);
    ASSERT_TRUE(pointer != nullptr);
    memset(pointer, 0xeb, malloc_usable_size(pointer));
    free(pointer);

    // We should get a previous pointer that has been set to non-zero.
    // If calloc does not zero out all of the data, this will fail.
    uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
    // Fix: the original asserted on the stale, already-freed `pointer`
    // instead of the fresh calloc result.
    ASSERT_TRUE(zero_mem != nullptr);
    size_t usable_size = malloc_usable_size(zero_mem);
    for (size_t i = 0; i < usable_size; i++) {
      ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
    }
    free(zero_mem);
  }
}
558
// malloc(0) must return a unique, freeable, non-null pointer.
TEST(malloc, malloc_0) {
  void* ptr = malloc(0);
  ASSERT_NE(nullptr, ptr);
  free(ptr);
}
564
// calloc(0, 0) must return a freeable, non-null pointer.
TEST(malloc, calloc_0_0) {
  void* ptr = calloc(0, 0);
  ASSERT_NE(nullptr, ptr);
  free(ptr);
}
570
// calloc(0, 1) must return a freeable, non-null pointer.
TEST(malloc, calloc_0_1) {
  void* ptr = calloc(0, 1);
  ASSERT_NE(nullptr, ptr);
  free(ptr);
}
576
// calloc(1, 0) must return a freeable, non-null pointer.
TEST(malloc, calloc_1_0) {
  void* ptr = calloc(1, 0);
  ASSERT_NE(nullptr, ptr);
  free(ptr);
}
582
TEST(malloc, realloc_nullptr_0) {
  // realloc(nullptr, size) is actually malloc(size).
  void* ptr = realloc(nullptr, 0);
  ASSERT_NE(nullptr, ptr);
  free(ptr);
}
589
TEST(malloc, realloc_0) {
  void* ptr = malloc(1024);
  ASSERT_NE(nullptr, ptr);
  // realloc(p, 0) is actually free(p).
  void* shrunk = realloc(ptr, 0);
  ASSERT_EQ(nullptr, shrunk);
}
597
598 constexpr size_t MAX_LOOPS = 200;
599
600 // Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc,verify_alignment)601 TEST(malloc, verify_alignment) {
602 uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
603 uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
604 long double** values_ldouble = new long double*[MAX_LOOPS];
605 // Use filler to attempt to force the allocator to get potentially bad alignments.
606 void** filler = new void*[MAX_LOOPS];
607
608 for (size_t i = 0; i < MAX_LOOPS; i++) {
609 // Check uint32_t pointers.
610 filler[i] = malloc(1);
611 ASSERT_TRUE(filler[i] != nullptr);
612
613 values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
614 ASSERT_TRUE(values_32[i] != nullptr);
615 *values_32[i] = i;
616 ASSERT_EQ(*values_32[i], i);
617 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));
618
619 free(filler[i]);
620 }
621
622 for (size_t i = 0; i < MAX_LOOPS; i++) {
623 // Check uint64_t pointers.
624 filler[i] = malloc(1);
625 ASSERT_TRUE(filler[i] != nullptr);
626
627 values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
628 ASSERT_TRUE(values_64[i] != nullptr);
629 *values_64[i] = 0x1000 + i;
630 ASSERT_EQ(*values_64[i], 0x1000 + i);
631 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));
632
633 free(filler[i]);
634 }
635
636 for (size_t i = 0; i < MAX_LOOPS; i++) {
637 // Check long double pointers.
638 filler[i] = malloc(1);
639 ASSERT_TRUE(filler[i] != nullptr);
640
641 values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
642 ASSERT_TRUE(values_ldouble[i] != nullptr);
643 *values_ldouble[i] = 5.5 + i;
644 ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
645 // 32 bit glibc has a long double size of 12 bytes, so hardcode the
646 // required alignment to 0x7.
647 #if !defined(__BIONIC__) && !defined(__LP64__)
648 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
649 #else
650 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
651 #endif
652
653 free(filler[i]);
654 }
655
656 for (size_t i = 0; i < MAX_LOOPS; i++) {
657 free(values_32[i]);
658 free(values_64[i]);
659 free(values_ldouble[i]);
660 }
661
662 delete[] filler;
663 delete[] values_32;
664 delete[] values_64;
665 delete[] values_ldouble;
666 }
667
// An unrecognized mallopt option must be rejected without touching errno.
TEST(malloc, mallopt_smoke) {
#if defined(__BIONIC__)
  const int bogus_option = -1000;
  errno = 0;
  ASSERT_EQ(0, mallopt(bogus_option, 1));
  // mallopt doesn't set errno.
  ASSERT_ERRNO(0);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
678
// M_DECAY_TIME must accept repeated toggling between its valid values.
TEST(malloc, mallopt_decay) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  for (int value : {-1, 1, 0, 1, 0, -1}) {
    ASSERT_EQ(1, mallopt(M_DECAY_TIME, value));
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
692
// M_PURGE must report success.
TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
701
// M_PURGE_ALL must report success.
TEST(malloc, mallopt_purge_all) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  ASSERT_EQ(1, mallopt(M_PURGE_ALL, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
710
// M_LOG_STATS must report success.
TEST(malloc, mallopt_log_stats) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  ASSERT_EQ(1, mallopt(M_LOG_STATS, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
719
720 // Verify that all of the mallopt values are unique.
TEST(malloc,mallopt_unique_params)721 TEST(malloc, mallopt_unique_params) {
722 #if defined(__BIONIC__)
723 std::vector<std::pair<int, std::string>> params{
724 std::make_pair(M_DECAY_TIME, "M_DECAY_TIME"),
725 std::make_pair(M_PURGE, "M_PURGE"),
726 std::make_pair(M_PURGE_ALL, "M_PURGE_ALL"),
727 std::make_pair(M_MEMTAG_TUNING, "M_MEMTAG_TUNING"),
728 std::make_pair(M_THREAD_DISABLE_MEM_INIT, "M_THREAD_DISABLE_MEM_INIT"),
729 std::make_pair(M_CACHE_COUNT_MAX, "M_CACHE_COUNT_MAX"),
730 std::make_pair(M_CACHE_SIZE_MAX, "M_CACHE_SIZE_MAX"),
731 std::make_pair(M_TSDS_COUNT_MAX, "M_TSDS_COUNT_MAX"),
732 std::make_pair(M_BIONIC_ZERO_INIT, "M_BIONIC_ZERO_INIT"),
733 std::make_pair(M_BIONIC_SET_HEAP_TAGGING_LEVEL, "M_BIONIC_SET_HEAP_TAGGING_LEVEL"),
734 std::make_pair(M_LOG_STATS, "M_LOG_STATS"),
735 };
736
737 std::unordered_map<int, std::string> all_params;
738 for (const auto& param : params) {
739 EXPECT_TRUE(all_params.count(param.first) == 0)
740 << "mallopt params " << all_params[param.first] << " and " << param.second
741 << " have the same value " << param.first;
742 all_params.insert(param);
743 }
744 #else
745 GTEST_SKIP() << "bionic-only test";
746 #endif
747 }
748
#if defined(__BIONIC__)
// Set *allocator_scudo to true iff the current native allocator identifies
// itself as scudo in the malloc_info(3) XML version attribute.
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  if (malloc_info(0, fp) != 0) {
    *allocator_scudo = false;
    // Fix: the original leaked fp on this early-return path.
    fclose(fp);
    return;
  }
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  // Fix: Attribute() returns nullptr when the attribute is missing, which
  // previously fed nullptr into the std::string constructor (UB).
  const char* version_attr = root->Attribute("version");
  ASSERT_TRUE(version_attr != nullptr);
  *allocator_scudo = (strcmp(version_attr, "scudo-1") == 0);
}
#endif
775
// Exercise the mallopt options that only the scudo allocator understands.
TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  // Fix: initialize the flag; if GetAllocatorVersion bails out via a fatal
  // ASSERT it returns without writing, and the read below would be of an
  // uninitialized bool.
  bool allocator_scudo = false;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
  ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
  ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
791
// reallocarray must detect nmemb*size overflow in either argument order,
// even when the wrapped product would be small enough to allocate.
TEST(malloc, reallocarray_overflow) {
#if HAVE_REALLOCARRAY
  // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
  const size_t a = static_cast<size_t>(INTPTR_MIN + 4);
  const size_t b = 2;

  errno = 0;
  ASSERT_EQ(nullptr, reallocarray(nullptr, a, b));
  ASSERT_ERRNO(ENOMEM);

  errno = 0;
  ASSERT_EQ(nullptr, reallocarray(nullptr, b, a));
  ASSERT_ERRNO(ENOMEM);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
809
// A non-overflowing reallocarray behaves like realloc(nullptr, nmemb * size).
TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  void* ptr = reallocarray(nullptr, 2, 32);
  ASSERT_NE(nullptr, ptr);
  ASSERT_GE(malloc_usable_size(ptr), 64U);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}
819
// mallinfo().uordblks should grow when an allocation actually reaches the
// global accounting (i.e. is not absorbed by a thread cache).
TEST(malloc, mallinfo) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  static constexpr size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // Allocations can be satisfied from a thread cache without changing the
    // global totals (jemalloc counts cached allocations against the total),
    // so retry until one visibly grows uordblks.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs && !pass; i++) {
      size_t before = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t after = mallinfo().uordblks;
      // The counters can also shrink due to compaction or freeing of cached
      // data, so only accept growth of at least the new block's usable size.
      pass = (before != after) && (after >= before + malloc_usable_size(ptrs[i]));
    }
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
864
// Verify that mallinfo2 mirrors mallinfo field-for-field, and that its
// uordblks grows when an allocation reaches the global accounting.
TEST(malloc, mallinfo2) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
  static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};

  static constexpr size_t kMaxAllocs = 50;

  // Assert that every field of mallinfo() matches the corresponding field
  // of mallinfo2(). Factored out: the original duplicated these ten
  // comparisons twice per loop iteration.
  auto verify_mallinfo_matches = [](const struct mallinfo& info,
                                    const struct mallinfo2& info2) {
    ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
    ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
    ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
    ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
    ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
    ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
    ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
    ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
    ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
    ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);
  };

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of the
    // memory allocated.
    // jemalloc implementations counts the thread cache allocations against
    // total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      struct mallinfo info = mallinfo();
      struct mallinfo2 info2 = mallinfo2();
      ASSERT_NO_FATAL_FAILURE(verify_mallinfo_matches(info, info2));

      size_t allocated = info2.uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      info = mallinfo();
      info2 = mallinfo2();
      ASSERT_NO_FATAL_FAILURE(verify_mallinfo_matches(info, info2));

      size_t new_allocated = info2.uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least allocation size.
        // Sometimes the mallinfo2 numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after "
                      << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}
935
936 template <typename Type>
VerifyAlignment(Type * floating)937 void __attribute__((optnone)) VerifyAlignment(Type* floating) {
938 size_t expected_alignment = alignof(Type);
939 if (expected_alignment != 0) {
940 ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
941 << "Expected alignment " << expected_alignment << " ptr value "
942 << static_cast<void*>(floating);
943 }
944 }
945
// Allocates many objects of Type via operator new, malloc, and std::vector,
// verifying each resulting pointer satisfies alignof(Type). optnone keeps the
// allocations and checks from being optimized away.
template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    // VerifyAlignment uses ASSERT_*, so stop on the first fatal failure.
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}
988
989 #if defined(__ANDROID__)
AndroidVerifyAlignment(size_t alloc_size,size_t aligned_bytes)990 static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
991 void* ptrs[100];
992 uintptr_t mask = aligned_bytes - 1;
993 for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
994 ptrs[i] = malloc(alloc_size);
995 ASSERT_TRUE(ptrs[i] != nullptr);
996 ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
997 << "Expected at least " << aligned_bytes << " byte alignment: size "
998 << alloc_size << " actual ptr " << ptrs[i];
999 }
1000 }
1001 #endif
1002
// Verifies allocator alignment: first that new/malloc/vector storage is
// suitably aligned for every fundamental type, then (Android only) that
// size-class minimum alignment guarantees hold for sizes 1 through 128.
void AlignCheck() {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  // 1. Allocations of a size that rounds up to a multiple of 16 bytes
  //    must have at least 16 byte alignment.
  // 2. Allocations of a size that rounds up to a multiple of 8 bytes and
  //    not 16 bytes, are only required to have at least 8 byte alignment.
  // In addition, on Android clang has been configured for 64 bit such that:
  // 3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
  // 4. Allocations > 8 bytes must be aligned to at least 16 bytes.
  // For 32 bit environments, only the first two requirements must be met.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  // As mentioned before, for 64 bit this will enforce the higher
  // requirement since clang expects this behavior on Android now.
  for (size_t i = 1; i <= 128; i++) {
#if defined(__LP64__)
    // 64-bit: rules 3 and 4 above.
    if (i <= 8) {
      AndroidVerifyAlignment(i, 8);
    } else {
      AndroidVerifyAlignment(i, 16);
    }
#else
    // 32-bit: round the request up to a multiple of 8 and apply rules 1/2.
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
#endif
    // AndroidVerifyAlignment uses ASSERT_*, so stop on the first failure.
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}
1062
// Verify the alignment guarantees of operator new, malloc, and std::vector
// for all fundamental types (see AlignCheck above).
TEST(malloc, align_check) {
  AlignCheck();
}
1066
1067 // Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc,DISABLED_alloc_after_fork)1068 TEST(malloc, DISABLED_alloc_after_fork) {
1069 // Both of these need to be a power of 2.
1070 static constexpr size_t kMinAllocationSize = 8;
1071 static constexpr size_t kMaxAllocationSize = 2097152;
1072
1073 static constexpr size_t kNumAllocatingThreads = 5;
1074 static constexpr size_t kNumForkLoops = 100;
1075
1076 std::atomic_bool stop;
1077
1078 // Create threads that simply allocate and free different sizes.
1079 std::vector<std::thread*> threads;
1080 for (size_t i = 0; i < kNumAllocatingThreads; i++) {
1081 std::thread* t = new std::thread([&stop] {
1082 while (!stop) {
1083 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
1084 void* ptr;
1085 DoNotOptimize(ptr = malloc(size));
1086 free(ptr);
1087 }
1088 }
1089 });
1090 threads.push_back(t);
1091 }
1092
1093 // Create a thread to fork and allocate.
1094 for (size_t i = 0; i < kNumForkLoops; i++) {
1095 pid_t pid;
1096 if ((pid = fork()) == 0) {
1097 for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
1098 void* ptr;
1099 DoNotOptimize(ptr = malloc(size));
1100 ASSERT_TRUE(ptr != nullptr);
1101 // Make sure we can touch all of the allocation.
1102 memset(ptr, 0x1, size);
1103 ASSERT_LE(size, malloc_usable_size(ptr));
1104 free(ptr);
1105 }
1106 _exit(10);
1107 }
1108 ASSERT_NE(-1, pid);
1109 AssertChildExited(pid, 10);
1110 }
1111
1112 stop = true;
1113 for (auto thread : threads) {
1114 thread->join();
1115 delete thread;
1116 }
1117 }
1118
// An unknown android_mallopt() opcode must fail and set errno to ENOTSUP.
TEST(android_mallopt, error_on_unexpected_option) {
#if defined(__BIONIC__)
  const int unrecognized_option = -1;
  errno = 0;
  EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
  EXPECT_ERRNO(ENOTSUP);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1129
IsDynamic()1130 bool IsDynamic() {
1131 #if defined(__LP64__)
1132 Elf64_Ehdr ehdr;
1133 #else
1134 Elf32_Ehdr ehdr;
1135 #endif
1136 std::string path(android::base::GetExecutablePath());
1137
1138 int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
1139 if (fd == -1) {
1140 // Assume dynamic on error.
1141 return true;
1142 }
1143 bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
1144 close(fd);
1145 // Assume dynamic in error cases.
1146 return !read_completed || ehdr.e_type == ET_DYN;
1147 }
1148
// M_INIT_ZYGOTE_CHILD_PROFILING with no argument should succeed for dynamic
// executables and fail with ENOTSUP for static ones; passing an argument
// buffer must be rejected.
TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_ERRNO(0);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_ERRNO(ENOTSUP);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    EXPECT_ERRNO(EINVAL);
  } else {
    // Static executables report ENOTSUP regardless of the arguments.
    EXPECT_ERRNO(ENOTSUP);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1175
1176 #if defined(__BIONIC__)
// Child-process body for the EXPECT_EXIT checks in set_allocation_limit.
// Installs a 128MB allocation limit, then exits 0 only if |func| succeeds for
// an allocation under the limit and fails for one at the limit.
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
1188 #endif
1189
// Every allocation entry point must honor the limit installed via
// M_SET_ALLOCATION_LIMIT_BYTES. Each check runs in its own child process
// (EXPECT_EXIT) since a process can only set the limit once (see
// set_allocation_limit_multiple below).
TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
  // pvalloc/valloc are only exercised on 32-bit targets.
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1224
// Only the first M_SET_ALLOCATION_LIMIT_BYTES call in a process may succeed;
// a second attempt must fail.
TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1236
1237 #if defined(__BIONIC__)
1238 static constexpr size_t kAllocationSize = 8 * 1024 * 1024;
1239
GetMaxAllocations()1240 static size_t GetMaxAllocations() {
1241 size_t max_pointers = 0;
1242 void* ptrs[20];
1243 for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
1244 ptrs[i] = malloc(kAllocationSize);
1245 if (ptrs[i] == nullptr) {
1246 max_pointers = i;
1247 break;
1248 }
1249 }
1250 for (size_t i = 0; i < max_pointers; i++) {
1251 free(ptrs[i]);
1252 }
1253 return max_pointers;
1254 }
1255
VerifyMaxPointers(size_t max_pointers)1256 static void VerifyMaxPointers(size_t max_pointers) {
1257 // Now verify that we can allocate the same number as before.
1258 void* ptrs[20];
1259 for (size_t i = 0; i < max_pointers; i++) {
1260 ptrs[i] = malloc(kAllocationSize);
1261 ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
1262 }
1263
1264 // Make sure the next allocation still fails.
1265 ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
1266 for (size_t i = 0; i < max_pointers; i++) {
1267 free(ptrs[i]);
1268 }
1269 }
1270 #endif
1271
// Growing an allocation with realloc must count against the limit, and a
// realloc that would exceed the limit must fail without disturbing it.
TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Increase size.
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  // Now push past the limit. Fix: keep the old pointer — a failing realloc
  // returns nullptr but leaves the original block valid, so assigning the
  // result straight to |memory| leaked the live 80MB allocation (and left it
  // counted against the limit, skewing VerifyMaxPointers below).
  void* failed_memory = realloc(memory, 130 * 1024 * 1024);
  ASSERT_TRUE(failed_memory == nullptr);
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1301
// Shrinking an allocation with realloc must credit the released bytes back
// against the allocation limit.
TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Shrink the block through a series of successively smaller sizes.
  for (size_t megabytes : {60, 40, 20, 10}) {
    memory = realloc(memory, megabytes * 1024 * 1024);
    ASSERT_TRUE(memory != nullptr);
  }
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1329
// realloc(ptr, 0) frees the block (the test asserts it returns nullptr), and
// the freed bytes must be credited back against the allocation limit.
TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Shrinking to zero acts as a free on bionic and returns nullptr.
  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1349
1350 #if defined(__BIONIC__)
SetAllocationLimitMultipleThreads()1351 static void SetAllocationLimitMultipleThreads() {
1352 static constexpr size_t kNumThreads = 4;
1353 std::atomic_bool start_running = false;
1354 std::atomic<size_t> num_running;
1355 std::atomic<size_t> num_successful;
1356 std::unique_ptr<std::thread> threads[kNumThreads];
1357 for (size_t i = 0; i < kNumThreads; i++) {
1358 threads[i].reset(new std::thread([&num_running, &start_running, &num_successful] {
1359 ++num_running;
1360 while (!start_running) {
1361 }
1362 size_t limit = 500 * 1024 * 1024;
1363 if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
1364 ++num_successful;
1365 }
1366 }));
1367 }
1368
1369 // Wait until all of the threads have started.
1370 while (num_running != kNumThreads)
1371 ;
1372
1373 // Now start all of the threads setting the mallopt at once.
1374 start_running = true;
1375
1376 // Send hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to trigger
1377 // heapprofd handler. This will verify that changing the limit while
1378 // the allocation handlers are being changed at the same time works,
1379 // or that the limit handler is changed first and this also works properly.
1380 union sigval signal_value {};
1381 ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));
1382
1383 // Wait for all of the threads to finish.
1384 for (size_t i = 0; i < kNumThreads; i++) {
1385 threads[i]->join();
1386 }
1387 ASSERT_EQ(1U, num_successful) << "Only one thread should be able to set the limit.";
1388 _exit(0);
1389 }
1390 #endif
1391
// Stress test: repeatedly fork and run SetAllocationLimitMultipleThreads in
// the child, verifying exactly one racing thread can set the limit each time.
TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    // Enable profiling hooks first so the profiler signal sent by the child
    // has a handler installed (not supported in static executables).
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because errors messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      // The helper calls _exit(0) on success in the child.
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1414
1415 #if defined(__BIONIC__)
1416 using Mode = android_mallopt_gwp_asan_options_t::Mode;
// Child body run by multiple_enable_gwp_asan below via RunGwpAsanTest, which
// launches it with GWP-ASan already enabled. DISABLED_ so it never runs
// standalone.
TEST(android_mallopt, DISABLED_multiple_enable_gwp_asan) {
  android_mallopt_gwp_asan_options_t options;
  options.program_name = "";  // Don't infer GWP-ASan options from sysprops.
  options.mode = Mode::APP_MANIFEST_NEVER;
  // GWP-ASan should already be enabled. Trying to enable or disable it should
  // always pass.
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
  options.mode = Mode::APP_MANIFEST_DEFAULT;
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
}
1427 #endif // defined(__BIONIC__)
1428
// Runs DISABLED_multiple_enable_gwp_asan (above) in a subprocess with
// GWP-ASan force-enabled, so repeated M_INITIALIZE_GWP_ASAN calls are tested
// against an already-initialized allocator.
TEST(android_mallopt, multiple_enable_gwp_asan) {
#if defined(__BIONIC__)
  // Always enable GWP-Asan, with default options.
  RunGwpAsanTest("*.DISABLED_multiple_enable_gwp_asan");
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1437
// M_MEMTAG_STACK_IS_ON must succeed; only the call's success is checked here,
// not the reported value.
TEST(android_mallopt, memtag_stack_is_on) {
#if defined(__BIONIC__)
  bool memtag_stack;
  EXPECT_TRUE(android_mallopt(M_MEMTAG_STACK_IS_ON, &memtag_stack, sizeof(memtag_stack)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1446
TestHeapZeroing(int num_iterations,int (* get_alloc_size)(int iteration))1447 void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1448 std::vector<void*> allocs;
1449 constexpr int kMaxBytesToCheckZero = 64;
1450 const char kBlankMemory[kMaxBytesToCheckZero] = {};
1451
1452 for (int i = 0; i < num_iterations; ++i) {
1453 int size = get_alloc_size(i);
1454 allocs.push_back(malloc(size));
1455 memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1456 }
1457
1458 for (void* alloc : allocs) {
1459 free(alloc);
1460 }
1461 allocs.clear();
1462
1463 for (int i = 0; i < num_iterations; ++i) {
1464 int size = get_alloc_size(i);
1465 allocs.push_back(malloc(size));
1466 ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1467 }
1468
1469 for (void* alloc : allocs) {
1470 free(alloc);
1471 }
1472 }
1473
// With M_BIONIC_ZERO_INIT enabled, freed and re-allocated memory must come
// back zeroed, for both small and large (scudo secondary) allocations.
// Scudo-only.
TEST(malloc, zero_init) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  mallopt(M_BIONIC_ZERO_INIT, 1);

  // Test using a block of 4K small (1-32 byte) allocations.
  TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
    return 1 + iteration % 32;
  });

  // Also test large allocations that land in the scudo secondary, as this is
  // the only part of Scudo that's changed by enabling zero initialization with
  // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
  // release secondary allocations back to the OS) was modified to 0ms/1ms by
  // mallopt_decay. Ensure that we delay for at least a second before releasing
  // pages to the OS in order to avoid implicit zeroing by the kernel.
  mallopt(M_DECAY_TIME, 1);
  TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
    return 1 << (19 + iteration % 4);
  });

#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1505
1506 // Note that MTE is enabled on cc_tests on devices that support MTE.
// Disabling heap tagging via mallopt must clear the MTE tag-check-fault mode
// for every thread in the process, not just the caller.
TEST(malloc, disable_mte) {
#if defined(__BIONIC__)
  if (!mte_supported()) {
    GTEST_SKIP() << "This function can only be tested with MTE";
  }

  // The semaphore parks the second thread until after tagging has been
  // disabled, so its PR_GET_TAGGED_ADDR_CTRL read observes the change.
  sem_t sem;
  ASSERT_EQ(0, sem_init(&sem, 0, 0));

  pthread_t thread;
  ASSERT_EQ(0, pthread_create(
                   &thread, nullptr,
                   [](void* ptr) -> void* {
                     auto* sem = reinterpret_cast<sem_t*>(ptr);
                     sem_wait(sem);
                     // Return this thread's tagged-address control word.
                     return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
                   },
                   &sem));

  // Disable heap tagging, then release the waiting thread.
  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  // The calling thread must now report TCF_NONE...
  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(static_cast<unsigned long>(PR_MTE_TCF_NONE), my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  // ...and the other thread must observe the identical control value.
  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1540
// For apps targeting SDK <= 29, scudo must tolerate reading a few bytes past
// the end of a large allocation (app-compat slack); the deliberate
// out-of-bounds read below must not crash.
TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end of
  // a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  volatile char *vp = p.get();
  // volatile forces the one-past-the-end read to actually happen.
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}
1561
1562 // Regression test for b/206701345 -- scudo bug, MTE only.
1563 // Fix: https://reviews.llvm.org/D105261
1564 // Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
// Stress in-place realloc across several small size classes; under the buggy
// scudo/MTE combination (b/206701345) growing an allocation at the very end
// of an mmap-ed region crashed. Passing means no crash.
TEST(malloc, realloc_mte_crash_b206701345) {
  // We want to hit in-place realloc at the very end of an mmap-ed region. Not
  // all size classes allow such placement - mmap size has to be divisible by
  // the block size. At the time of writing this could only be reproduced with
  // 64 byte size class (i.e. 48 byte allocations), but that may change in the
  // future. Try several different classes at the lower end.
  std::vector<void*> ptrs(10000);
  for (int size_class = 1; size_class < 32; ++size_class) {
    const size_t alloc_size = 16 * size_class - 1;
    for (void*& slot : ptrs) {
      slot = realloc(malloc(alloc_size), alloc_size + 1);
    }

    for (void* slot : ptrs) {
      free(slot);
    }
  }
}
1583
// Allocates |max_allocations| blocks of each size in |test_sizes| with
// |alloc_func| and asserts every block is fully zeroed up to its usable size.
// The blocks are then dirtied, freed, and re-allocated with plain malloc to
// check that reused memory also comes back zeroed. The comparison buffer is
// sized from test_sizes.back() * 2, so |test_sizes| is expected in ascending
// order (the ASSERT_LE below catches a usable size that exceeds it).
void VerifyAllocationsAreZero(std::function<void*(size_t)> alloc_func, std::string function_name,
                              std::vector<size_t>& test_sizes, size_t max_allocations) {
  // Vector of zero'd data used for comparisons. Make it twice the largest size.
  std::vector<char> zero(test_sizes.back() * 2, 0);

  SCOPED_TRACE(testing::Message() << function_name << " failed to zero memory");

  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(max_allocations);
    for (size_t i = 0; i < ptrs.size(); i++) {
      SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
      ptrs[i] = alloc_func(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t alloc_size = malloc_usable_size(ptrs[i]);
      ASSERT_LE(alloc_size, zero.size());
      ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));

      // Set the memory to non-zero to make sure if the pointer
      // is reused it's still zero.
      memset(ptrs[i], 0xab, alloc_size);
    }
    // Free the pointers.
    for (size_t i = 0; i < ptrs.size(); i++) {
      free(ptrs[i]);
    }
    // Re-allocate with plain malloc; reused memory must still be zeroed.
    for (size_t i = 0; i < ptrs.size(); i++) {
      SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
      ptrs[i] = malloc(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t alloc_size = malloc_usable_size(ptrs[i]);
      ASSERT_LE(alloc_size, zero.size());
      ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
    }
    // Free all of the pointers later to maximize the chance of reusing from
    // the first loop.
    for (size_t i = 0; i < ptrs.size(); i++) {
      free(ptrs[i]);
    }
  }
}
1624
1625 // Verify that small and medium allocations are always zero.
1626 // @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_small_medium_sizes) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif
  SKIP_WITH_HWASAN << "Only test system allocator, not hwasan allocator.";

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  // Check malloc, memalign, and posix_memalign across sizes from 16 bytes
  // up to 64KiB, 1024 allocations per size.
  constexpr size_t kMaxAllocations = 1024;
  std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
  VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
                           kMaxAllocations);

  VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
                           test_sizes, kMaxAllocations);

  VerifyAllocationsAreZero(
      [](size_t size) -> void* {
        void* ptr;
        if (posix_memalign(&ptr, 64, size) == 0) {
          return ptr;
        }
        return nullptr;
      },
      "posix_memalign", test_sizes, kMaxAllocations);
}
1655
1656 // Verify that large allocations are always zero.
1657 // @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_large_sizes) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif
  SKIP_WITH_HWASAN << "Only test system allocator, not hwasan allocator.";

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  // Same checks as the small/medium test, but with 1MB-4MB allocations and
  // fewer iterations per size.
  constexpr size_t kMaxAllocations = 20;
  std::vector<size_t> test_sizes = {1000000, 2000000, 3000000, 4000000};

  auto malloc_fn = [](size_t size) -> void* { return malloc(size); };
  VerifyAllocationsAreZero(malloc_fn, "malloc", test_sizes, kMaxAllocations);

  auto memalign_fn = [](size_t size) -> void* { return memalign(64, size); };
  VerifyAllocationsAreZero(memalign_fn, "memalign", test_sizes, kMaxAllocations);

  auto posix_memalign_fn = [](size_t size) -> void* {
    void* ptr = nullptr;
    return posix_memalign(&ptr, 64, size) == 0 ? ptr : nullptr;
  };
  VerifyAllocationsAreZero(posix_memalign_fn, "posix_memalign", test_sizes, kMaxAllocations);
}
1686
1687 // Verify that reallocs are zeroed when expanded.
1688 // @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_realloc) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif
  SKIP_WITH_HWASAN << "Only test system allocator, not hwasan allocator.";

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  // Vector of zero'd data used for comparisons.
  constexpr size_t kMaxMemorySize = 131072;
  std::vector<char> zero(kMaxMemorySize, 0);

  constexpr size_t kMaxAllocations = 1024;
  std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
  // Do a number of allocations and set them to non-zero. This primes the
  // allocator with dirty freed blocks, so the realloc growth below is likely
  // to land on previously-used memory.
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      // Set the memory to non-zero to make sure if the pointer
      // is reused it's still zero.
      memset(ptrs[i], 0xab, malloc_usable_size(ptrs[i]));
    }
    // Free the pointers.
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }

  // Do the reallocs to a larger size and verify the rest of the allocation
  // is zero.
  constexpr size_t kInitialSize = 8;
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(kInitialSize);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t orig_alloc_size = malloc_usable_size(ptrs[i]);

      ptrs[i] = realloc(ptrs[i], test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_alloc_size = malloc_usable_size(ptrs[i]);
      // Only the bytes beyond the original usable size must be zero; the
      // first orig_alloc_size bytes are the (preserved) original contents.
      char* ptr = reinterpret_cast<char*>(ptrs[i]);
      ASSERT_EQ(0, memcmp(&ptr[orig_alloc_size], zero.data(), new_alloc_size - orig_alloc_size))
          << "realloc from " << kInitialSize << " to size " << test_size << " at iteration " << i;
    }
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }
}
1744
// M_GET_DECAY_TIME_ENABLED requires a non-null bool* of exactly sizeof(bool);
// a null pointer or a wrong-sized argument must fail with EINVAL.
TEST(android_mallopt, get_decay_time_enabled_errors) {
#if defined(__BIONIC__)
  errno = 0;
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, nullptr, sizeof(bool)));
  EXPECT_ERRNO(EINVAL);

  errno = 0;
  int value;
  // Valid pointer, but sizeof(int) != sizeof(bool).
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_ERRNO(EINVAL);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1759
// M_GET_DECAY_TIME_ENABLED reflects the most recent mallopt(M_DECAY_TIME, x):
// enabled for 1, disabled for 0 and for negative values.
TEST(android_mallopt, get_decay_time_enabled) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";

  EXPECT_EQ(1, mallopt(M_DECAY_TIME, 0));

  bool value;
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_FALSE(value);

  EXPECT_EQ(1, mallopt(M_DECAY_TIME, 1));
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_TRUE(value);

  // A negative decay time also reports as disabled.
  EXPECT_EQ(1, mallopt(M_DECAY_TIME, -1));
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_FALSE(value);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}
1781