/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "perfetto/ext/base/paged_memory.h"

#include <stdint.h>

#include "perfetto/base/build_config.h"
#include "perfetto/ext/base/utils.h"
#include "src/base/test/vm_test_utils.h"
#include "test/gtest_and_gmock.h"

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) &&   \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
#include <sys/resource.h>
#endif

namespace perfetto {
namespace base {
namespace {

TEST(PagedMemoryTest, Basic) {
  const size_t kNumPages = 10;
  const size_t kSize = GetSysPageSize() * kNumPages;
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
  void* ptr_raw = nullptr;
#endif
  {
    PagedMemory mem = PagedMemory::Allocate(kSize);
    ASSERT_TRUE(mem.IsValid());
    ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(mem.Get()) % GetSysPageSize());
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
    ptr_raw = mem.Get();
#endif
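    // Fresh anonymous pages are zero-initialized by the OS; every word of the
    // allocation should read back as 0.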
    for (size_t i = 0; i < kSize / sizeof(uint64_t); i++)
      ASSERT_EQ(0u, *(reinterpret_cast<uint64_t*>(mem.Get()) + i));

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
    ASSERT_TRUE(vm_test_utils::IsMapped(ptr_raw, kSize));
#endif

#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
    ASSERT_TRUE(mem.AdviseDontNeed(ptr_raw, kSize));

    // Make sure the pages were removed from the working set.
    ASSERT_FALSE(vm_test_utils::IsMapped(ptr_raw, kSize));
#endif
  }

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
  // Freed memory is necessarily not mapped into the process.
  ASSERT_FALSE(vm_test_utils::IsMapped(ptr_raw, kSize));
#endif
}

TEST(PagedMemoryTest, SubPageGranularity) {
  const size_t kSize = GetSysPageSize() + 1024;
  PagedMemory mem = PagedMemory::Allocate(kSize);
  ASSERT_TRUE(mem.IsValid());
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(mem.Get()) % GetSysPageSize());
  void* ptr_raw = mem.Get();
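  // Touch every word: this checks the zero-initialization and, by writing,
  // dirties the pages so the AdviseDontNeed() below has something to discard.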
  for (size_t i = 0; i < kSize / sizeof(uint64_t); i++) {
    auto* ptr64 = reinterpret_cast<volatile uint64_t*>(ptr_raw) + i;
    ASSERT_EQ(0u, *ptr64);
    *ptr64 = i;
  }

#if PERFETTO_BUILDFLAG(PERFETTO_OS_LINUX) || \
    PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  // Do an AdviseDontNeed on the whole range, which is NOT an integer multiple
  // of the page size. The initial page must be cleared. The remaining 1024
  // bytes might or might not be cleared depending on the OS implementation.
  ASSERT_TRUE(mem.AdviseDontNeed(ptr_raw, kSize));
  ASSERT_FALSE(vm_test_utils::IsMapped(ptr_raw, GetSysPageSize()));
  for (size_t i = 0; i < GetSysPageSize() / sizeof(uint64_t); i++) {
    auto* ptr64 = reinterpret_cast<volatile uint64_t*>(ptr_raw) + i;
    ASSERT_EQ(0u, *ptr64);
  }
#endif
}

TEST(PagedMemoryTest, Uncommitted) {
  const size_t kNumPages = 4096;
  const size_t kSize = GetSysPageSize() * kNumPages;
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
  char* ptr_raw = nullptr;
#endif
  {
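    // kDontCommit reserves the address range without committing it upfront.
    // What that means in practice is platform-dependent, as the branches
    // below spell out.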
    PagedMemory mem = PagedMemory::Allocate(kSize, PagedMemory::kDontCommit);
    ASSERT_TRUE(mem.IsValid());
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
    ptr_raw = reinterpret_cast<char*>(mem.Get());
#endif

#if PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
    // Windows only commits the first 1024 pages.
    constexpr size_t kMappedSize = 4096 * 1024;

    for (size_t i = 0; i < kMappedSize / sizeof(uint64_t); i++)
      ASSERT_EQ(0u, *(reinterpret_cast<uint64_t*>(mem.Get()) + i));

    ASSERT_TRUE(vm_test_utils::IsMapped(ptr_raw, kMappedSize));

    // The next page shouldn't be mapped.
    ASSERT_FALSE(vm_test_utils::IsMapped(ptr_raw + kMappedSize, 4096));
    EXPECT_DEATH_IF_SUPPORTED({ ptr_raw[kMappedSize] = 'x'; }, ".*");

    // Commit the remaining pages.
    mem.EnsureCommitted(kSize);

    for (size_t i = kMappedSize / sizeof(uint64_t);
         i < kSize / sizeof(uint64_t); i++) {
      ASSERT_EQ(0u, *(reinterpret_cast<uint64_t*>(mem.Get()) + i));
    }
#elif PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
    // Fuchsia doesn't yet support paging, so this should be a no-op.
    mem.EnsureCommitted(kSize);
    for (size_t i = 0; i < kSize / sizeof(uint64_t); i++)
      ASSERT_EQ(0u, *(reinterpret_cast<uint64_t*>(mem.Get()) + i));
#else
    // Linux only maps pages in on first access.
    ASSERT_FALSE(vm_test_utils::IsMapped(ptr_raw, kSize));

    // This should not have any effect.
    mem.EnsureCommitted(kSize);
    ASSERT_FALSE(vm_test_utils::IsMapped(ptr_raw, kSize));

    for (size_t i = 0; i < kSize / sizeof(uint64_t); i++)
      ASSERT_EQ(0u, *(reinterpret_cast<uint64_t*>(mem.Get()) + i));
    ASSERT_TRUE(vm_test_utils::IsMapped(ptr_raw, kSize));
#endif
  }

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA)
  // Freed memory is necessarily not mapped into the process.
  ASSERT_FALSE(vm_test_utils::IsMapped(ptr_raw, kSize));
#endif
}

#if defined(ADDRESS_SANITIZER)
TEST(PagedMemoryTest, AccessUncommittedMemoryTriggersASAN) {
  EXPECT_DEATH_IF_SUPPORTED(
      {
        const size_t kNumPages = 2000;
        const size_t kSize = GetSysPageSize() * kNumPages;
        PagedMemory mem =
            PagedMemory::Allocate(kSize, PagedMemory::kDontCommit);
        ASSERT_TRUE(mem.IsValid());
        char* ptr_raw = reinterpret_cast<char*>(mem.Get());
        // Only the first 1024 pages are mapped.
        const size_t kMappedSize = GetSysPageSize() * 1024;
        ptr_raw[kMappedSize] = 'x';
        abort();
      },
      "AddressSanitizer: .*");
}
#endif  // ADDRESS_SANITIZER

TEST(PagedMemoryTest, GuardRegions) {
  const size_t kSize = GetSysPageSize();
  PagedMemory mem = PagedMemory::Allocate(kSize);
  ASSERT_TRUE(mem.IsValid());
  volatile char* raw = reinterpret_cast<char*>(mem.Get());
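  // One-byte accesses immediately below and above the mapping should land in
  // the guard pages and crash.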
  EXPECT_DEATH_IF_SUPPORTED({ raw[-1] = 'x'; }, ".*");
  EXPECT_DEATH_IF_SUPPORTED({ raw[kSize] = 'x'; }, ".*");
}

// Disable this on:
// MacOS: because it doesn't seem to have an equivalent rlimit to bound mmap().
// Windows: because it doesn't support setrlimit().
// Fuchsia: doesn't support rlimit.
// Sanitizers: they seem to try to shadow mmaped memory and fail due to OOMs.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) &&   \
    !PERFETTO_BUILDFLAG(PERFETTO_OS_FUCHSIA) && !defined(ADDRESS_SANITIZER) && \
    !defined(LEAK_SANITIZER) && !defined(THREAD_SANITIZER) && \
    !defined(MEMORY_SANITIZER)
// Glibc headers hit this on RLIMIT_ macros.
#pragma GCC diagnostic push
#if defined(__clang__)
#pragma GCC diagnostic ignored "-Wdisabled-macro-expansion"
#endif
TEST(PagedMemoryTest, Unchecked) {
  const size_t kMemLimit = 256 * 1024 * 1024L;
  struct rlimit limit{kMemLimit, kMemLimit};
  // ASSERT_EXIT here is to spawn the test in a sub-process and avoid
  // propagating the setrlimit() to other test units in case of failure.
  ASSERT_EXIT(
      {
        ASSERT_EQ(0, setrlimit(RLIMIT_AS, &limit));
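        // With the address-space limit in place, an allocation of twice that
        // size must fail. kMayFail turns the failure into an invalid
        // PagedMemory instead of a crash.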
        auto mem = PagedMemory::Allocate(kMemLimit * 2, PagedMemory::kMayFail);
        ASSERT_FALSE(mem.IsValid());
        // Use _exit() instead of exit() to avoid calling destructors on child
        // process death, which may interfere with the parent process's test
        // launcher expectations.
        _exit(0);
      },
      ::testing::ExitedWithCode(0), "");
}
#pragma GCC diagnostic pop
#endif

}  // namespace
}  // namespace base
}  // namespace perfetto