1 /*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <sys/mman.h>
18
19 #include "base/common_art_test.h"
20 #include "base/pointer_size.h"
21 #include "base/utils.h"
22 #include "gc/collector/immune_spaces.h"
23 #include "gc/space/image_space.h"
24 #include "gc/space/space-inl.h"
25 #include "oat/oat_file.h"
26 #include "thread-current-inl.h"
27
28 namespace art HIDDEN {
29 namespace mirror {
30 class Object;
31 } // namespace mirror
32 namespace gc {
33 namespace collector {
34
// Minimal OatFile stub that only records a [begin, end) address range.
// Lets the tests attach an "oat file" to an image space without parsing a
// real ELF file.
class FakeOatFile : public OatFile {
 public:
  // Marks the given range as this oat file's extent. The file is created
  // non-executable since no real code is mapped.
  FakeOatFile(uint8_t* begin, uint8_t* end) : OatFile("Location", /*executable=*/ false) {
    begin_ = begin;
    end_ = end;
  }

  // No backing ELF file exists for this fake, so there is no ELF begin
  // address to compute; always reports failure.
  const uint8_t* ComputeElfBegin(std::string* error_msg) const override {
    *error_msg = "Not applicable";
    return nullptr;
  }
};
47
// ImageSpace subclass built from pre-constructed maps, a bitmap, and a
// FakeOatFile instead of loading anything from disk. Keeps the oat MemMap
// alive for the lifetime of the space.
class FakeImageSpace : public space::ImageSpace {
 public:
  FakeImageSpace(MemMap&& map,
                 accounting::ContinuousSpaceBitmap&& live_bitmap,
                 std::unique_ptr<FakeOatFile>&& oat_file,
                 MemMap&& oat_map)
      : ImageSpace("FakeImageSpace",
                   /*image_location=*/"",
                   /*profile_files=*/{},
                   std::move(map),
                   std::move(live_bitmap),
                   // NOTE: reading map.End() alongside std::move(map) is OK
                   // here — std::move is only a cast; `map` is not actually
                   // moved from until inside the ImageSpace constructor, after
                   // all arguments have been evaluated.
                   map.End()),
        oat_map_(std::move(oat_map)) {
    // oat_file_ / oat_file_non_owned_ are inherited ImageSpace members; the
    // non-owned pointer must alias the owned one.
    oat_file_ = std::move(oat_file);
    oat_file_non_owned_ = oat_file_.get();
  }

 private:
  // Keeps the memory backing the fake oat file mapped while the space exists.
  MemMap oat_map_;
};
68
// Test fixture providing helpers to build fake image spaces (image + oat
// mappings with a minimal ImageHeader) for exercising ImmuneSpaces.
class ImmuneSpacesTest : public CommonArtTest {
  // Size of the pre-allocated bitmap pool; must be at least the number of
  // image spaces any single test creates.
  static constexpr size_t kMaxBitmaps = 10;

 public:
  ImmuneSpacesTest() {}

  // Pre-allocates kMaxBitmaps fake bitmaps into live_bitmaps_ for later
  // consumption by CreateImageSpace().
  void ReserveBitmaps() {
    const size_t page_size = MemMap::GetPageSize();

    // Create a bunch of fake bitmaps since these are required to create image spaces. The bitmaps
    // do not need to cover the image spaces though.
    for (size_t i = 0; i < kMaxBitmaps; ++i) {
      accounting::ContinuousSpaceBitmap bitmap(
          accounting::ContinuousSpaceBitmap::Create(
              "bitmap", reinterpret_cast<uint8_t*>(static_cast<size_t>(page_size)), page_size));
      CHECK(bitmap.IsValid());
      live_bitmaps_.push_back(std::move(bitmap));
    }
  }

  // Reserves an anonymous low-4GB mapping of image_size bytes aligned to
  // kElfSegmentAlignment. Returns an invalid MemMap and fills *error_str on
  // failure.
  MemMap ReserveImage(size_t image_size, /*out*/ std::string* error_str) {
    // If the image is aligned to the current runtime page size, it will already
    // be naturally aligned. On the other hand, MapAnonymousAligned() requires
    // that the requested alignment is higher.
    DCHECK_LE(MemMap::GetPageSize(), kElfSegmentAlignment);
    if (MemMap::GetPageSize() == kElfSegmentAlignment) {
      return MemMap::MapAnonymous("reserve",
                                  image_size,
                                  PROT_READ | PROT_WRITE,
                                  /*low_4gb=*/true,
                                  error_str);
    }
    return MemMap::MapAnonymousAligned("reserve",
                                       image_size,
                                       PROT_READ | PROT_WRITE,
                                       /*low_4gb=*/true,
                                       kElfSegmentAlignment,
                                       error_str);
  }

  // Create an image space, the oat file is optional.
  // Carves the image and oat mappings out of the given reservations (which
  // are consumed/advanced by MapAnonymous), writes a minimal ImageHeader at
  // the start of the image describing the oat range, and returns a new
  // FakeImageSpace (caller owns it), or nullptr if either mapping fails.
  // Consumes one bitmap from the pool filled by ReserveBitmaps().
  FakeImageSpace* CreateImageSpace(size_t image_size,
                                   size_t oat_size,
                                   MemMap* image_reservation,
                                   MemMap* oat_reservation) {
    DCHECK(image_reservation != nullptr);
    DCHECK(oat_reservation != nullptr);
    std::string error_str;
    MemMap image_map = MemMap::MapAnonymous("FakeImageSpace",
                                            image_size,
                                            PROT_READ | PROT_WRITE,
                                            /*low_4gb=*/ true,
                                            /*reservation=*/ image_reservation,
                                            &error_str);
    if (!image_map.IsValid()) {
      LOG(ERROR) << error_str;
      return nullptr;
    }
    // Take a bitmap from the pre-reserved pool; tests must call
    // ReserveBitmaps() first.
    CHECK(!live_bitmaps_.empty());
    accounting::ContinuousSpaceBitmap live_bitmap(std::move(live_bitmaps_.back()));
    live_bitmaps_.pop_back();
    MemMap oat_map = MemMap::MapAnonymous("OatMap",
                                          oat_size,
                                          PROT_READ | PROT_WRITE,
                                          /*low_4gb=*/ true,
                                          /*reservation=*/ oat_reservation,
                                          &error_str);
    if (!oat_map.IsValid()) {
      LOG(ERROR) << error_str;
      return nullptr;
    }
    std::unique_ptr<FakeOatFile> oat_file(new FakeOatFile(oat_map.Begin(), oat_map.End()));
    // Create image header.
    ImageSection sections[ImageHeader::kSectionCount];
    new (image_map.Begin()) ImageHeader(
        /*image_reservation_size=*/ image_size,
        /*component_count=*/ 1u,
        /*image_begin=*/ PointerToLowMemUInt32(image_map.Begin()),
        /*image_size=*/ image_size,
        sections,
        /*image_roots=*/ PointerToLowMemUInt32(image_map.Begin()) + 1,
        /*oat_checksum=*/ 0u,
        // The oat file data in the header is always right after the image space.
        /*oat_file_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
        /*oat_data_begin=*/ PointerToLowMemUInt32(oat_map.Begin()),
        /*oat_data_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
        /*oat_file_end=*/ PointerToLowMemUInt32(oat_map.Begin() + oat_size),
        /*boot_image_begin=*/ 0u,
        /*boot_image_size=*/ 0u,
        /*boot_image_component_count=*/ 0u,
        /*boot_image_checksum=*/ 0u,
        /*pointer_size=*/ kRuntimePointerSize);
    return new FakeImageSpace(std::move(image_map),
                              std::move(live_bitmap),
                              std::move(oat_file),
                              std::move(oat_map));
  }

 private:
  // Bitmap pool for pre-allocated fake bitmaps. We need to pre-allocate them since we don't want
  // them to randomly get placed somewhere where we want an image space.
  std::vector<accounting::ContinuousSpaceBitmap> live_bitmaps_;
};
172
173 class FakeSpace : public space::ContinuousSpace {
174 public:
FakeSpace(uint8_t * begin,uint8_t * end)175 FakeSpace(uint8_t* begin, uint8_t* end)
176 : ContinuousSpace("FakeSpace",
177 space::kGcRetentionPolicyNeverCollect,
178 begin,
179 end,
180 /*limit=*/end) {}
181
GetType() const182 space::SpaceType GetType() const override {
183 return space::kSpaceTypeMallocSpace;
184 }
185
CanMoveObjects() const186 bool CanMoveObjects() const override {
187 return false;
188 }
189
GetLiveBitmap()190 accounting::ContinuousSpaceBitmap* GetLiveBitmap() override {
191 return nullptr;
192 }
193
GetMarkBitmap()194 accounting::ContinuousSpaceBitmap* GetMarkBitmap() override {
195 return nullptr;
196 }
197 };
198
// Two directly adjacent spaces must be merged into a single immune region
// that covers both of them.
TEST_F(ImmuneSpacesTest, AppendBasic) {
  ImmuneSpaces spaces;
  uint8_t* const fake_base = reinterpret_cast<uint8_t*>(0x1000);
  FakeSpace first_space(fake_base, fake_base + 45 * KB);
  FakeSpace second_space(first_space.Limit(), first_space.Limit() + 813 * KB);
  {
    // AddSpace requires exclusive ownership of the heap bitmap lock.
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&first_space);
    spaces.AddSpace(&second_space);
  }
  EXPECT_TRUE(spaces.ContainsSpace(&first_space));
  EXPECT_TRUE(spaces.ContainsSpace(&second_space));
  // The largest immune region should span from the start of the first space
  // to the limit of the second, since they are contiguous.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            first_space.Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            second_space.Limit());
}
214
// Tests [image][oat][space] producing a single large immune region.
TEST_F(ImmuneSpacesTest, AppendAfterImage) {
  ReserveBitmaps();
  ImmuneSpaces spaces;
  constexpr size_t kImageSize = 123 * kElfSegmentAlignment;
  constexpr size_t kImageOatSize = 321 * kElfSegmentAlignment;
  constexpr size_t kOtherSpaceSize = 100 * kElfSegmentAlignment;

  // Reserve one contiguous region and carve the image out of its front; the
  // remainder of `reservation` backs the oat file (and later the extra space).
  std::string error_str;
  MemMap reservation = ReserveImage(kImageSize + kImageOatSize + kOtherSpaceSize, &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  MemMap image_reservation = reservation.TakeReservedMemory(kImageSize);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<FakeImageSpace> image_space(CreateImageSpace(kImageSize,
                                                               kImageOatSize,
                                                               &image_reservation,
                                                               &reservation));
  ASSERT_TRUE(image_space != nullptr);
  // The image mapping should consume the whole image reservation, leaving the
  // tail reservation (for the trailing FakeSpace) still valid.
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  const ImageHeader& image_header = image_space->GetImageHeader();
  // Fake heap space placed immediately after the oat file end.
  FakeSpace space(image_header.GetOatFileEnd(), image_header.GetOatFileEnd() + kOtherSpaceSize);

  EXPECT_EQ(image_header.GetImageSize(), kImageSize);
  EXPECT_EQ(static_cast<size_t>(image_header.GetOatFileEnd() - image_header.GetOatFileBegin()),
            kImageOatSize);
  EXPECT_EQ(image_space->GetOatFile()->Size(), kImageOatSize);
  // Check that we do not include the oat if there is no space after.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(image_space.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            image_space->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            image_space->Limit());
  // Add another space and ensure it gets appended.
  EXPECT_NE(image_space->Limit(), space.Begin());
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    spaces.AddSpace(&space);
  }
  EXPECT_TRUE(spaces.ContainsSpace(image_space.get()));
  EXPECT_TRUE(spaces.ContainsSpace(&space));
  // CreateLargestImmuneRegion should have coalesced the two spaces since the oat code after the
  // image prevents gaps.
  // Check that we have a continuous region.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            image_space->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space.Limit());
}
269
// Test [image1][image2][image1 oat][image2 oat][image3] producing a single large immune region.
TEST_F(ImmuneSpacesTest, MultiImage) {
  ReserveBitmaps();
  // Image 2 needs to be smaller or else it may be chosen for immune region.
  constexpr size_t kImage1Size = kElfSegmentAlignment * 17;
  constexpr size_t kImage2Size = kElfSegmentAlignment * 13;
  constexpr size_t kImage3Size = kElfSegmentAlignment * 3;
  constexpr size_t kImage1OatSize = kElfSegmentAlignment * 5;
  constexpr size_t kImage2OatSize = kElfSegmentAlignment * 8;
  constexpr size_t kImage3OatSize = kElfSegmentAlignment;
  constexpr size_t kImageBytes = kImage1Size + kImage2Size + kImage3Size;
  constexpr size_t kMemorySize = kImageBytes + kImage1OatSize + kImage2OatSize + kImage3OatSize;
  std::string error_str;
  // One contiguous reservation for all images and oat files; images 1+2 are
  // carved from the front so their oat files land behind image 2.
  MemMap reservation = ReserveImage(kMemorySize, &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  MemMap image_reservation = reservation.TakeReservedMemory(kImage1Size + kImage2Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<FakeImageSpace> space1(CreateImageSpace(kImage1Size,
                                                          kImage1OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space1 != nullptr);
  // Image 1 only consumed part of the image reservation; image 2 takes the rest.
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  std::unique_ptr<FakeImageSpace> space2(CreateImageSpace(kImage2Size,
                                                          kImage2OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space2 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());

  // Finally put a 3rd image space.
  image_reservation = reservation.TakeReservedMemory(kImage3Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<FakeImageSpace> space3(CreateImageSpace(kImage3Size,
                                                          kImage3OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space3 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  // All of the original reservation is now consumed.
  ASSERT_FALSE(reservation.IsValid());

  // Check that we do not include the oat if there is no space after.
  ImmuneSpaces spaces;
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space1 " << reinterpret_cast<const void*>(space1->Begin());
    spaces.AddSpace(space1.get());
    LOG(INFO) << "Adding space2 " << reinterpret_cast<const void*>(space2->Begin());
    spaces.AddSpace(space2.get());
  }
  // There are no more heap bytes, the immune region should only be the first 2 image spaces and
  // should exclude the image oat files.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space2->Limit());

  // Add another space after the oat files, now it should contain the entire memory region.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space3 " << reinterpret_cast<const void*>(space3->Begin());
    spaces.AddSpace(space3.get());
  }
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a smaller non-adjacent space and ensure it does not become part of the immune region.
  // Image size is kImageBytes - kElfSegmentAlignment
  // Oat size is kElfSegmentAlignment.
  // Guard pages to ensure it is not adjacent to an existing immune region.
  // Layout: [guard page][image][oat][guard page]
  constexpr size_t kGuardSize = kElfSegmentAlignment;
  constexpr size_t kImage4Size = kImageBytes - kElfSegmentAlignment;
  constexpr size_t kImage4OatSize = kElfSegmentAlignment;

  reservation = ReserveImage(kImage4Size + kImage4OatSize + kGuardSize * 2, &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  MemMap guard = reservation.TakeReservedMemory(kGuardSize);
  ASSERT_TRUE(guard.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  guard.Reset();  // Release the guard memory.
  image_reservation = reservation.TakeReservedMemory(kImage4Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<FakeImageSpace> space4(CreateImageSpace(kImage4Size,
                                                          kImage4OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space4 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  // Only the trailing guard page should remain in the reservation.
  ASSERT_EQ(reservation.Size(), kGuardSize);
  reservation.Reset();  // Release the guard memory.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space4 " << reinterpret_cast<const void*>(space4->Begin());
    spaces.AddSpace(space4.get());
  }
  // Space 4 is smaller than the existing region, so the largest immune region
  // should still be spaces 1-3.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()),
            space1->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()),
            space3->Limit());

  // Add a larger non-adjacent space and ensure it becomes the new largest immune region.
  // Image size is kImageBytes + kElfSegmentAlignment
  // Oat size is kElfSegmentAlignment.
  // Guard pages to ensure it is not adjacent to an existing immune region.
  // Layout: [guard page][image][oat][guard page]
  constexpr size_t kImage5Size = kImageBytes + kElfSegmentAlignment;
  constexpr size_t kImage5OatSize = kElfSegmentAlignment;
  reservation = ReserveImage(kImage5Size + kImage5OatSize + kGuardSize * 2, &error_str);
  ASSERT_TRUE(reservation.IsValid()) << "Failed to allocate memory region " << error_str;
  guard = reservation.TakeReservedMemory(kGuardSize);
  ASSERT_TRUE(guard.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  guard.Reset();  // Release the guard memory.
  image_reservation = reservation.TakeReservedMemory(kImage5Size);
  ASSERT_TRUE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  std::unique_ptr<FakeImageSpace> space5(CreateImageSpace(kImage5Size,
                                                          kImage5OatSize,
                                                          &image_reservation,
                                                          &reservation));
  ASSERT_TRUE(space5 != nullptr);
  ASSERT_FALSE(image_reservation.IsValid());
  ASSERT_TRUE(reservation.IsValid());
  ASSERT_EQ(reservation.Size(), kGuardSize);
  reservation.Reset();  // Release the guard memory.
  {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    LOG(INFO) << "Adding space5 " << reinterpret_cast<const void*>(space5->Begin());
    spaces.AddSpace(space5.get());
  }
  // Space 5 is larger than spaces 1-3 combined, so it wins.
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().Begin()), space5->Begin());
  EXPECT_EQ(reinterpret_cast<uint8_t*>(spaces.GetLargestImmuneRegion().End()), space5->Limit());
}
414
415 } // namespace collector
416 } // namespace gc
417 } // namespace art
418