1 /*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #ifndef ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
18 #define ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
19
20 #include "bump_pointer_space-inl.h"
21
22 #include "base/bit_utils.h"
23 #include "mirror/object-inl.h"
24 #include "thread-current-inl.h"
25
26 #include <memory>
27
28 namespace art HIDDEN {
29 namespace gc {
30 namespace space {
31
// Visits every object currently allocated in this space, calling
// visitor(mirror::Object*) on each one in address order.
//
// Locking discipline: `lock_` is held only long enough to snapshot the block
// layout (main block size, per-TLAB block sizes, black-dense region size).
// The actual walk runs without the lock, so it must tolerate racing
// allocators: an object whose class pointer has not yet been published is
// treated as the end of the enclosing block.
template <typename Visitor>
inline void BumpPointerSpace::Walk(Visitor&& visitor) {
  uint8_t* pos = Begin();
  uint8_t* end = End();
  uint8_t* main_end = pos;
  size_t black_dense_size;
  // Heap-allocated snapshot of block_sizes_; stays null when there are no
  // TLAB blocks to walk (pure bump-pointer main block).
  std::unique_ptr<std::vector<size_t>> block_sizes_copy;
  // Internal indirection w/ NO_THREAD_SAFETY_ANALYSIS. Optimally, we'd like to have an annotation
  // like
  //   REQUIRES_AS(visitor.operator(mirror::Object*))
  // on Walk to expose the interprocedural nature of locks here without having to duplicate the
  // function.
  //
  // NO_THREAD_SAFETY_ANALYSIS is a workaround. The problem with the workaround of course is that
  // it doesn't complain at the callsite. However, that is strictly not worse than the
  // ObjectCallback version it replaces.
  auto no_thread_safety_analysis_visit = [&](mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
    visitor(obj);
  };

  {
    MutexLock mu(Thread::Current(), lock_);
    // If we have 0 blocks then we need to update the main header since we have bump pointer style
    // allocation into an unbounded region (actually bounded by Capacity()).
    if (block_sizes_.empty()) {
      UpdateMainBlock();
    }
    main_end = Begin() + main_block_size_;
    if (block_sizes_.empty()) {
      // We don't have any other blocks, this means someone else may be allocating into the main
      // block. In this case, we don't want to try and visit the other blocks after the main block
      // since these could actually be part of the main block.
      end = main_end;
    } else {
      // Copy the block sizes under the lock so the unlocked walk below sees a
      // consistent layout even if new TLABs are created concurrently.
      block_sizes_copy.reset(new std::vector<size_t>(block_sizes_.begin(), block_sizes_.end()));
    }

    black_dense_size = black_dense_region_size_;
  }

  // black_dense_region_size_ will be non-zero only in case of moving-space of CMC GC.
  if (black_dense_size > 0) {
    // Objects are not packed in this case, and therefore the bitmap is needed
    // to walk this part of the space.
    // Remember the last object visited using bitmap to be able to fetch its size.
    mirror::Object* last_obj = nullptr;
    auto return_obj_visit = [&](mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
      visitor(obj);
      last_obj = obj;
    };
    GetMarkBitmap()->VisitMarkedRange(reinterpret_cast<uintptr_t>(pos),
                                      reinterpret_cast<uintptr_t>(pos + black_dense_size),
                                      return_obj_visit);
    pos += black_dense_size;
    if (last_obj != nullptr) {
      // If the last object visited using bitmap was large enough to go past the
      // black-dense region, then we need to adjust for that to be able to visit
      // objects one after the other below.
      pos = std::max(pos, reinterpret_cast<uint8_t*>(GetNextObject(last_obj)));
    }
  }
  // Walk all of the objects in the main block first. Objects here are packed
  // back-to-back, so each one's end is the next one's start.
  while (pos < main_end) {
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
    // No read barrier because obj may not be a valid object.
    if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() == nullptr) {
      // There is a race condition where a thread has just allocated an object but not set the
      // class. We can't know the size of this object, so we don't visit it and break the loop
      pos = main_end;
      break;
    } else {
      no_thread_safety_analysis_visit(obj);
      pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
    }
  }
  // Walk the other blocks (currently only TLABs).
  if (block_sizes_copy != nullptr) {
    size_t iter = 0;
    size_t num_blks = block_sizes_copy->size();
    // Skip blocks which are already visited above as part of black-dense region.
    // `ptr` tracks the exclusive end of each block; the first block whose end
    // lies past `pos` is where the remaining walk must resume.
    for (uint8_t* ptr = main_end; iter < num_blks; iter++) {
      size_t block_size = (*block_sizes_copy)[iter];
      ptr += block_size;
      if (ptr > pos) {
        // Adjust block-size in case 'pos' is in the middle of the block.
        if (static_cast<ssize_t>(block_size) > ptr - pos) {
          (*block_sizes_copy)[iter] = ptr - pos;
        }
        break;
      }
    }

    for (; iter < num_blks; iter++) {
      size_t block_size = (*block_sizes_copy)[iter];
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
      const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size);
      CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End());
      // We don't know how many objects are allocated in the current block. When we hit a null class
      // assume it's the end. TODO: Have a thread update the header when it flushes the block?
      // No read barrier because obj may not be a valid object.
      while (obj < end_obj && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
        no_thread_safety_analysis_visit(obj);
        obj = GetNextObject(obj);
      }
      // Advance by the full (possibly adjusted) block size even if the inner
      // loop stopped early at an unpublished object.
      pos += block_size;
    }
  } else {
    // No TLAB snapshot means the main block was the whole walkable region.
    CHECK_EQ(end, main_end);
  }
  // Sanity: the walk must have consumed the space exactly up to the snapshot end.
  CHECK_EQ(pos, end);
}
143
144 } // namespace space
145 } // namespace gc
146 } // namespace art
147
148 #endif // ART_RUNTIME_GC_SPACE_BUMP_POINTER_SPACE_WALK_INL_H_
149