/*
 * Copyright 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <fcntl.h>
// Glibc v2.19 doesn't include these in fcntl.h, so host builds will fail
// without them.
#if !defined(FALLOC_FL_PUNCH_HOLE) || !defined(FALLOC_FL_KEEP_SIZE)
#include <linux/falloc.h>
#endif
#include <linux/userfaultfd.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <fstream>
#include <numeric>
#include <optional>
#include <string>
#include <string_view>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "android-base/file.h"
#include "android-base/parsebool.h"
#include "android-base/parseint.h"
#include "android-base/properties.h"
#include "android-base/strings.h"
#include "base/file_utils.h"
#include "base/memfd.h"
#include "base/quasi_atomic.h"
#include "base/systrace.h"
#include "base/utils.h"
#include "gc/accounting/mod_union_table-inl.h"
#include "gc/collector_type.h"
#include "gc/reference_processor.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/task_processor.h"
#include "gc/verification-inl.h"
#include "jit/jit_code_cache.h"
#include "mark_compact-inl.h"
#include "mirror/object-refvisitor-inl.h"
#include "read_barrier_config.h"
#include "scoped_thread_state_change-inl.h"
#include "sigchain.h"
#include "thread_list.h"

#ifdef ART_TARGET_ANDROID
#include "android-modules-utils/sdk_level.h"
#include "com_android_art.h"
#endif

#ifndef __BIONIC__
#ifndef MREMAP_DONTUNMAP
#define MREMAP_DONTUNMAP 4
#endif
#endif  // __BIONIC__

// See aosp/2996596 for where these values came from.
#ifndef UFFDIO_COPY_MODE_MMAP_TRYLOCK
#define UFFDIO_COPY_MODE_MMAP_TRYLOCK (static_cast<uint64_t>(1) << 63)
#endif
#ifndef UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK
#define UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK (static_cast<uint64_t>(1) << 63)
#endif
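// A minimal sketch (not upstream UAPI; the trylock semantics are assumed from
// the GKI patches referenced above) of how these mode bits are used: OR the
// bit into the ioctl's mode field so the kernel attempts mmap_lock without
// blocking, and fall back to a blocking attempt on contention, e.g.
//   struct uffdio_copy copy = { /* dst, src, len */ };
//   copy.mode |= UFFDIO_COPY_MODE_MMAP_TRYLOCK;
//   if (ioctl(uffd, UFFDIO_COPY, &copy) == -1 && errno == EAGAIN) {
//     copy.mode &= ~UFFDIO_COPY_MODE_MMAP_TRYLOCK;  // Retry in blocking mode.
//     ioctl(uffd, UFFDIO_COPY, &copy);
//   }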
#ifdef ART_TARGET_ANDROID
namespace {

using ::android::base::GetBoolProperty;
using ::android::base::ParseBool;
using ::android::base::ParseBoolResult;
using ::android::modules::sdklevel::IsAtLeastV;

}  // namespace
#endif

namespace art HIDDEN {

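// Runtime probe for the MREMAP_DONTUNMAP functionality we need. Note that the
// probe deliberately uses a shared anonymous (shmem) mapping: the original 5.7
// feature covered only private anonymous mappings, and it is the 5.13
// extension to other mapping types (or its GKI backport) that matters here.
// On success the source mapping is left in place, which is why both 'old' and
// 'addr' end up being unmapped below.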
static bool HaveMremapDontunmap() {
  const size_t page_size = GetPageSizeSlow();
  void* old = mmap(nullptr, page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_SHARED, -1, 0);
  CHECK_NE(old, MAP_FAILED);
  void* addr = mremap(old, page_size, page_size, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, nullptr);
  CHECK_EQ(munmap(old, page_size), 0);
  if (addr != MAP_FAILED) {
    CHECK_EQ(munmap(addr, page_size), 0);
    return true;
  } else {
    return false;
  }
}

static bool gUffdSupportsMmapTrylock = false;
// We require the MREMAP_DONTUNMAP functionality of the mremap syscall, which
// was introduced in kernel version 5.13 but has also been backported to GKI
// kernels.
static bool gHaveMremapDontunmap = IsKernelVersionAtLeast(5, 13) || HaveMremapDontunmap();
// Bitmap of features supported by userfaultfd. This is obtained via the uffd API ioctl.
static uint64_t gUffdFeatures = 0;
// Both missing and minor faults on shmem are needed only for minor-fault mode.
static constexpr uint64_t kUffdFeaturesForMinorFault =
    UFFD_FEATURE_MISSING_SHMEM | UFFD_FEATURE_MINOR_SHMEM;
static constexpr uint64_t kUffdFeaturesForSigbus = UFFD_FEATURE_SIGBUS;
// A region which is more than kBlackDenseRegionThreshold percent live doesn't
// need to be compacted, as it is already too densely packed.
static constexpr uint kBlackDenseRegionThreshold = 95U;
// We consider the SIGBUS feature necessary for enabling this GC, as it is
// superior to the threading-based implementation with respect to janks. We may
// want minor-fault support in the future to make jit-code-cache updates (which
// use shmem) concurrent.
bool KernelSupportsUffd() {
#ifdef __linux__
  if (gHaveMremapDontunmap) {
    int fd = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);
    // On non-Android devices we may not have the kernel patches that restrict
    // userfaultfd to user mode. But that is not a security concern as we are
    // on host. Therefore, attempt one more time without UFFD_USER_MODE_ONLY.
    if (!kIsTargetAndroid && fd == -1 && errno == EINVAL) {
      fd = syscall(__NR_userfaultfd, O_CLOEXEC);
    }
    if (fd >= 0) {
      // We are only fetching the available features, which are returned by the
      // ioctl.
      struct uffdio_api api = {.api = UFFD_API, .features = 0, .ioctls = 0};
      CHECK_EQ(ioctl(fd, UFFDIO_API, &api), 0) << "ioctl_userfaultfd : API:" << strerror(errno);
      gUffdFeatures = api.features;
      // MMAP_TRYLOCK is available only in the 5.10 and 5.15 GKI kernels:
      // higher versions have per-VMA locks instead, and lower ones don't
      // support userfaultfd.
      if (kIsTargetAndroid && !IsKernelVersionAtLeast(5, 16)) {
        // Check if the MMAP_TRYLOCK feature is supported.
        const size_t page_size = GetPageSizeSlow();
        void* mem =
            mmap(nullptr, page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        CHECK_NE(mem, MAP_FAILED) << " errno: " << errno;

        struct uffdio_zeropage uffd_zeropage;
        uffd_zeropage.mode = UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK;
        uffd_zeropage.range.start = reinterpret_cast<uintptr_t>(mem);
        uffd_zeropage.range.len = page_size;
        uffd_zeropage.zeropage = 0;
        // The ioctl will definitely fail as mem is not registered with uffd.
        CHECK_EQ(ioctl(fd, UFFDIO_ZEROPAGE, &uffd_zeropage), -1);
        // uffd ioctls return EINVAL for several reasons, so we ensure (via
        // proper alignment of 'mem' and 'len') that the ioctl fails with
        // EINVAL, without updating uffd_zeropage.zeropage, only if the
        // `trylock` mode isn't available.
        if (uffd_zeropage.zeropage == 0 && errno == EINVAL) {
          LOG(INFO) << "MMAP_TRYLOCK is not supported in uffd addr:" << mem
                    << " page-size:" << page_size;
        } else {
          gUffdSupportsMmapTrylock = true;
          LOG(INFO) << "MMAP_TRYLOCK is supported in uffd errno:" << errno << " addr:" << mem
                    << " size:" << page_size;
        }
        munmap(mem, page_size);
      }
      close(fd);
      // The minimum we need is the SIGBUS feature for using userfaultfd.
      return (api.features & kUffdFeaturesForSigbus) == kUffdFeaturesForSigbus;
    }
  }
#endif
  return false;
}

// The other cases are defined as constexpr in runtime/read_barrier_config.h
#if !defined(ART_FORCE_USE_READ_BARRIER) && defined(ART_USE_READ_BARRIER)
// Returns the collector type asked to be used on the cmdline.
static gc::CollectorType FetchCmdlineGcType() {
  std::string argv;
  gc::CollectorType gc_type = gc::CollectorType::kCollectorTypeNone;
  if (android::base::ReadFileToString("/proc/self/cmdline", &argv)) {
    auto pos = argv.rfind("-Xgc:");
    // Guard against "-Xgc:" being absent, in which case rfind() returns npos
    // and the unchecked 'pos + 5' would wrap around.
    if (pos != std::string::npos) {
      if (argv.substr(pos + 5, 3) == "CMC") {
        gc_type = gc::CollectorType::kCollectorTypeCMC;
      } else if (argv.substr(pos + 5, 2) == "CC") {
        gc_type = gc::CollectorType::kCollectorTypeCC;
      }
    }
  }
  return gc_type;
}

#ifdef ART_TARGET_ANDROID
static int GetOverrideCacheInfoFd() {
  std::string args_str;
  if (!android::base::ReadFileToString("/proc/self/cmdline", &args_str)) {
    LOG(WARNING) << "Failed to load /proc/self/cmdline";
    return -1;
  }
  std::vector<std::string_view> args;
  Split(std::string_view(args_str), /*separator=*/'\0', &args);
  for (std::string_view arg : args) {
    if (android::base::ConsumePrefix(&arg, "--cache-info-fd=")) {  // This is a dex2oat flag.
      int fd;
      if (!android::base::ParseInt(std::string(arg), &fd)) {
        LOG(ERROR) << "Failed to parse --cache-info-fd (value: '" << arg << "')";
        return -1;
      }
      return fd;
    }
  }
  return -1;
}

static std::unordered_map<std::string, std::string> GetCachedProperties() {
  // For simplicity, we don't handle multiple calls because otherwise we would have to reset the fd.
  static bool called = false;
  CHECK(!called) << "GetCachedProperties can be called only once";
  called = true;

  std::string cache_info_contents;
  int fd = GetOverrideCacheInfoFd();
  if (fd >= 0) {
    if (!android::base::ReadFdToString(fd, &cache_info_contents)) {
      PLOG(ERROR) << "Failed to read cache-info from fd " << fd;
      return {};
    }
  } else {
    std::string path = GetApexDataDalvikCacheDirectory(InstructionSet::kNone) + "/cache-info.xml";
    if (!android::base::ReadFileToString(path, &cache_info_contents)) {
      // If the file is not found, then we are in chroot or in a standalone runtime process (e.g.,
      // IncidentHelper), or odsign/odrefresh failed to generate and sign the cache info. There's
      // nothing we can do.
      if (errno != ENOENT) {
        PLOG(ERROR) << "Failed to read cache-info from the default path";
      }
      return {};
    }
  }

  std::optional<com::android::art::CacheInfo> cache_info =
      com::android::art::parse(cache_info_contents.c_str());
  if (!cache_info.has_value()) {
    // This should never happen.
    LOG(ERROR) << "Failed to parse cache-info";
    return {};
  }
  const com::android::art::KeyValuePairList* list = cache_info->getFirstSystemProperties();
  if (list == nullptr) {
    // This should never happen.
    LOG(ERROR) << "Missing system properties from cache-info";
    return {};
  }
  const std::vector<com::android::art::KeyValuePair>& properties = list->getItem();
  std::unordered_map<std::string, std::string> result;
  for (const com::android::art::KeyValuePair& pair : properties) {
    result[pair.getK()] = pair.getV();
  }
  return result;
}

static bool GetCachedBoolProperty(
    const std::unordered_map<std::string, std::string>& cached_properties,
    const std::string& key,
    bool default_value) {
  auto it = cached_properties.find(key);
  if (it == cached_properties.end()) {
    return default_value;
  }
  ParseBoolResult result = ParseBool(it->second);
  switch (result) {
    case ParseBoolResult::kTrue:
      return true;
    case ParseBoolResult::kFalse:
      return false;
    case ParseBoolResult::kError:
      return default_value;
  }
}

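// Net effect of the checks below: on Android U and earlier the UFFD GC is
// enabled by default (unless force-disabled), whereas on V and later it must
// be opted into via the build-time or phenotype property.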
static bool SysPropSaysUffdGc() {
  // The phenotype flag can change at any time after boot, but it shouldn't take effect until a
  // reboot. Therefore, we read the phenotype flag from the cache info, which is generated on boot.
  std::unordered_map<std::string, std::string> cached_properties = GetCachedProperties();
  bool phenotype_enable = GetCachedBoolProperty(
      cached_properties, "persist.device_config.runtime_native_boot.enable_uffd_gc_2", false);
  bool phenotype_force_disable = GetCachedBoolProperty(
      cached_properties, "persist.device_config.runtime_native_boot.force_disable_uffd_gc", false);
  bool build_enable = GetBoolProperty("ro.dalvik.vm.enable_uffd_gc", false);
  bool is_at_most_u = !IsAtLeastV();
  return (phenotype_enable || build_enable || is_at_most_u) && !phenotype_force_disable;
}
#else
// Never called.
static bool SysPropSaysUffdGc() { return false; }
#endif

static bool ShouldUseUserfaultfd() {
  static_assert(kUseBakerReadBarrier || kUseTableLookupReadBarrier);
#ifdef __linux__
  // Use CMC/CC if that is being explicitly asked for on the cmdline. Otherwise,
  // always use CC on host. On target, use CMC only if system properties say so
  // and the kernel supports it.
  gc::CollectorType gc_type = FetchCmdlineGcType();
  return gc_type == gc::CollectorType::kCollectorTypeCMC ||
         (gc_type == gc::CollectorType::kCollectorTypeNone &&
          kIsTargetAndroid &&
          SysPropSaysUffdGc() &&
          KernelSupportsUffd());
#else
  return false;
#endif
}

const bool gUseUserfaultfd = ShouldUseUserfaultfd();
const bool gUseReadBarrier = !gUseUserfaultfd;
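// Note: these constants are dynamically initialized once, when libart is
// loaded, so the collector choice is fixed for the lifetime of the process
// (and, having been computed pre-fork, is inherited by zygote children).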
#endif

namespace gc {
namespace collector {

// Turn off kCheckLocks when profiling the GC as it slows down the GC
// significantly.
static constexpr bool kCheckLocks = kDebugLocking;
static constexpr bool kVerifyRootsMarked = kIsDebugBuild;
// Number of compaction buffers reserved for mutator threads in the
// SIGBUS-feature case. It's extremely unlikely that we will ever have more
// than this number of mutator threads trying to access the moving-space
// during one compaction phase.
static constexpr size_t kMutatorCompactionBufferCount = 2048;
// Minimum from-space chunk to be madvised (during concurrent compaction) in one go.
// Choose a reasonable size to avoid making too many batched ioctl and madvise calls.
static constexpr ssize_t kMinFromSpaceMadviseSize = 8 * MB;
// Concurrent compaction termination logic is different (and slightly more efficient) if the
// kernel has the fault-retry feature (allowing repeated faults on the same page), which was
// introduced in 5.7 (https://android-review.git.corp.google.com/c/kernel/common/+/1540088).
// This allows a single page fault to be handled, in turn, by each worker thread, only waking
// up the GC thread at the end.
static const bool gKernelHasFaultRetry = IsKernelVersionAtLeast(5, 7);

std::pair<bool, bool> MarkCompact::GetUffdAndMinorFault() {
  bool uffd_available;
  // In most cases gUffdFeatures will already be initialized at boot time, when
  // libart is loaded. On very old kernels we may get '0' from the kernel, in
  // which case we would be doing the syscalls each time this function is
  // called. But that's a very unlikely case. There are no correctness issues
  // as the response from the kernel never changes after boot.
  if (UNLIKELY(gUffdFeatures == 0)) {
    uffd_available = KernelSupportsUffd();
  } else {
    // We can have any uffd features only if uffd exists.
    uffd_available = true;
  }
  bool minor_fault_available =
      (gUffdFeatures & kUffdFeaturesForMinorFault) == kUffdFeaturesForMinorFault;
  return std::pair<bool, bool>(uffd_available, minor_fault_available);
}

bool MarkCompact::CreateUserfaultfd(bool post_fork) {
  if (post_fork || uffd_ == kFdUnused) {
    // Check if we have MREMAP_DONTUNMAP here for cases where
    // 'ART_USE_READ_BARRIER=false' is used. Additionally, this check ensures
    // that userfaultfd isn't used on old kernels, where it would cause random
    // ioctl failures.
    if (gHaveMremapDontunmap) {
      // Don't use O_NONBLOCK as we rely on read waiting on uffd_ if there isn't
      // any read event available. We don't use poll.
      uffd_ = syscall(__NR_userfaultfd, O_CLOEXEC | UFFD_USER_MODE_ONLY);
      // On non-Android devices we may not have the kernel patches that restrict
      // userfaultfd to user mode. But that is not a security concern as we are
      // on host. Therefore, attempt one more time without UFFD_USER_MODE_ONLY.
      if (!kIsTargetAndroid && UNLIKELY(uffd_ == -1 && errno == EINVAL)) {
        uffd_ = syscall(__NR_userfaultfd, O_CLOEXEC);
      }
      if (UNLIKELY(uffd_ == -1)) {
        uffd_ = kFallbackMode;
        LOG(WARNING) << "Userfaultfd isn't supported (reason: " << strerror(errno)
                     << ") and therefore falling back to stop-the-world compaction.";
      } else {
        DCHECK(IsValidFd(uffd_));
        // Initialize uffd with the features which are required and available.
        // Using a private anonymous mapping in threading mode is the default,
        // for which we don't need to ask for any features. Note: this mode
        // is not used in production.
        struct uffdio_api api = {.api = UFFD_API, .features = 0, .ioctls = 0};
        // We should add the SIGBUS feature only if we plan on using it, as
        // requesting it here means threading mode will not work.
        CHECK_EQ(gUffdFeatures & kUffdFeaturesForSigbus, kUffdFeaturesForSigbus);
        api.features |= kUffdFeaturesForSigbus;
        CHECK_EQ(ioctl(uffd_, UFFDIO_API, &api), 0)
            << "ioctl_userfaultfd: API: " << strerror(errno);
      }
    } else {
      uffd_ = kFallbackMode;
    }
  }
  uffd_initialized_ = !post_fork || uffd_ == kFallbackMode;
  return IsValidFd(uffd_);
}

template <size_t kAlignment>
MarkCompact::LiveWordsBitmap<kAlignment>* MarkCompact::LiveWordsBitmap<kAlignment>::Create(
    uintptr_t begin, uintptr_t end) {
  return static_cast<LiveWordsBitmap<kAlignment>*>(
          MemRangeBitmap::Create("Concurrent Mark Compact live words bitmap", begin, end));
}

size_t MarkCompact::ComputeInfoMapSize() {
  size_t moving_space_size = bump_pointer_space_->Capacity();
  size_t chunk_info_vec_size = moving_space_size / kOffsetChunkSize;
  size_t nr_moving_pages = DivideByPageSize(moving_space_size);
  size_t nr_non_moving_pages = DivideByPageSize(heap_->GetNonMovingSpace()->Capacity());
  return chunk_info_vec_size * sizeof(uint32_t) + nr_non_moving_pages * sizeof(ObjReference) +
         nr_moving_pages * (sizeof(ObjReference) + sizeof(uint32_t) + sizeof(Atomic<uint32_t>));
}

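// The single info-map allocation is carved up in the following order; the
// sizes must stay in sync with ComputeInfoMapSize():
//   chunk_info_vec_                  : (moving_space_sz / kOffsetChunkSize) uint32_t entries
//   first_objs_moving_space_         : nr_moving_pages ObjReference entries
//   pre_compact_offset_moving_space_ : nr_moving_pages uint32_t entries
//   moving_pages_status_             : nr_moving_pages Atomic<uint32_t> entries
//   first_objs_non_moving_space_     : nr_non_moving_pages ObjReference entries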
size_t MarkCompact::InitializeInfoMap(uint8_t* p, size_t moving_space_sz) {
  size_t nr_moving_pages = DivideByPageSize(moving_space_sz);

  chunk_info_vec_ = reinterpret_cast<uint32_t*>(p);
  vector_length_ = moving_space_sz / kOffsetChunkSize;
  size_t total = vector_length_ * sizeof(uint32_t);

  first_objs_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
  total += nr_moving_pages * sizeof(ObjReference);

  pre_compact_offset_moving_space_ = reinterpret_cast<uint32_t*>(p + total);
  total += nr_moving_pages * sizeof(uint32_t);

  moving_pages_status_ = reinterpret_cast<Atomic<uint32_t>*>(p + total);
  total += nr_moving_pages * sizeof(Atomic<uint32_t>);

  first_objs_non_moving_space_ = reinterpret_cast<ObjReference*>(p + total);
  total += DivideByPageSize(heap_->GetNonMovingSpace()->Capacity()) * sizeof(ObjReference);
  DCHECK_EQ(total, ComputeInfoMapSize());
  return total;
}

MarkCompact::MarkCompact(Heap* heap)
    : GarbageCollector(heap, "concurrent mark compact"),
      gc_barrier_(0),
      lock_("mark compact lock", kGenericBottomLock),
      bump_pointer_space_(heap->GetBumpPointerSpace()),
      moving_space_bitmap_(bump_pointer_space_->GetMarkBitmap()),
      moving_space_begin_(bump_pointer_space_->Begin()),
      moving_space_end_(bump_pointer_space_->Limit()),
      black_dense_end_(moving_space_begin_),
      uffd_(kFdUnused),
      sigbus_in_progress_count_{kSigbusCounterCompactionDoneMask, kSigbusCounterCompactionDoneMask},
      compacting_(false),
      marking_done_(false),
      uffd_initialized_(false),
      clamp_info_map_status_(ClampInfoStatus::kClampInfoNotDone) {
  if (kIsDebugBuild) {
    updated_roots_.reset(new std::unordered_set<void*>());
  }
  if (gUffdFeatures == 0) {
    GetUffdAndMinorFault();
  }
  uint8_t* moving_space_begin = bump_pointer_space_->Begin();
  // TODO: Depending on how the bump-pointer space move is implemented, if we
  // switch between two virtual memory regions each time, then we will have to
  // initialize live_words_bitmap_ accordingly.
  live_words_bitmap_.reset(LiveWordsBitmap<kAlignment>::Create(
      reinterpret_cast<uintptr_t>(moving_space_begin),
      reinterpret_cast<uintptr_t>(bump_pointer_space_->Limit())));

  std::string err_msg;
  size_t moving_space_size = bump_pointer_space_->Capacity();
  {
    // Create one MemMap for all the data structures.
    info_map_ = MemMap::MapAnonymous("Concurrent mark-compact chunk-info vector",
                                     ComputeInfoMapSize(),
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/false,
                                     &err_msg);
    if (UNLIKELY(!info_map_.IsValid())) {
      LOG(FATAL) << "Failed to allocate concurrent mark-compact chunk-info vector: " << err_msg;
    } else {
      size_t total = InitializeInfoMap(info_map_.Begin(), moving_space_size);
      DCHECK_EQ(total, info_map_.Size());
    }
  }

  size_t moving_space_alignment = Heap::BestPageTableAlignment(moving_space_size);
  // The moving space is created at a fixed address, which is expected to be
  // PMD-size aligned.
  if (!IsAlignedParam(moving_space_begin, moving_space_alignment)) {
    LOG(WARNING) << "Bump pointer space is not aligned to " << PrettySize(moving_space_alignment)
                 << ". This can lead to longer stop-the-world pauses for compaction";
  }
  // NOTE: PROT_NONE is used here as these mappings are for address space reservation
  // only and will be used only after appropriately remapping them.
  from_space_map_ = MemMap::MapAnonymousAligned("Concurrent mark-compact from-space",
                                                moving_space_size,
                                                PROT_NONE,
                                                /*low_4gb=*/kObjPtrPoisoning,
                                                moving_space_alignment,
                                                &err_msg);
  if (UNLIKELY(!from_space_map_.IsValid())) {
    LOG(FATAL) << "Failed to allocate concurrent mark-compact from-space: " << err_msg;
  } else {
    from_space_begin_ = from_space_map_.Begin();
  }

  compaction_buffers_map_ = MemMap::MapAnonymous("Concurrent mark-compact compaction buffers",
                                                 (1 + kMutatorCompactionBufferCount) * gPageSize,
                                                 PROT_READ | PROT_WRITE,
                                                 /*low_4gb=*/kObjPtrPoisoning,
                                                 &err_msg);
  if (UNLIKELY(!compaction_buffers_map_.IsValid())) {
    LOG(FATAL) << "Failed to allocate concurrent mark-compact compaction buffers: " << err_msg;
  }
  // We also use the first page-sized buffer for the purpose of terminating concurrent compaction.
  conc_compaction_termination_page_ = compaction_buffers_map_.Begin();
  // Touch the page deliberately to avoid userfaults on it. We madvise it in
  // CompactionPhase() before using it to terminate concurrent compaction.
  ForceRead(conc_compaction_termination_page_);

  // In most of the cases, we don't expect more than one LinearAlloc space.
  linear_alloc_spaces_data_.reserve(1);

  // Initialize GC metrics.
  metrics::ArtMetrics* metrics = GetMetrics();
  // The mark-compact collector supports only full-heap collections at the moment.
  gc_time_histogram_ = metrics->FullGcCollectionTime();
  metrics_gc_count_ = metrics->FullGcCount();
  metrics_gc_count_delta_ = metrics->FullGcCountDelta();
  gc_throughput_histogram_ = metrics->FullGcThroughput();
  gc_tracing_throughput_hist_ = metrics->FullGcTracingThroughput();
  gc_throughput_avg_ = metrics->FullGcThroughputAvg();
  gc_tracing_throughput_avg_ = metrics->FullGcTracingThroughputAvg();
  gc_scanned_bytes_ = metrics->FullGcScannedBytes();
  gc_scanned_bytes_delta_ = metrics->FullGcScannedBytesDelta();
  gc_freed_bytes_ = metrics->FullGcFreedBytes();
  gc_freed_bytes_delta_ = metrics->FullGcFreedBytesDelta();
  gc_duration_ = metrics->FullGcDuration();
  gc_duration_delta_ = metrics->FullGcDurationDelta();
  are_metrics_initialized_ = true;
}

void MarkCompact::AddLinearAllocSpaceData(uint8_t* begin, size_t len) {
  DCHECK_ALIGNED_PARAM(begin, gPageSize);
  DCHECK_ALIGNED_PARAM(len, gPageSize);
  DCHECK_GE(len, Heap::GetPMDSize());
  size_t alignment = Heap::BestPageTableAlignment(len);
  std::string err_msg;
  MemMap shadow(MemMap::MapAnonymousAligned("linear-alloc shadow map",
                                            len,
                                            PROT_NONE,
                                            /*low_4gb=*/false,
                                            alignment,
                                            &err_msg));
  if (!shadow.IsValid()) {
    LOG(FATAL) << "Failed to allocate linear-alloc shadow map: " << err_msg;
    UNREACHABLE();
  }

  MemMap page_status_map(MemMap::MapAnonymous("linear-alloc page-status map",
                                              DivideByPageSize(len),
                                              PROT_READ | PROT_WRITE,
                                              /*low_4gb=*/false,
                                              &err_msg));
  if (!page_status_map.IsValid()) {
    LOG(FATAL) << "Failed to allocate linear-alloc page-status shadow map: " << err_msg;
    UNREACHABLE();
  }
  linear_alloc_spaces_data_.emplace_back(
      std::forward<MemMap>(shadow), std::forward<MemMap>(page_status_map), begin, begin + len);
}

void MarkCompact::ClampGrowthLimit(size_t new_capacity) {
  // From-space is the same size as moving-space in virtual memory.
  // However, if it's in the >4GB address space then we don't need to clamp it
  // synchronously.
#if defined(__LP64__)
  constexpr bool kClampFromSpace = kObjPtrPoisoning;
#else
  constexpr bool kClampFromSpace = true;
#endif
  size_t old_capacity = bump_pointer_space_->Capacity();
  new_capacity = bump_pointer_space_->ClampGrowthLimit(new_capacity);
  if (new_capacity < old_capacity) {
    CHECK(from_space_map_.IsValid());
    if (kClampFromSpace) {
      from_space_map_.SetSize(new_capacity);
    }
    clamp_info_map_status_ = ClampInfoStatus::kClampInfoPending;
  }
  CHECK_EQ(moving_space_begin_, bump_pointer_space_->Begin());
}

void MarkCompact::MaybeClampGcStructures() {
  size_t moving_space_size = bump_pointer_space_->Capacity();
  DCHECK(thread_running_gc_ != nullptr);
  if (UNLIKELY(clamp_info_map_status_ == ClampInfoStatus::kClampInfoPending)) {
    CHECK(from_space_map_.IsValid());
    if (from_space_map_.Size() > moving_space_size) {
      from_space_map_.SetSize(moving_space_size);
    }
    // Bitmaps and other data structures.
    live_words_bitmap_->SetBitmapSize(moving_space_size);
    size_t set_size = InitializeInfoMap(info_map_.Begin(), moving_space_size);
    CHECK_LT(set_size, info_map_.Size());
    info_map_.SetSize(set_size);

    clamp_info_map_status_ = ClampInfoStatus::kClampInfoFinished;
  }
}

PrepareCardTableForMarking(bool clear_alloc_space_cards)613*795d594fSAndroid Build Coastguard Worker void MarkCompact::PrepareCardTableForMarking(bool clear_alloc_space_cards) {
614*795d594fSAndroid Build Coastguard Worker   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
615*795d594fSAndroid Build Coastguard Worker   accounting::CardTable* const card_table = heap_->GetCardTable();
616*795d594fSAndroid Build Coastguard Worker   // immune_spaces_ is emptied in InitializePhase() before marking starts. This
617*795d594fSAndroid Build Coastguard Worker   // function is invoked twice during marking. We only need to populate immune_spaces_
618*795d594fSAndroid Build Coastguard Worker   // once per GC cycle. And when it's done (below), all the immune spaces are
619*795d594fSAndroid Build Coastguard Worker   // added to it. We can never have partially filled immune_spaces_.
620*795d594fSAndroid Build Coastguard Worker   bool update_immune_spaces = immune_spaces_.IsEmpty();
621*795d594fSAndroid Build Coastguard Worker   // Mark all of the spaces we never collect as immune.
622*795d594fSAndroid Build Coastguard Worker   for (const auto& space : GetHeap()->GetContinuousSpaces()) {
623*795d594fSAndroid Build Coastguard Worker     if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
624*795d594fSAndroid Build Coastguard Worker         space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
625*795d594fSAndroid Build Coastguard Worker       CHECK(space->IsZygoteSpace() || space->IsImageSpace());
626*795d594fSAndroid Build Coastguard Worker       if (update_immune_spaces) {
627*795d594fSAndroid Build Coastguard Worker         immune_spaces_.AddSpace(space);
628*795d594fSAndroid Build Coastguard Worker       }
629*795d594fSAndroid Build Coastguard Worker       accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
630*795d594fSAndroid Build Coastguard Worker       if (table != nullptr) {
631*795d594fSAndroid Build Coastguard Worker         table->ProcessCards();
632*795d594fSAndroid Build Coastguard Worker       } else {
633*795d594fSAndroid Build Coastguard Worker         // Keep cards aged if we don't have a mod-union table since we need
634*795d594fSAndroid Build Coastguard Worker         // to scan them in future GCs. This case is for app images.
        card_table->ModifyCardsAtomic(
            space->Begin(),
            space->End(),
            [](uint8_t card) {
              return (card == gc::accounting::CardTable::kCardClean)
                  ? card
                  : gc::accounting::CardTable::kCardAged;
            },
            /* card modified visitor */ VoidFunctor());
      }
    } else if (clear_alloc_space_cards) {
      CHECK(!space->IsZygoteSpace());
      CHECK(!space->IsImageSpace());
      // The card-table corresponding to the bump-pointer and non-moving spaces
      // can be cleared, because we are going to traverse all the reachable
      // objects in these spaces. This card-table will eventually be used to
      // track mutations while concurrent marking is going on.
      card_table->ClearCardRange(space->Begin(), space->Limit());
      if (space != bump_pointer_space_) {
        CHECK_EQ(space, heap_->GetNonMovingSpace());
        non_moving_space_ = space;
        non_moving_space_bitmap_ = space->GetMarkBitmap();
      }
    } else {
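      // Age only the dirty cards: a dirty card becomes aged so that the next
      // round of marking still scans it, while every other state is cleaned.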
      card_table->ModifyCardsAtomic(
          space->Begin(),
          space->End(),
          [](uint8_t card) {
            return (card == gc::accounting::CardTable::kCardDirty) ?
                       gc::accounting::CardTable::kCardAged :
                       gc::accounting::CardTable::kCardClean;
          },
          /* card modified visitor */ VoidFunctor());
    }
  }
}

void MarkCompact::MarkZygoteLargeObjects() {
  Thread* self = thread_running_gc_;
  DCHECK_EQ(self, Thread::Current());
  space::LargeObjectSpace* const los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    // Pick the current live bitmap (mark bitmap if swapped).
    accounting::LargeObjectBitmap* const live_bitmap = los->GetLiveBitmap();
    accounting::LargeObjectBitmap* const mark_bitmap = los->GetMarkBitmap();
    // Walk through all of the objects and explicitly mark the zygote ones so they don't get swept.
    std::pair<uint8_t*, uint8_t*> range = los->GetBeginEndAtomic();
    live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(range.first),
                                  reinterpret_cast<uintptr_t>(range.second),
                                  [mark_bitmap, los, self](mirror::Object* obj)
                                      REQUIRES(Locks::heap_bitmap_lock_)
                                          REQUIRES_SHARED(Locks::mutator_lock_) {
                                            if (los->IsZygoteLargeObject(self, obj)) {
                                              mark_bitmap->Set(obj);
                                            }
                                          });
  }
}

void MarkCompact::InitializePhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  mark_stack_ = heap_->GetMarkStack();
  CHECK(mark_stack_->IsEmpty());
  immune_spaces_.Reset();
  moving_first_objs_count_ = 0;
  non_moving_first_objs_count_ = 0;
  black_page_count_ = 0;
  bytes_scanned_ = 0;
  freed_objects_ = 0;
  // The first buffer is used by the GC thread.
  compaction_buffer_counter_.store(1, std::memory_order_relaxed);
  black_allocations_begin_ = bump_pointer_space_->Limit();
  DCHECK_EQ(moving_space_begin_, bump_pointer_space_->Begin());
  from_space_slide_diff_ = from_space_begin_ - moving_space_begin_;
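  // Note: from_space_slide_diff_ is the constant offset between the from-space
  // mapping and the moving space; adding it to a moving-space address yields
  // the corresponding from-space address.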
  moving_space_end_ = bump_pointer_space_->Limit();
  if (black_dense_end_ > moving_space_begin_) {
    moving_space_bitmap_->Clear();
  }
  black_dense_end_ = moving_space_begin_;
  // TODO: Would it suffice to read it once in the constructor, which is called
  // in the zygote process?
  pointer_size_ = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
}

class MarkCompact::ThreadFlipVisitor : public Closure {
 public:
  explicit ThreadFlipVisitor(MarkCompact* collector) : collector_(collector) {}

  void Run(Thread* thread) override REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: self is not necessarily equal to thread since thread may be suspended.
    Thread* self = Thread::Current();
    CHECK(thread == self || thread->GetState() != ThreadState::kRunnable)
        << thread->GetState() << " thread " << thread << " self " << self;
    thread->VisitRoots(collector_, kVisitRootFlagAllRoots);
    // The interpreter cache is thread-local, so it needs to be swept either in
    // a thread flip or in a stop-the-world pause.
    CHECK(collector_->compacting_);
    thread->SweepInterpreterCache(collector_);
    thread->AdjustTlab(collector_->black_objs_slide_diff_);
  }

 private:
  MarkCompact* const collector_;
};

class MarkCompact::FlipCallback : public Closure {
 public:
  explicit FlipCallback(MarkCompact* collector) : collector_(collector) {}

  void Run([[maybe_unused]] Thread* thread) override REQUIRES(Locks::mutator_lock_) {
    collector_->CompactionPause();
  }

 private:
  MarkCompact* const collector_;
};

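// Drives a full GC cycle: concurrent marking, a stop-the-world marking pause,
// concurrent reclamation, and, if PrepareForCompaction() finds work to do, a
// thread-flip pause followed by concurrent compaction.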
void MarkCompact::RunPhases() {
  Thread* self = Thread::Current();
  thread_running_gc_ = self;
  Runtime* runtime = Runtime::Current();
  InitializePhase();
  GetHeap()->PreGcVerification(this);
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    MarkingPhase();
  }
  {
    // Marking pause
    ScopedPause pause(this);
    MarkingPause();
    if (kIsDebugBuild) {
      bump_pointer_space_->AssertAllThreadLocalBuffersAreRevoked();
    }
  }
  bool perform_compaction;
  {
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    ReclaimPhase();
    perform_compaction = PrepareForCompaction();
  }

  if (perform_compaction) {
    // Compaction pause
    ThreadFlipVisitor visitor(this);
    FlipCallback callback(this);
    runtime->GetThreadList()->FlipThreadRoots(
        &visitor, &callback, this, GetHeap()->GetGcPauseListener());

    if (IsValidFd(uffd_)) {
      ReaderMutexLock mu(self, *Locks::mutator_lock_);
      CompactionPhase();
    }
  }
  FinishPhase();
  GetHeap()->PostGcVerification(this);
  thread_running_gc_ = nullptr;
}

void MarkCompact::InitMovingSpaceFirstObjects(size_t vec_len, size_t to_space_page_idx) {
  uint32_t offset_in_chunk_word;
  uint32_t offset;
  mirror::Object* obj;
  const uintptr_t heap_begin = moving_space_bitmap_->HeapBegin();

  // Find the first live word.
  size_t chunk_idx = to_space_page_idx * (gPageSize / kOffsetChunkSize);
  DCHECK_LT(chunk_idx, vec_len);
  // Skip chunks with no live data, checking the bound before dereferencing so
  // that we never read past the end of chunk_info_vec_.
  for (; chunk_idx < vec_len && chunk_info_vec_[chunk_idx] == 0; chunk_idx++) {
  }
  if (chunk_idx >= vec_len) {
    // We don't have any live data on the moving-space.
    moving_first_objs_count_ = to_space_page_idx;
    return;
  }
  DCHECK_LT(chunk_idx, vec_len);
  // Use live-words bitmap to find the first live word
  offset_in_chunk_word = live_words_bitmap_->FindNthLiveWordOffset(chunk_idx, /*n*/ 0);
  offset = chunk_idx * kBitsPerVectorWord + offset_in_chunk_word;
  DCHECK(live_words_bitmap_->Test(offset)) << "offset=" << offset
                                           << " chunk_idx=" << chunk_idx
                                           << " N=0"
                                           << " offset_in_word=" << offset_in_chunk_word
                                           << " word=" << std::hex
                                           << live_words_bitmap_->GetWord(chunk_idx);
  obj = moving_space_bitmap_->FindPrecedingObject(heap_begin + offset * kAlignment);
  // TODO: add a check to validate the object.

  pre_compact_offset_moving_space_[to_space_page_idx] = offset;
  first_objs_moving_space_[to_space_page_idx].Assign(obj);
  to_space_page_idx++;

  uint32_t page_live_bytes = 0;
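  // For each subsequent to-space page, accumulate chunk live-byte counts until
  // the running total exceeds one page. The page boundary then falls inside
  // the last chunk added; the page's first live word is found by skipping the
  // chunk's live bytes that belong to the previous page.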
  while (true) {
    for (; page_live_bytes <= gPageSize; chunk_idx++) {
      if (chunk_idx >= vec_len) {
        moving_first_objs_count_ = to_space_page_idx;
        return;
      }
      page_live_bytes += chunk_info_vec_[chunk_idx];
    }
    chunk_idx--;
    page_live_bytes -= gPageSize;
    DCHECK_LE(page_live_bytes, kOffsetChunkSize);
    DCHECK_LE(page_live_bytes, chunk_info_vec_[chunk_idx])
        << " chunk_idx=" << chunk_idx
        << " to_space_page_idx=" << to_space_page_idx
        << " vec_len=" << vec_len;
    DCHECK(IsAligned<kAlignment>(chunk_info_vec_[chunk_idx] - page_live_bytes));
    offset_in_chunk_word =
            live_words_bitmap_->FindNthLiveWordOffset(
                chunk_idx, (chunk_info_vec_[chunk_idx] - page_live_bytes) / kAlignment);
    offset = chunk_idx * kBitsPerVectorWord + offset_in_chunk_word;
    DCHECK(live_words_bitmap_->Test(offset))
        << "offset=" << offset
        << " chunk_idx=" << chunk_idx
        << " N=" << ((chunk_info_vec_[chunk_idx] - page_live_bytes) / kAlignment)
        << " offset_in_word=" << offset_in_chunk_word
        << " word=" << std::hex << live_words_bitmap_->GetWord(chunk_idx);
    // TODO: Can we optimize this for large objects? If we are continuing a
    // large object that spans multiple pages, then we may be able to do without
    // calling FindPrecedingObject().
    //
    // Find the object which encapsulates offset in it, which could be
    // starting at offset itself.
    obj = moving_space_bitmap_->FindPrecedingObject(heap_begin + offset * kAlignment);
    // TODO: add a check to validate the object.
    pre_compact_offset_moving_space_[to_space_page_idx] = offset;
    first_objs_moving_space_[to_space_page_idx].Assign(obj);
    to_space_page_idx++;
    chunk_idx++;
  }
}

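// For every page in [begin, end) that has live data, records the object from
// which the page's reference updates must start: either an object spilling
// over from a preceding page, or the first live object on the page itself.
// Returns one past the index of the last page examined.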
size_t MarkCompact::InitNonMovingFirstObjects(uintptr_t begin,
                                              uintptr_t end,
                                              accounting::ContinuousSpaceBitmap* bitmap,
                                              ObjReference* first_objs_arr) {
  mirror::Object* prev_obj;
  size_t page_idx;
  {
    // Find first live object
    mirror::Object* obj = nullptr;
    bitmap->VisitMarkedRange</*kVisitOnce*/ true>(begin,
                                                  end,
                                                  [&obj] (mirror::Object* o) {
                                                    obj = o;
                                                  });
    if (obj == nullptr) {
      // There are no live objects in the space
      return 0;
    }
    page_idx = DivideByPageSize(reinterpret_cast<uintptr_t>(obj) - begin);
    first_objs_arr[page_idx++].Assign(obj);
    prev_obj = obj;
  }
  // TODO: check obj is valid
  uintptr_t prev_obj_end = reinterpret_cast<uintptr_t>(prev_obj)
                           + RoundUp(prev_obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
  // For every page find the object starting from which we need to call
  // VisitReferences. It could either be an object that started on some
  // preceding page, or some object starting within this page.
  begin = RoundDown(reinterpret_cast<uintptr_t>(prev_obj) + gPageSize, gPageSize);
  while (begin < end) {
    // If a large object started on some preceding page and overlaps with this
    // page as well, reuse it.
    if (prev_obj != nullptr && prev_obj_end > begin) {
      DCHECK_LT(prev_obj, reinterpret_cast<mirror::Object*>(begin));
      first_objs_arr[page_idx].Assign(prev_obj);
    } else {
      prev_obj_end = 0;
      // It's sufficient to search for the previous object only in the
      // preceding page: if no live object started in that page, but an object
      // starting in an even earlier page were big enough to overlap with the
      // current page, then we wouldn't be in this else branch.
      prev_obj = bitmap->FindPrecedingObject(begin, begin - gPageSize);
      if (prev_obj != nullptr) {
        prev_obj_end = reinterpret_cast<uintptr_t>(prev_obj)
                        + RoundUp(prev_obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
      }
      if (prev_obj_end > begin) {
        first_objs_arr[page_idx].Assign(prev_obj);
      } else {
        // Find the first live object in this page
        bitmap->VisitMarkedRange</*kVisitOnce*/ true>(
            begin, begin + gPageSize, [first_objs_arr, page_idx](mirror::Object* obj) {
              first_objs_arr[page_idx].Assign(obj);
            });
      }
      // An empty entry indicates that the page has no live objects and hence
      // can be skipped.
    }
    begin += gPageSize;
    page_idx++;
  }
  return page_idx;
}

bool MarkCompact::PrepareForCompaction() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  size_t chunk_info_per_page = gPageSize / kOffsetChunkSize;
  size_t vector_len = (black_allocations_begin_ - moving_space_begin_) / kOffsetChunkSize;
  DCHECK_LE(vector_len, vector_length_);
  DCHECK_ALIGNED_PARAM(vector_length_, chunk_info_per_page);
  if (UNLIKELY(vector_len == 0)) {
    // Nothing to compact.
    return false;
  }
  for (size_t i = 0; i < vector_len; i++) {
    DCHECK_LE(chunk_info_vec_[i], kOffsetChunkSize);
    DCHECK_EQ(chunk_info_vec_[i], live_words_bitmap_->LiveBytesInBitmapWord(i));
  }

  // TODO: We can do a lot of neat tricks with this offset vector to tune the
  // compaction as we wish. Originally, the compaction algorithm slides all
  // live objects towards the beginning of the heap. This is nice because it
  // keeps the spatial locality of objects intact.
  // However, sometimes it's desirable to compact objects only in certain
  // portions of the heap. For instance, it is expected that, over time,
  // objects towards the beginning of the heap are long lived and are always
  // densely packed. In that case, it makes sense to only update references in
  // that region and not try to compact it.
  // Furthermore, we might have some large objects and may not want to move
  // such objects.
  // We can adjust, without too much effort, the values in chunk_info_vec_ such
  // that the objects in the dense beginning area aren't moved. OTOH, large
  // objects, which could be anywhere in the heap, could also be kept from
  // moving by using a similar trick. The only issue is that by doing this we
  // will leave an unused hole in the middle of the heap which can't be used
  // for allocations until we do a *full* compaction.
  //
  // At this point every element in chunk_info_vec_ contains the live-bytes
  // of the corresponding chunk. For old-to-new address computation we need
  // every element to reflect the total live-bytes up to the corresponding
  // chunk.

  size_t black_dense_idx = 0;
  GcCause gc_cause = GetCurrentIteration()->GetGcCause();
  if (gc_cause != kGcCauseExplicit && gc_cause != kGcCauseCollectorTransition &&
      !GetCurrentIteration()->GetClearSoftReferences()) {
    uint64_t live_bytes = 0, total_bytes = 0;
    size_t aligned_vec_len = RoundUp(vector_len, chunk_info_per_page);
    size_t num_pages = aligned_vec_len / chunk_info_per_page;
    size_t threshold_passing_marker = 0;  // In number of pages
    std::vector<uint32_t> pages_live_bytes;
    pages_live_bytes.reserve(num_pages);
    // Identify the largest chunk towards the beginning of moving space which
    // passes the black-dense threshold.
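    // threshold_passing_marker records (in pages) the longest prefix whose
    // cumulative live/total ratio still meets kBlackDenseRegionThreshold
    // percent.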
    for (size_t i = 0; i < aligned_vec_len; i += chunk_info_per_page) {
      uint32_t page_live_bytes = 0;
      for (size_t j = 0; j < chunk_info_per_page; j++) {
        page_live_bytes += chunk_info_vec_[i + j];
        total_bytes += kOffsetChunkSize;
      }
      live_bytes += page_live_bytes;
      pages_live_bytes.push_back(page_live_bytes);
      if (live_bytes * 100U >= total_bytes * kBlackDenseRegionThreshold) {
        threshold_passing_marker = pages_live_bytes.size();
      }
    }
    DCHECK_EQ(pages_live_bytes.size(), num_pages);
    // Trim pages at the end of the prefix whose individual live-bytes ratios
    // fall below the threshold.
    if (threshold_passing_marker > 0) {
      auto iter = std::find_if(
          pages_live_bytes.rbegin() + (num_pages - threshold_passing_marker),
          pages_live_bytes.rend(),
          [](uint32_t bytes) { return bytes * 100U >= gPageSize * kBlackDenseRegionThreshold; });
      black_dense_idx = (pages_live_bytes.rend() - iter) * chunk_info_per_page;
    }
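    // Objects below black_dense_end_ will not be moved; only the references
    // they contain are updated in place during compaction.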
    black_dense_end_ = moving_space_begin_ + black_dense_idx * kOffsetChunkSize;
    DCHECK_ALIGNED_PARAM(black_dense_end_, gPageSize);

    // Adjust for a class allocated after black_dense_end_ while its object(s)
    // are earlier. This is required as we update the references in the
    // black-dense region in-place. If the class pointer of some page's first
    // object, which started in some preceding page, were already updated, then
    // we would read wrong class data, like the ref-offset bitmap.
    for (auto iter = class_after_obj_map_.rbegin();
         iter != class_after_obj_map_.rend() &&
         reinterpret_cast<uint8_t*>(iter->first.AsMirrorPtr()) >= black_dense_end_;
         iter++) {
      black_dense_end_ =
          std::min(black_dense_end_, reinterpret_cast<uint8_t*>(iter->second.AsMirrorPtr()));
      black_dense_end_ = AlignDown(black_dense_end_, gPageSize);
    }
    black_dense_idx = (black_dense_end_ - moving_space_begin_) / kOffsetChunkSize;
    DCHECK_LE(black_dense_idx, vector_len);
    if (black_dense_idx == vector_len) {
      // There is nothing to compact.
      return false;
    }
    InitNonMovingFirstObjects(reinterpret_cast<uintptr_t>(moving_space_begin_),
                              reinterpret_cast<uintptr_t>(black_dense_end_),
                              moving_space_bitmap_,
                              first_objs_moving_space_);
  }

  InitMovingSpaceFirstObjects(vector_len, black_dense_idx / chunk_info_per_page);
  non_moving_first_objs_count_ =
      InitNonMovingFirstObjects(reinterpret_cast<uintptr_t>(non_moving_space_->Begin()),
                                reinterpret_cast<uintptr_t>(non_moving_space_->End()),
                                non_moving_space_->GetLiveBitmap(),
                                first_objs_non_moving_space_);
  // Update the vector one element past the heap usage, as it is required for
  // computing the post-compact addresses of black-allocated objects.
  uint32_t total_bytes;
  if (vector_len < vector_length_) {
    vector_len++;
    total_bytes = 0;
  } else {
    // Fetch the value stored in the last element before it gets overwritten by
    // std::exclusive_scan().
    total_bytes = chunk_info_vec_[vector_len - 1];
  }
  std::exclusive_scan(chunk_info_vec_ + black_dense_idx,
                      chunk_info_vec_ + vector_len,
                      chunk_info_vec_ + black_dense_idx,
                      black_dense_idx * kOffsetChunkSize);
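  // For illustration (hypothetical values): with black_dense_idx == 0 and
  // chunk_info_vec_ == {64, 128, 0, 32}, the scan rewrites it in place to
  // {0, 64, 192, 192}. Entry i now holds the total live bytes preceding
  // chunk i, i.e. chunk i's post-compact offset from moving_space_begin_.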
  total_bytes += chunk_info_vec_[vector_len - 1];
  post_compact_end_ = AlignUp(moving_space_begin_ + total_bytes, gPageSize);
  CHECK_EQ(post_compact_end_, moving_space_begin_ + moving_first_objs_count_ * gPageSize)
      << "moving_first_objs_count_:" << moving_first_objs_count_
      << " black_dense_idx:" << black_dense_idx << " vector_len:" << vector_len
      << " total_bytes:" << total_bytes
      << " black_dense_end:" << reinterpret_cast<void*>(black_dense_end_)
      << " chunk_info_per_page:" << chunk_info_per_page;
  black_objs_slide_diff_ = black_allocations_begin_ - post_compact_end_;
  // We shouldn't be consuming more space after compaction than pre-compaction.
  CHECK_GE(black_objs_slide_diff_, 0);
  if (black_objs_slide_diff_ == 0) {
    black_dense_end_ = black_allocations_begin_;
    return false;
  }
  for (size_t i = vector_len; i < vector_length_; i++) {
    DCHECK_EQ(chunk_info_vec_[i], 0u);
  }

  // How do we handle compaction of the heap portion used for allocations after
  // the marking-pause?
  // All allocations after the marking-pause are considered black (reachable)
  // for this GC cycle. However, they need not be allocated contiguously as
  // different mutators use TLABs. So we will compact the heap till the point
  // where allocations took place before the marking-pause. Everything after
  // that will be slid with TLAB holes, and then the TLAB info in TLS will be
  // appropriately updated in the pre-compaction pause.
  // The chunk-info vector entries for the post marking-pause allocations will
  // also be updated in the pre-compaction pause.

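  // Compaction is performed using userfaultfd; lazily create the file
  // descriptor on the first GC cycle that actually needs to compact.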
  if (!uffd_initialized_) {
    CreateUserfaultfd(/*post_fork=*/false);
  }
  return true;
}

class MarkCompact::VerifyRootMarkedVisitor : public SingleRootVisitor {
 public:
  explicit VerifyRootMarkedVisitor(MarkCompact* collector) : collector_(collector) { }

  void VisitRoot(mirror::Object* root, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
  }

 private:
  MarkCompact* const collector_;
};

void MarkCompact::ReMarkRoots(Runtime* runtime) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK_EQ(thread_running_gc_, Thread::Current());
  Locks::mutator_lock_->AssertExclusiveHeld(thread_running_gc_);
  MarkNonThreadRoots(runtime);
  MarkConcurrentRoots(static_cast<VisitRootFlags>(kVisitRootFlagNewRoots
                                                  | kVisitRootFlagStopLoggingNewRoots
                                                  | kVisitRootFlagClearRootLog),
                      runtime);

  if (kVerifyRootsMarked) {
    TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings());
    VerifyRootMarkedVisitor visitor(this);
    runtime->VisitRoots(&visitor);
  }
}

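// Stop-the-world pause concluding concurrent marking: revokes thread-local
// buffers and allocation stacks, re-marks roots, scans dirty cards, and swaps
// the allocation stacks.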
void MarkCompact::MarkingPause() {
  TimingLogger::ScopedTiming t("(Paused)MarkingPause", GetTimings());
  Runtime* runtime = Runtime::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(thread_running_gc_);
  {
    // Handle the dirty objects, since we are a concurrent GC.
    WriterMutexLock mu(thread_running_gc_, *Locks::heap_bitmap_lock_);
    {
      MutexLock mu2(thread_running_gc_, *Locks::runtime_shutdown_lock_);
      MutexLock mu3(thread_running_gc_, *Locks::thread_list_lock_);
      std::list<Thread*> thread_list = runtime->GetThreadList()->GetList();
      for (Thread* thread : thread_list) {
        thread->VisitRoots(this, static_cast<VisitRootFlags>(0));
        DCHECK_EQ(thread->GetThreadLocalGcBuffer(), nullptr);
        // Need to revoke all the thread-local allocation stacks since we will
        // swap the allocation stacks (below) and don't want anybody to allocate
        // into the live stack.
        thread->RevokeThreadLocalAllocationStack();
        bump_pointer_space_->RevokeThreadLocalBuffers(thread);
      }
    }
    // Fetch only the accumulated objects-allocated count as it is guaranteed to
    // be up-to-date after the TLAB revocation above.
    freed_objects_ += bump_pointer_space_->GetAccumulatedObjectsAllocated();
    // Capture 'end' of moving-space at this point. Every allocation beyond this
    // point will be considered as black.
    // Align-up to page boundary so that black allocations happen from next page
    // onwards. Also, it ensures that 'end' is aligned for card-table's
    // ClearCardRange().
    black_allocations_begin_ = bump_pointer_space_->AlignEnd(thread_running_gc_, gPageSize, heap_);
    DCHECK_ALIGNED_PARAM(black_allocations_begin_, gPageSize);

    // Re-mark root set. Doesn't include thread-roots as they are already marked
    // above.
    ReMarkRoots(runtime);
    // Scan dirty objects.
    RecursiveMarkDirtyObjects(/*paused*/ true, accounting::CardTable::kCardDirty);
    {
      TimingLogger::ScopedTiming t2("SwapStacks", GetTimings());
      heap_->SwapStacks();
      live_stack_freeze_size_ = heap_->GetLiveStack()->Size();
    }
  }
  // TODO: For PreSweepingGcVerification(), find the correct strategy to
  // visit/walk objects in bump-pointer space when we have a mark-bitmap to
  // indicate live objects. At the same time we also need to be able to visit
  // black allocations, even though they are not marked in the bitmap. Without
  // both of these we fail pre-sweeping verification. We also leave windows
  // open wherein a VisitObjects/Walk on the space would either miss some
  // objects or visit unreachable ones. These windows exist while we are
  // switching from the shared mutator-lock to exclusive and vice-versa,
  // starting from here till the compaction pause.
  // heap_->PreSweepingGcVerification(this);

  // Disallow new system weaks to prevent a race which occurs when someone adds
  // a new system weak before we sweep them. Since this new system weak may not
  // be marked, the GC may incorrectly sweep it. This also fixes a race where
  // interning may attempt to return a strong reference to a string that is
  // about to be swept.
  runtime->DisallowNewSystemWeaks();
  // Enable the reference processing slow path, needs to be done with mutators
  // paused since there is no lock in the GetReferent fast path.
  heap_->GetReferenceProcessor()->EnableSlowPath();
  marking_done_ = true;
}

void MarkCompact::SweepSystemWeaks(Thread* self, Runtime* runtime, const bool paused) {
  TimingLogger::ScopedTiming t(paused ? "(Paused)SweepSystemWeaks" : "SweepSystemWeaks",
                               GetTimings());
  ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
  runtime->SweepSystemWeaks(this);
}

void MarkCompact::ProcessReferences(Thread* self) {
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->GetReferenceProcessor()->ProcessReferences(self, GetTimings());
}

void MarkCompact::Sweep(bool swap_bitmaps) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // Ensure that nobody inserted objects in the live stack after we swapped the
  // stacks.
  CHECK_GE(live_stack_freeze_size_, GetHeap()->GetLiveStack()->Size());
  {
    TimingLogger::ScopedTiming t2("MarkAllocStackAsLive", GetTimings());
    // Mark everything allocated since the last GC as live so that we can sweep
    // concurrently, knowing that new allocations won't be marked as live.
    accounting::ObjectStack* live_stack = heap_->GetLiveStack();
    heap_->MarkAllocStackAsLive(live_stack);
    live_stack->Reset();
    DCHECK(mark_stack_->IsEmpty());
  }
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace() && space != bump_pointer_space_ &&
        !immune_spaces_.ContainsSpace(space)) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      DCHECK(!alloc_space->IsZygoteSpace());
      TimingLogger::ScopedTiming split("SweepMallocSpace", GetTimings());
      RecordFree(alloc_space->Sweep(swap_bitmaps));
    }
  }
  SweepLargeObjects(swap_bitmaps);
}

void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
  space::LargeObjectSpace* los = heap_->GetLargeObjectsSpace();
  if (los != nullptr) {
    TimingLogger::ScopedTiming split(__FUNCTION__, GetTimings());
    RecordFreeLOS(los->Sweep(swap_bitmaps));
  }
}

void MarkCompact::ReclaimPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK(thread_running_gc_ == Thread::Current());
  Runtime* const runtime = Runtime::Current();
  // Process the references concurrently.
  ProcessReferences(thread_running_gc_);
  // TODO: Try to merge this system-weak sweeping with the one while updating
  // references during the compaction pause.
  SweepSystemWeaks(thread_running_gc_, runtime, /*paused*/ false);
  runtime->AllowNewSystemWeaks();
  // Clean up class loaders after system weaks are swept since that is how we
  // know if class unloading occurred.
  runtime->GetClassLinker()->CleanupClassLoaders();
  {
    WriterMutexLock mu(thread_running_gc_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep.
    // Only swaps unbound bitmaps.
    SwapBitmaps();
    // Unbind the live and mark bitmaps.
    GetHeap()->UnBindBitmaps();
  }
}

// We want to avoid checking for every reference if it's within the page or
// not. This can be done if we know where in the page the holder object lies.
// If it doesn't overlap either boundary then we can skip the checks.
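// The template flags are expected to be enabled only for holder objects that
// straddle the first or the last page boundary of the range being processed,
// so the fast path (both false) avoids per-reference bounds checks entirely.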
template <bool kCheckBegin, bool kCheckEnd>
class MarkCompact::RefsUpdateVisitor {
1261*795d594fSAndroid Build Coastguard Worker  public:
1262*795d594fSAndroid Build Coastguard Worker   explicit RefsUpdateVisitor(MarkCompact* collector,
1263*795d594fSAndroid Build Coastguard Worker                              mirror::Object* obj,
1264*795d594fSAndroid Build Coastguard Worker                              uint8_t* begin,
1265*795d594fSAndroid Build Coastguard Worker                              uint8_t* end)
1266*795d594fSAndroid Build Coastguard Worker       : collector_(collector),
1267*795d594fSAndroid Build Coastguard Worker         moving_space_begin_(collector->black_dense_end_),
1268*795d594fSAndroid Build Coastguard Worker         moving_space_end_(collector->moving_space_end_),
1269*795d594fSAndroid Build Coastguard Worker         obj_(obj),
1270*795d594fSAndroid Build Coastguard Worker         begin_(begin),
1271*795d594fSAndroid Build Coastguard Worker         end_(end) {
1272*795d594fSAndroid Build Coastguard Worker     DCHECK(!kCheckBegin || begin != nullptr);
1273*795d594fSAndroid Build Coastguard Worker     DCHECK(!kCheckEnd || end != nullptr);
1274*795d594fSAndroid Build Coastguard Worker   }
1275*795d594fSAndroid Build Coastguard Worker 
1276*795d594fSAndroid Build Coastguard Worker   void operator()([[maybe_unused]] mirror::Object* old,
1277*795d594fSAndroid Build Coastguard Worker                   MemberOffset offset,
1278*795d594fSAndroid Build Coastguard Worker                   [[maybe_unused]] bool is_static) const ALWAYS_INLINE
1279*795d594fSAndroid Build Coastguard Worker       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
1280*795d594fSAndroid Build Coastguard Worker     bool update = true;
1281*795d594fSAndroid Build Coastguard Worker     if (kCheckBegin || kCheckEnd) {
1282*795d594fSAndroid Build Coastguard Worker       uint8_t* ref = reinterpret_cast<uint8_t*>(obj_) + offset.Int32Value();
1283*795d594fSAndroid Build Coastguard Worker       update = (!kCheckBegin || ref >= begin_) && (!kCheckEnd || ref < end_);
1284*795d594fSAndroid Build Coastguard Worker     }
1285*795d594fSAndroid Build Coastguard Worker     if (update) {
1286*795d594fSAndroid Build Coastguard Worker       collector_->UpdateRef(obj_, offset, moving_space_begin_, moving_space_end_);
1287*795d594fSAndroid Build Coastguard Worker     }
1288*795d594fSAndroid Build Coastguard Worker   }
1289*795d594fSAndroid Build Coastguard Worker 
1290*795d594fSAndroid Build Coastguard Worker   // For object arrays we don't need to check boundaries here as it's done in
1291*795d594fSAndroid Build Coastguard Worker   // VisitReferences().
1292*795d594fSAndroid Build Coastguard Worker   // TODO: Optimize reference updating using SIMD instructions. Object arrays
1293*795d594fSAndroid Build Coastguard Worker   // are perfect as all references are tightly packed.
1294*795d594fSAndroid Build Coastguard Worker   void operator()([[maybe_unused]] mirror::Object* old,
1295*795d594fSAndroid Build Coastguard Worker                   MemberOffset offset,
1296*795d594fSAndroid Build Coastguard Worker                   [[maybe_unused]] bool is_static,
1297*795d594fSAndroid Build Coastguard Worker                   [[maybe_unused]] bool is_obj_array) const ALWAYS_INLINE
1298*795d594fSAndroid Build Coastguard Worker       REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
1299*795d594fSAndroid Build Coastguard Worker     collector_->UpdateRef(obj_, offset, moving_space_begin_, moving_space_end_);
1300*795d594fSAndroid Build Coastguard Worker   }
1301*795d594fSAndroid Build Coastguard Worker 
1302*795d594fSAndroid Build Coastguard Worker   void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
1303*795d594fSAndroid Build Coastguard Worker       ALWAYS_INLINE
1304*795d594fSAndroid Build Coastguard Worker       REQUIRES_SHARED(Locks::mutator_lock_) {
1305*795d594fSAndroid Build Coastguard Worker     if (!root->IsNull()) {
1306*795d594fSAndroid Build Coastguard Worker       VisitRoot(root);
1307*795d594fSAndroid Build Coastguard Worker     }
1308*795d594fSAndroid Build Coastguard Worker   }
1309*795d594fSAndroid Build Coastguard Worker 
1310*795d594fSAndroid Build Coastguard Worker   void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
1311*795d594fSAndroid Build Coastguard Worker       ALWAYS_INLINE
1312*795d594fSAndroid Build Coastguard Worker       REQUIRES_SHARED(Locks::mutator_lock_) {
1313*795d594fSAndroid Build Coastguard Worker     collector_->UpdateRoot(root, moving_space_begin_, moving_space_end_);
1314*795d594fSAndroid Build Coastguard Worker   }
1315*795d594fSAndroid Build Coastguard Worker 
1316*795d594fSAndroid Build Coastguard Worker  private:
1317*795d594fSAndroid Build Coastguard Worker   MarkCompact* const collector_;
1318*795d594fSAndroid Build Coastguard Worker   uint8_t* const moving_space_begin_;
1319*795d594fSAndroid Build Coastguard Worker   uint8_t* const moving_space_end_;
1320*795d594fSAndroid Build Coastguard Worker   mirror::Object* const obj_;
1321*795d594fSAndroid Build Coastguard Worker   uint8_t* const begin_;
1322*795d594fSAndroid Build Coastguard Worker   uint8_t* const end_;
1323*795d594fSAndroid Build Coastguard Worker };
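
// A hedged usage sketch (not part of the original source): how the template
// arguments of RefsUpdateVisitor are typically chosen per object. Here 'this'
// is the collector, 'to_ref' the post-compact object address, and 'page' /
// 'page + gPageSize' the destination page boundaries; all three names are
// stand-ins for the locals used by the callers below.
//
//   // Object fully inside the page: no boundary checks needed.
//   RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false>
//       mid(this, to_ref, nullptr, nullptr);
//   // Object overlapping the page's begin: check each reference against it.
//   RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/false>
//       head(this, to_ref, page, nullptr);
//   // Object overlapping the page's end: check each reference against it.
//   RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true>
//       tail(this, to_ref, nullptr, page + gPageSize);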
1324*795d594fSAndroid Build Coastguard Worker 
1325*795d594fSAndroid Build Coastguard Worker bool MarkCompact::IsValidObject(mirror::Object* obj) const {
1326*795d594fSAndroid Build Coastguard Worker   mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
1327*795d594fSAndroid Build Coastguard Worker   if (!heap_->GetVerification()->IsValidHeapObjectAddress(klass)) {
1328*795d594fSAndroid Build Coastguard Worker     return false;
1329*795d594fSAndroid Build Coastguard Worker   }
1330*795d594fSAndroid Build Coastguard Worker   return heap_->GetVerification()->IsValidClassUnchecked<kWithFromSpaceBarrier>(
1331*795d594fSAndroid Build Coastguard Worker           obj->GetClass<kVerifyNone, kWithFromSpaceBarrier>());
1332*795d594fSAndroid Build Coastguard Worker }
1333*795d594fSAndroid Build Coastguard Worker 
1334*795d594fSAndroid Build Coastguard Worker template <typename Callback>
1335*795d594fSAndroid Build Coastguard Worker void MarkCompact::VerifyObject(mirror::Object* ref, Callback& callback) const {
1336*795d594fSAndroid Build Coastguard Worker   if (kIsDebugBuild) {
1337*795d594fSAndroid Build Coastguard Worker     mirror::Class* klass = ref->GetClass<kVerifyNone, kWithFromSpaceBarrier>();
1338*795d594fSAndroid Build Coastguard Worker     mirror::Class* pre_compact_klass = ref->GetClass<kVerifyNone, kWithoutReadBarrier>();
1339*795d594fSAndroid Build Coastguard Worker     mirror::Class* klass_klass = klass->GetClass<kVerifyNone, kWithFromSpaceBarrier>();
1340*795d594fSAndroid Build Coastguard Worker     mirror::Class* klass_klass_klass = klass_klass->GetClass<kVerifyNone, kWithFromSpaceBarrier>();
1341*795d594fSAndroid Build Coastguard Worker     if (HasAddress(pre_compact_klass) &&
1342*795d594fSAndroid Build Coastguard Worker         reinterpret_cast<uint8_t*>(pre_compact_klass) < black_allocations_begin_) {
1343*795d594fSAndroid Build Coastguard Worker       CHECK(moving_space_bitmap_->Test(pre_compact_klass))
1344*795d594fSAndroid Build Coastguard Worker           << "ref=" << ref
1345*795d594fSAndroid Build Coastguard Worker           << " post_compact_end=" << static_cast<void*>(post_compact_end_)
1346*795d594fSAndroid Build Coastguard Worker           << " pre_compact_klass=" << pre_compact_klass
1347*795d594fSAndroid Build Coastguard Worker           << " black_allocations_begin=" << static_cast<void*>(black_allocations_begin_);
1348*795d594fSAndroid Build Coastguard Worker       CHECK(live_words_bitmap_->Test(pre_compact_klass));
1349*795d594fSAndroid Build Coastguard Worker     }
1350*795d594fSAndroid Build Coastguard Worker     if (!IsValidObject(ref)) {
1351*795d594fSAndroid Build Coastguard Worker       std::ostringstream oss;
1352*795d594fSAndroid Build Coastguard Worker       oss << "Invalid object: "
1353*795d594fSAndroid Build Coastguard Worker           << "ref=" << ref
1354*795d594fSAndroid Build Coastguard Worker           << " klass=" << klass
1355*795d594fSAndroid Build Coastguard Worker           << " klass_klass=" << klass_klass
1356*795d594fSAndroid Build Coastguard Worker           << " klass_klass_klass=" << klass_klass_klass
1357*795d594fSAndroid Build Coastguard Worker           << " pre_compact_klass=" << pre_compact_klass
1358*795d594fSAndroid Build Coastguard Worker           << " from_space_begin=" << static_cast<void*>(from_space_begin_)
1359*795d594fSAndroid Build Coastguard Worker           << " pre_compact_begin=" << static_cast<void*>(bump_pointer_space_->Begin())
1360*795d594fSAndroid Build Coastguard Worker           << " post_compact_end=" << static_cast<void*>(post_compact_end_)
1361*795d594fSAndroid Build Coastguard Worker           << " black_allocations_begin=" << static_cast<void*>(black_allocations_begin_);
1362*795d594fSAndroid Build Coastguard Worker 
1363*795d594fSAndroid Build Coastguard Worker       // Call callback before dumping larger data like RAM and space dumps.
1364*795d594fSAndroid Build Coastguard Worker       callback(oss);
1365*795d594fSAndroid Build Coastguard Worker 
1366*795d594fSAndroid Build Coastguard Worker       oss << " \nobject="
1367*795d594fSAndroid Build Coastguard Worker           << heap_->GetVerification()->DumpRAMAroundAddress(reinterpret_cast<uintptr_t>(ref), 128)
1368*795d594fSAndroid Build Coastguard Worker           << " \nklass(from)="
1369*795d594fSAndroid Build Coastguard Worker           << heap_->GetVerification()->DumpRAMAroundAddress(reinterpret_cast<uintptr_t>(klass), 128)
1370*795d594fSAndroid Build Coastguard Worker           << "spaces:\n";
1371*795d594fSAndroid Build Coastguard Worker       heap_->DumpSpaces(oss);
1372*795d594fSAndroid Build Coastguard Worker       LOG(FATAL) << oss.str();
1373*795d594fSAndroid Build Coastguard Worker     }
1374*795d594fSAndroid Build Coastguard Worker   }
1375*795d594fSAndroid Build Coastguard Worker }
1376*795d594fSAndroid Build Coastguard Worker 
1377*795d594fSAndroid Build Coastguard Worker void MarkCompact::CompactPage(mirror::Object* obj,
1378*795d594fSAndroid Build Coastguard Worker                               uint32_t offset,
1379*795d594fSAndroid Build Coastguard Worker                               uint8_t* addr,
1380*795d594fSAndroid Build Coastguard Worker                               bool needs_memset_zero) {
1381*795d594fSAndroid Build Coastguard Worker   DCHECK(moving_space_bitmap_->Test(obj)
1382*795d594fSAndroid Build Coastguard Worker          && live_words_bitmap_->Test(obj));
1383*795d594fSAndroid Build Coastguard Worker   DCHECK(live_words_bitmap_->Test(offset)) << "obj=" << obj
1384*795d594fSAndroid Build Coastguard Worker                                            << " offset=" << offset
1385*795d594fSAndroid Build Coastguard Worker                                            << " addr=" << static_cast<void*>(addr)
1386*795d594fSAndroid Build Coastguard Worker                                            << " black_allocs_begin="
1387*795d594fSAndroid Build Coastguard Worker                                            << static_cast<void*>(black_allocations_begin_)
1388*795d594fSAndroid Build Coastguard Worker                                            << " post_compact_end="
1389*795d594fSAndroid Build Coastguard Worker                                            << static_cast<void*>(post_compact_end_);
1390*795d594fSAndroid Build Coastguard Worker   uint8_t* const start_addr = addr;
1391*795d594fSAndroid Build Coastguard Worker   // The number of distinct live-strides on this page.
1392*795d594fSAndroid Build Coastguard Worker   size_t stride_count = 0;
1393*795d594fSAndroid Build Coastguard Worker   uint8_t* last_stride = addr;
1394*795d594fSAndroid Build Coastguard Worker   uint32_t last_stride_begin = 0;
1395*795d594fSAndroid Build Coastguard Worker   auto verify_obj_callback = [&] (std::ostream& os) {
1396*795d594fSAndroid Build Coastguard Worker                                os << " stride_count=" << stride_count
1397*795d594fSAndroid Build Coastguard Worker                                   << " last_stride=" << static_cast<void*>(last_stride)
1398*795d594fSAndroid Build Coastguard Worker                                   << " offset=" << offset
1399*795d594fSAndroid Build Coastguard Worker                                   << " start_addr=" << static_cast<void*>(start_addr);
1400*795d594fSAndroid Build Coastguard Worker                              };
1401*795d594fSAndroid Build Coastguard Worker   obj = GetFromSpaceAddr(obj);
1402*795d594fSAndroid Build Coastguard Worker   live_words_bitmap_->VisitLiveStrides(
1403*795d594fSAndroid Build Coastguard Worker       offset,
1404*795d594fSAndroid Build Coastguard Worker       black_allocations_begin_,
1405*795d594fSAndroid Build Coastguard Worker       gPageSize,
1406*795d594fSAndroid Build Coastguard Worker       [&addr, &last_stride, &stride_count, &last_stride_begin, verify_obj_callback, this](
1407*795d594fSAndroid Build Coastguard Worker           uint32_t stride_begin, size_t stride_size, [[maybe_unused]] bool is_last)
1408*795d594fSAndroid Build Coastguard Worker           REQUIRES_SHARED(Locks::mutator_lock_) {
1409*795d594fSAndroid Build Coastguard Worker             const size_t stride_in_bytes = stride_size * kAlignment;
1410*795d594fSAndroid Build Coastguard Worker             DCHECK_LE(stride_in_bytes, gPageSize);
1411*795d594fSAndroid Build Coastguard Worker             last_stride_begin = stride_begin;
1412*795d594fSAndroid Build Coastguard Worker             DCHECK(IsAligned<kAlignment>(addr));
1413*795d594fSAndroid Build Coastguard Worker             memcpy(addr, from_space_begin_ + stride_begin * kAlignment, stride_in_bytes);
1414*795d594fSAndroid Build Coastguard Worker             if (kIsDebugBuild) {
1415*795d594fSAndroid Build Coastguard Worker               uint8_t* space_begin = bump_pointer_space_->Begin();
1416*795d594fSAndroid Build Coastguard Worker               // We can interpret the first word of the stride as an
1417*795d594fSAndroid Build Coastguard Worker               // obj only from the second stride onwards, as the first
1418*795d594fSAndroid Build Coastguard Worker               // stride's first object may have started on the previous
1419*795d594fSAndroid Build Coastguard Worker               // page. The only exception is the first page of the
1420*795d594fSAndroid Build Coastguard Worker               // moving space.
1421*795d594fSAndroid Build Coastguard Worker               if (stride_count > 0 || stride_begin * kAlignment < gPageSize) {
1422*795d594fSAndroid Build Coastguard Worker                 mirror::Object* o =
1423*795d594fSAndroid Build Coastguard Worker                     reinterpret_cast<mirror::Object*>(space_begin + stride_begin * kAlignment);
1424*795d594fSAndroid Build Coastguard Worker                 CHECK(live_words_bitmap_->Test(o)) << "ref=" << o;
1425*795d594fSAndroid Build Coastguard Worker                 CHECK(moving_space_bitmap_->Test(o))
1426*795d594fSAndroid Build Coastguard Worker                     << "ref=" << o << " bitmap: " << moving_space_bitmap_->DumpMemAround(o);
1427*795d594fSAndroid Build Coastguard Worker                 VerifyObject(reinterpret_cast<mirror::Object*>(addr), verify_obj_callback);
1428*795d594fSAndroid Build Coastguard Worker               }
1429*795d594fSAndroid Build Coastguard Worker             }
1430*795d594fSAndroid Build Coastguard Worker             last_stride = addr;
1431*795d594fSAndroid Build Coastguard Worker             addr += stride_in_bytes;
1432*795d594fSAndroid Build Coastguard Worker             stride_count++;
1433*795d594fSAndroid Build Coastguard Worker           });
1434*795d594fSAndroid Build Coastguard Worker   DCHECK_LT(last_stride, start_addr + gPageSize);
1435*795d594fSAndroid Build Coastguard Worker   DCHECK_GT(stride_count, 0u);
1436*795d594fSAndroid Build Coastguard Worker   size_t obj_size = 0;
1437*795d594fSAndroid Build Coastguard Worker   uint32_t offset_within_obj = offset * kAlignment
1438*795d594fSAndroid Build Coastguard Worker                                - (reinterpret_cast<uint8_t*>(obj) - from_space_begin_);
1439*795d594fSAndroid Build Coastguard Worker   // First object
1440*795d594fSAndroid Build Coastguard Worker   if (offset_within_obj > 0) {
1441*795d594fSAndroid Build Coastguard Worker     mirror::Object* to_ref = reinterpret_cast<mirror::Object*>(start_addr - offset_within_obj);
1442*795d594fSAndroid Build Coastguard Worker     if (stride_count > 1) {
1443*795d594fSAndroid Build Coastguard Worker       RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/false> visitor(this,
1444*795d594fSAndroid Build Coastguard Worker                                                                          to_ref,
1445*795d594fSAndroid Build Coastguard Worker                                                                          start_addr,
1446*795d594fSAndroid Build Coastguard Worker                                                                          nullptr);
1447*795d594fSAndroid Build Coastguard Worker       obj_size = obj->VisitRefsForCompaction</*kFetchObjSize*/true, /*kVisitNativeRoots*/false>(
1448*795d594fSAndroid Build Coastguard Worker               visitor, MemberOffset(offset_within_obj), MemberOffset(-1));
1449*795d594fSAndroid Build Coastguard Worker     } else {
1450*795d594fSAndroid Build Coastguard Worker       RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/true> visitor(this,
1451*795d594fSAndroid Build Coastguard Worker                                                                         to_ref,
1452*795d594fSAndroid Build Coastguard Worker                                                                         start_addr,
1453*795d594fSAndroid Build Coastguard Worker                                                                         start_addr + gPageSize);
1454*795d594fSAndroid Build Coastguard Worker       obj_size = obj->VisitRefsForCompaction</*kFetchObjSize*/true, /*kVisitNativeRoots*/false>(
1455*795d594fSAndroid Build Coastguard Worker               visitor, MemberOffset(offset_within_obj), MemberOffset(offset_within_obj
1456*795d594fSAndroid Build Coastguard Worker                                                                      + gPageSize));
1457*795d594fSAndroid Build Coastguard Worker     }
1458*795d594fSAndroid Build Coastguard Worker     obj_size = RoundUp(obj_size, kAlignment);
1459*795d594fSAndroid Build Coastguard Worker     DCHECK_GT(obj_size, offset_within_obj)
1460*795d594fSAndroid Build Coastguard Worker         << "obj:" << obj << " class:" << obj->GetClass<kDefaultVerifyFlags, kWithFromSpaceBarrier>()
1461*795d594fSAndroid Build Coastguard Worker         << " to_addr:" << to_ref
1462*795d594fSAndroid Build Coastguard Worker         << " black-allocation-begin:" << reinterpret_cast<void*>(black_allocations_begin_)
1463*795d594fSAndroid Build Coastguard Worker         << " post-compact-end:" << reinterpret_cast<void*>(post_compact_end_)
1464*795d594fSAndroid Build Coastguard Worker         << " offset:" << offset * kAlignment << " class-after-obj-iter:"
1465*795d594fSAndroid Build Coastguard Worker         << (class_after_obj_iter_ != class_after_obj_map_.rend() ?
1466*795d594fSAndroid Build Coastguard Worker                 class_after_obj_iter_->first.AsMirrorPtr() :
1467*795d594fSAndroid Build Coastguard Worker                 nullptr)
1468*795d594fSAndroid Build Coastguard Worker         << " last-reclaimed-page:" << reinterpret_cast<void*>(last_reclaimed_page_)
1469*795d594fSAndroid Build Coastguard Worker         << " last-checked-reclaim-page-idx:" << last_checked_reclaim_page_idx_
1470*795d594fSAndroid Build Coastguard Worker         << " offset-of-last-idx:"
1471*795d594fSAndroid Build Coastguard Worker         << pre_compact_offset_moving_space_[last_checked_reclaim_page_idx_] * kAlignment
1472*795d594fSAndroid Build Coastguard Worker         << " first-obj-of-last-idx:"
1473*795d594fSAndroid Build Coastguard Worker         << first_objs_moving_space_[last_checked_reclaim_page_idx_].AsMirrorPtr();
1474*795d594fSAndroid Build Coastguard Worker 
1475*795d594fSAndroid Build Coastguard Worker     obj_size -= offset_within_obj;
1476*795d594fSAndroid Build Coastguard Worker     // If there is only one stride, then adjust last_stride_begin to the
1477*795d594fSAndroid Build Coastguard Worker     // end of the first object.
1478*795d594fSAndroid Build Coastguard Worker     if (stride_count == 1) {
1479*795d594fSAndroid Build Coastguard Worker       last_stride_begin += obj_size / kAlignment;
1480*795d594fSAndroid Build Coastguard Worker     }
1481*795d594fSAndroid Build Coastguard Worker   }
1482*795d594fSAndroid Build Coastguard Worker 
1483*795d594fSAndroid Build Coastguard Worker   // Except for the last page being compacted, the pages will have addr ==
1484*795d594fSAndroid Build Coastguard Worker   // start_addr + gPageSize.
1485*795d594fSAndroid Build Coastguard Worker   uint8_t* const end_addr = addr;
1486*795d594fSAndroid Build Coastguard Worker   addr = start_addr;
1487*795d594fSAndroid Build Coastguard Worker   size_t bytes_done = obj_size;
1488*795d594fSAndroid Build Coastguard Worker   // All strides except the last one can be updated without any boundary
1489*795d594fSAndroid Build Coastguard Worker   // checks.
1490*795d594fSAndroid Build Coastguard Worker   DCHECK_LE(addr, last_stride);
1491*795d594fSAndroid Build Coastguard Worker   size_t bytes_to_visit = last_stride - addr;
1492*795d594fSAndroid Build Coastguard Worker   DCHECK_LE(bytes_to_visit, gPageSize);
1493*795d594fSAndroid Build Coastguard Worker   while (bytes_to_visit > bytes_done) {
1494*795d594fSAndroid Build Coastguard Worker     mirror::Object* ref = reinterpret_cast<mirror::Object*>(addr + bytes_done);
1495*795d594fSAndroid Build Coastguard Worker     VerifyObject(ref, verify_obj_callback);
1496*795d594fSAndroid Build Coastguard Worker     RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false>
1497*795d594fSAndroid Build Coastguard Worker             visitor(this, ref, nullptr, nullptr);
1498*795d594fSAndroid Build Coastguard Worker     obj_size = ref->VisitRefsForCompaction(visitor, MemberOffset(0), MemberOffset(-1));
1499*795d594fSAndroid Build Coastguard Worker     obj_size = RoundUp(obj_size, kAlignment);
1500*795d594fSAndroid Build Coastguard Worker     bytes_done += obj_size;
1501*795d594fSAndroid Build Coastguard Worker   }
1502*795d594fSAndroid Build Coastguard Worker   // The last stride may have multiple objects in it, and we don't know where
1503*795d594fSAndroid Build Coastguard Worker   // the last object, which crosses the page boundary, starts. Therefore, check
1504*795d594fSAndroid Build Coastguard Worker   // page-end in all of these objects. Also, we need to call
1505*795d594fSAndroid Build Coastguard Worker   // VisitRefsForCompaction() with the from-space object, as we fetch the object
1506*795d594fSAndroid Build Coastguard Worker   // size, which in the case of a klass requires 'class_size_'.
1507*795d594fSAndroid Build Coastguard Worker   uint8_t* from_addr = from_space_begin_ + last_stride_begin * kAlignment;
1508*795d594fSAndroid Build Coastguard Worker   bytes_to_visit = end_addr - addr;
1509*795d594fSAndroid Build Coastguard Worker   DCHECK_LE(bytes_to_visit, gPageSize);
1510*795d594fSAndroid Build Coastguard Worker   while (bytes_to_visit > bytes_done) {
1511*795d594fSAndroid Build Coastguard Worker     mirror::Object* ref = reinterpret_cast<mirror::Object*>(addr + bytes_done);
1512*795d594fSAndroid Build Coastguard Worker     obj = reinterpret_cast<mirror::Object*>(from_addr);
1513*795d594fSAndroid Build Coastguard Worker     VerifyObject(ref, verify_obj_callback);
1514*795d594fSAndroid Build Coastguard Worker     RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true>
1515*795d594fSAndroid Build Coastguard Worker             visitor(this, ref, nullptr, start_addr + gPageSize);
1516*795d594fSAndroid Build Coastguard Worker     obj_size = obj->VisitRefsForCompaction(visitor,
1517*795d594fSAndroid Build Coastguard Worker                                            MemberOffset(0),
1518*795d594fSAndroid Build Coastguard Worker                                            MemberOffset(end_addr - (addr + bytes_done)));
1519*795d594fSAndroid Build Coastguard Worker     obj_size = RoundUp(obj_size, kAlignment);
1520*795d594fSAndroid Build Coastguard Worker     DCHECK_GT(obj_size, 0u)
1521*795d594fSAndroid Build Coastguard Worker         << "from_addr:" << obj
1522*795d594fSAndroid Build Coastguard Worker         << " from-space-class:" << obj->GetClass<kDefaultVerifyFlags, kWithFromSpaceBarrier>()
1523*795d594fSAndroid Build Coastguard Worker         << " to_addr:" << ref
1524*795d594fSAndroid Build Coastguard Worker         << " black-allocation-begin:" << reinterpret_cast<void*>(black_allocations_begin_)
1525*795d594fSAndroid Build Coastguard Worker         << " post-compact-end:" << reinterpret_cast<void*>(post_compact_end_)
1526*795d594fSAndroid Build Coastguard Worker         << " offset:" << offset * kAlignment << " bytes_done:" << bytes_done
1527*795d594fSAndroid Build Coastguard Worker         << " class-after-obj-iter:"
1528*795d594fSAndroid Build Coastguard Worker         << (class_after_obj_iter_ != class_after_obj_map_.rend() ?
1529*795d594fSAndroid Build Coastguard Worker                 class_after_obj_iter_->first.AsMirrorPtr() :
1530*795d594fSAndroid Build Coastguard Worker                 nullptr)
1531*795d594fSAndroid Build Coastguard Worker         << " last-reclaimed-page:" << reinterpret_cast<void*>(last_reclaimed_page_)
1532*795d594fSAndroid Build Coastguard Worker         << " last-checked-reclaim-page-idx:" << last_checked_reclaim_page_idx_
1533*795d594fSAndroid Build Coastguard Worker         << " offset-of-last-idx:"
1534*795d594fSAndroid Build Coastguard Worker         << pre_compact_offset_moving_space_[last_checked_reclaim_page_idx_] * kAlignment
1535*795d594fSAndroid Build Coastguard Worker         << " first-obj-of-last-idx:"
1536*795d594fSAndroid Build Coastguard Worker         << first_objs_moving_space_[last_checked_reclaim_page_idx_].AsMirrorPtr();
1537*795d594fSAndroid Build Coastguard Worker 
1538*795d594fSAndroid Build Coastguard Worker     from_addr += obj_size;
1539*795d594fSAndroid Build Coastguard Worker     bytes_done += obj_size;
1540*795d594fSAndroid Build Coastguard Worker   }
1541*795d594fSAndroid Build Coastguard Worker   // The last page that we compact may have some bytes left untouched at the
1542*795d594fSAndroid Build Coastguard Worker   // end; we should zero them, as the kernel copies at page granularity.
1543*795d594fSAndroid Build Coastguard Worker   if (needs_memset_zero && UNLIKELY(bytes_done < gPageSize)) {
1544*795d594fSAndroid Build Coastguard Worker     std::memset(addr + bytes_done, 0x0, gPageSize - bytes_done);
1545*795d594fSAndroid Build Coastguard Worker   }
1546*795d594fSAndroid Build Coastguard Worker }
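
// Illustrative sketch (not from the original source) of what CompactPage()
// above assembles: contiguous runs of live words ("strides") are copied from
// from-space and packed back to back into the destination page, after which
// references are updated in place.
//
//   from-space:  [live A][ dead ][live B .......][ dead ][live C]
//   dest page:   [A][B .......][C][zeros, on the last page only]
//
// Only the first object (which may begin before the page) and objects in the
// last stride (which may end beyond the page) need boundary checks.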
1547*795d594fSAndroid Build Coastguard Worker 
1548*795d594fSAndroid Build Coastguard Worker // We store the starting point (pre_compact_page - first_obj) and the first
1549*795d594fSAndroid Build Coastguard Worker // chunk's size. If more TLABs started in this page, then those chunks are
1550*795d594fSAndroid Build Coastguard Worker // identified using the mark bitmap. All this info is prepared in
1551*795d594fSAndroid Build Coastguard Worker // UpdateMovingSpaceBlackAllocations(). If we find a set bit in the bitmap, we
1552*795d594fSAndroid Build Coastguard Worker // copy the remaining page and then use the bitmap to visit each object and update references.
1553*795d594fSAndroid Build Coastguard Worker void MarkCompact::SlideBlackPage(mirror::Object* first_obj,
1554*795d594fSAndroid Build Coastguard Worker                                  mirror::Object* next_page_first_obj,
1555*795d594fSAndroid Build Coastguard Worker                                  uint32_t first_chunk_size,
1556*795d594fSAndroid Build Coastguard Worker                                  uint8_t* const pre_compact_page,
1557*795d594fSAndroid Build Coastguard Worker                                  uint8_t* dest,
1558*795d594fSAndroid Build Coastguard Worker                                  bool needs_memset_zero) {
1559*795d594fSAndroid Build Coastguard Worker   DCHECK(IsAlignedParam(pre_compact_page, gPageSize));
1560*795d594fSAndroid Build Coastguard Worker   size_t bytes_copied;
1561*795d594fSAndroid Build Coastguard Worker   uint8_t* src_addr = reinterpret_cast<uint8_t*>(GetFromSpaceAddr(first_obj));
1562*795d594fSAndroid Build Coastguard Worker   uint8_t* pre_compact_addr = reinterpret_cast<uint8_t*>(first_obj);
1563*795d594fSAndroid Build Coastguard Worker   uint8_t* const pre_compact_page_end = pre_compact_page + gPageSize;
1564*795d594fSAndroid Build Coastguard Worker   uint8_t* const dest_page_end = dest + gPageSize;
1565*795d594fSAndroid Build Coastguard Worker 
1566*795d594fSAndroid Build Coastguard Worker   auto verify_obj_callback = [&] (std::ostream& os) {
1567*795d594fSAndroid Build Coastguard Worker                                os << " first_obj=" << first_obj
1568*795d594fSAndroid Build Coastguard Worker                                   << " next_page_first_obj=" << next_page_first_obj
1569*795d594fSAndroid Build Coastguard Worker                                   << " first_chunk_size=" << first_chunk_size
1570*795d594fSAndroid Build Coastguard Worker                                   << " dest=" << static_cast<void*>(dest)
1571*795d594fSAndroid Build Coastguard Worker                                   << " pre_compact_page="
1572*795d594fSAndroid Build Coastguard Worker                                   << static_cast<void* const>(pre_compact_page);
1573*795d594fSAndroid Build Coastguard Worker                              };
1574*795d594fSAndroid Build Coastguard Worker   // We have an empty portion at the beginning of the page. Zero it.
1575*795d594fSAndroid Build Coastguard Worker   if (pre_compact_addr > pre_compact_page) {
1576*795d594fSAndroid Build Coastguard Worker     bytes_copied = pre_compact_addr - pre_compact_page;
1577*795d594fSAndroid Build Coastguard Worker     DCHECK_LT(bytes_copied, gPageSize);
1578*795d594fSAndroid Build Coastguard Worker     if (needs_memset_zero) {
1579*795d594fSAndroid Build Coastguard Worker       std::memset(dest, 0x0, bytes_copied);
1580*795d594fSAndroid Build Coastguard Worker     }
1581*795d594fSAndroid Build Coastguard Worker     dest += bytes_copied;
1582*795d594fSAndroid Build Coastguard Worker   } else {
1583*795d594fSAndroid Build Coastguard Worker     bytes_copied = 0;
1584*795d594fSAndroid Build Coastguard Worker     size_t offset = pre_compact_page - pre_compact_addr;
1585*795d594fSAndroid Build Coastguard Worker     pre_compact_addr = pre_compact_page;
1586*795d594fSAndroid Build Coastguard Worker     src_addr += offset;
1587*795d594fSAndroid Build Coastguard Worker     DCHECK(IsAlignedParam(src_addr, gPageSize));
1588*795d594fSAndroid Build Coastguard Worker   }
1589*795d594fSAndroid Build Coastguard Worker   // Copy the first chunk of live words
1590*795d594fSAndroid Build Coastguard Worker   std::memcpy(dest, src_addr, first_chunk_size);
1591*795d594fSAndroid Build Coastguard Worker   // Update references in the first chunk. Use object size to find next object.
1592*795d594fSAndroid Build Coastguard Worker   {
1593*795d594fSAndroid Build Coastguard Worker     size_t bytes_to_visit = first_chunk_size;
1594*795d594fSAndroid Build Coastguard Worker     size_t obj_size;
1595*795d594fSAndroid Build Coastguard Worker     // The first object started on some previous page, so we need to check the
1596*795d594fSAndroid Build Coastguard Worker     // beginning.
1597*795d594fSAndroid Build Coastguard Worker     DCHECK_LE(reinterpret_cast<uint8_t*>(first_obj), pre_compact_addr);
1598*795d594fSAndroid Build Coastguard Worker     size_t offset = pre_compact_addr - reinterpret_cast<uint8_t*>(first_obj);
1599*795d594fSAndroid Build Coastguard Worker     if (bytes_copied == 0 && offset > 0) {
1600*795d594fSAndroid Build Coastguard Worker       mirror::Object* to_obj = reinterpret_cast<mirror::Object*>(dest - offset);
1601*795d594fSAndroid Build Coastguard Worker       mirror::Object* from_obj = reinterpret_cast<mirror::Object*>(src_addr - offset);
1602*795d594fSAndroid Build Coastguard Worker       // If the next page's first-obj is in this page or is nullptr, then we
1603*795d594fSAndroid Build Coastguard Worker       // don't need to check the end boundary.
1604*795d594fSAndroid Build Coastguard Worker       if (next_page_first_obj == nullptr
1605*795d594fSAndroid Build Coastguard Worker           || (first_obj != next_page_first_obj
1606*795d594fSAndroid Build Coastguard Worker               && reinterpret_cast<uint8_t*>(next_page_first_obj) <= pre_compact_page_end)) {
1607*795d594fSAndroid Build Coastguard Worker         RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/false> visitor(this,
1608*795d594fSAndroid Build Coastguard Worker                                                                            to_obj,
1609*795d594fSAndroid Build Coastguard Worker                                                                            dest,
1610*795d594fSAndroid Build Coastguard Worker                                                                            nullptr);
1611*795d594fSAndroid Build Coastguard Worker         obj_size = from_obj->VisitRefsForCompaction<
1612*795d594fSAndroid Build Coastguard Worker                 /*kFetchObjSize*/true, /*kVisitNativeRoots*/false>(visitor,
1613*795d594fSAndroid Build Coastguard Worker                                                                    MemberOffset(offset),
1614*795d594fSAndroid Build Coastguard Worker                                                                    MemberOffset(-1));
1615*795d594fSAndroid Build Coastguard Worker       } else {
1616*795d594fSAndroid Build Coastguard Worker         RefsUpdateVisitor</*kCheckBegin*/true, /*kCheckEnd*/true> visitor(this,
1617*795d594fSAndroid Build Coastguard Worker                                                                           to_obj,
1618*795d594fSAndroid Build Coastguard Worker                                                                           dest,
1619*795d594fSAndroid Build Coastguard Worker                                                                           dest_page_end);
1620*795d594fSAndroid Build Coastguard Worker         obj_size = from_obj->VisitRefsForCompaction<
1621*795d594fSAndroid Build Coastguard Worker                 /*kFetchObjSize*/true, /*kVisitNativeRoots*/false>(visitor,
1622*795d594fSAndroid Build Coastguard Worker                                                                    MemberOffset(offset),
1623*795d594fSAndroid Build Coastguard Worker                                                                    MemberOffset(offset
1624*795d594fSAndroid Build Coastguard Worker                                                                                 + gPageSize));
1625*795d594fSAndroid Build Coastguard Worker         if (first_obj == next_page_first_obj) {
1626*795d594fSAndroid Build Coastguard Worker           // First object is the only object on this page. So there's nothing else left to do.
1627*795d594fSAndroid Build Coastguard Worker           return;
1628*795d594fSAndroid Build Coastguard Worker         }
1629*795d594fSAndroid Build Coastguard Worker       }
1630*795d594fSAndroid Build Coastguard Worker       obj_size = RoundUp(obj_size, kAlignment);
1631*795d594fSAndroid Build Coastguard Worker       obj_size -= offset;
1632*795d594fSAndroid Build Coastguard Worker       dest += obj_size;
1633*795d594fSAndroid Build Coastguard Worker       bytes_to_visit -= obj_size;
1634*795d594fSAndroid Build Coastguard Worker     }
1635*795d594fSAndroid Build Coastguard Worker     bytes_copied += first_chunk_size;
1636*795d594fSAndroid Build Coastguard Worker     // If the last object in this page is next_page_first_obj, then we need to check the end boundary.
1637*795d594fSAndroid Build Coastguard Worker     bool check_last_obj = false;
1638*795d594fSAndroid Build Coastguard Worker     if (next_page_first_obj != nullptr
1639*795d594fSAndroid Build Coastguard Worker         && reinterpret_cast<uint8_t*>(next_page_first_obj) < pre_compact_page_end
1640*795d594fSAndroid Build Coastguard Worker         && bytes_copied == gPageSize) {
1641*795d594fSAndroid Build Coastguard Worker       size_t diff = pre_compact_page_end - reinterpret_cast<uint8_t*>(next_page_first_obj);
1642*795d594fSAndroid Build Coastguard Worker       DCHECK_LE(diff, gPageSize);
1643*795d594fSAndroid Build Coastguard Worker       DCHECK_LE(diff, bytes_to_visit);
1644*795d594fSAndroid Build Coastguard Worker       bytes_to_visit -= diff;
1645*795d594fSAndroid Build Coastguard Worker       check_last_obj = true;
1646*795d594fSAndroid Build Coastguard Worker     }
1647*795d594fSAndroid Build Coastguard Worker     while (bytes_to_visit > 0) {
1648*795d594fSAndroid Build Coastguard Worker       mirror::Object* dest_obj = reinterpret_cast<mirror::Object*>(dest);
1649*795d594fSAndroid Build Coastguard Worker       VerifyObject(dest_obj, verify_obj_callback);
1650*795d594fSAndroid Build Coastguard Worker       RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false> visitor(this,
1651*795d594fSAndroid Build Coastguard Worker                                                                           dest_obj,
1652*795d594fSAndroid Build Coastguard Worker                                                                           nullptr,
1653*795d594fSAndroid Build Coastguard Worker                                                                           nullptr);
1654*795d594fSAndroid Build Coastguard Worker       obj_size = dest_obj->VisitRefsForCompaction(visitor, MemberOffset(0), MemberOffset(-1));
1655*795d594fSAndroid Build Coastguard Worker       obj_size = RoundUp(obj_size, kAlignment);
1656*795d594fSAndroid Build Coastguard Worker       bytes_to_visit -= obj_size;
1657*795d594fSAndroid Build Coastguard Worker       dest += obj_size;
1658*795d594fSAndroid Build Coastguard Worker     }
1659*795d594fSAndroid Build Coastguard Worker     DCHECK_EQ(bytes_to_visit, 0u);
1660*795d594fSAndroid Build Coastguard Worker     if (check_last_obj) {
1661*795d594fSAndroid Build Coastguard Worker       mirror::Object* dest_obj = reinterpret_cast<mirror::Object*>(dest);
1662*795d594fSAndroid Build Coastguard Worker       VerifyObject(dest_obj, verify_obj_callback);
1663*795d594fSAndroid Build Coastguard Worker       RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/true> visitor(this,
1664*795d594fSAndroid Build Coastguard Worker                                                                          dest_obj,
1665*795d594fSAndroid Build Coastguard Worker                                                                          nullptr,
1666*795d594fSAndroid Build Coastguard Worker                                                                          dest_page_end);
1667*795d594fSAndroid Build Coastguard Worker       mirror::Object* obj = GetFromSpaceAddr(next_page_first_obj);
1668*795d594fSAndroid Build Coastguard Worker       obj->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor,
1669*795d594fSAndroid Build Coastguard Worker                                                           MemberOffset(0),
1670*795d594fSAndroid Build Coastguard Worker                                                           MemberOffset(dest_page_end - dest));
1671*795d594fSAndroid Build Coastguard Worker       return;
1672*795d594fSAndroid Build Coastguard Worker     }
1673*795d594fSAndroid Build Coastguard Worker   }
1674*795d594fSAndroid Build Coastguard Worker 
1675*795d594fSAndroid Build Coastguard Worker   // Probably a TLAB finished on this page and/or a new TLAB started as well.
1676*795d594fSAndroid Build Coastguard Worker   if (bytes_copied < gPageSize) {
1677*795d594fSAndroid Build Coastguard Worker     src_addr += first_chunk_size;
1678*795d594fSAndroid Build Coastguard Worker     pre_compact_addr += first_chunk_size;
1679*795d594fSAndroid Build Coastguard Worker     // Use the mark bitmap to identify where objects are. First call
1680*795d594fSAndroid Build Coastguard Worker     // VisitMarkedRange for only the first marked bit. If one is found, zero all
1681*795d594fSAndroid Build Coastguard Worker     // bytes until that object and then call memcpy on the rest of the page.
1682*795d594fSAndroid Build Coastguard Worker     // Then call VisitMarkedRange for all marked bits *after* the one found in
1683*795d594fSAndroid Build Coastguard Worker     // this invocation, this time to visit references.
1684*795d594fSAndroid Build Coastguard Worker     uintptr_t start_visit = reinterpret_cast<uintptr_t>(pre_compact_addr);
1685*795d594fSAndroid Build Coastguard Worker     uintptr_t page_end = reinterpret_cast<uintptr_t>(pre_compact_page_end);
1686*795d594fSAndroid Build Coastguard Worker     mirror::Object* found_obj = nullptr;
1687*795d594fSAndroid Build Coastguard Worker     moving_space_bitmap_->VisitMarkedRange</*kVisitOnce*/true>(start_visit,
1688*795d594fSAndroid Build Coastguard Worker                                                                 page_end,
1689*795d594fSAndroid Build Coastguard Worker                                                                 [&found_obj](mirror::Object* obj) {
1690*795d594fSAndroid Build Coastguard Worker                                                                   found_obj = obj;
1691*795d594fSAndroid Build Coastguard Worker                                                                 });
1692*795d594fSAndroid Build Coastguard Worker     size_t remaining_bytes = gPageSize - bytes_copied;
1693*795d594fSAndroid Build Coastguard Worker     if (found_obj == nullptr) {
1694*795d594fSAndroid Build Coastguard Worker       if (needs_memset_zero) {
1695*795d594fSAndroid Build Coastguard Worker         // No more black objects in this page. Zero the remaining bytes and return.
1696*795d594fSAndroid Build Coastguard Worker         std::memset(dest, 0x0, remaining_bytes);
1697*795d594fSAndroid Build Coastguard Worker       }
1698*795d594fSAndroid Build Coastguard Worker       return;
1699*795d594fSAndroid Build Coastguard Worker     }
1700*795d594fSAndroid Build Coastguard Worker     // Copy everything in this page, which includes any zeroed regions
1701*795d594fSAndroid Build Coastguard Worker     // in-between.
1702*795d594fSAndroid Build Coastguard Worker     std::memcpy(dest, src_addr, remaining_bytes);
1703*795d594fSAndroid Build Coastguard Worker     DCHECK_LT(reinterpret_cast<uintptr_t>(found_obj), page_end);
1704*795d594fSAndroid Build Coastguard Worker     moving_space_bitmap_->VisitMarkedRange(
1705*795d594fSAndroid Build Coastguard Worker             reinterpret_cast<uintptr_t>(found_obj) + mirror::kObjectHeaderSize,
1706*795d594fSAndroid Build Coastguard Worker             page_end,
1707*795d594fSAndroid Build Coastguard Worker             [&found_obj, pre_compact_addr, dest, this, verify_obj_callback] (mirror::Object* obj)
1708*795d594fSAndroid Build Coastguard Worker             REQUIRES_SHARED(Locks::mutator_lock_) {
1709*795d594fSAndroid Build Coastguard Worker               ptrdiff_t diff = reinterpret_cast<uint8_t*>(found_obj) - pre_compact_addr;
1710*795d594fSAndroid Build Coastguard Worker               mirror::Object* ref = reinterpret_cast<mirror::Object*>(dest + diff);
1711*795d594fSAndroid Build Coastguard Worker               VerifyObject(ref, verify_obj_callback);
1712*795d594fSAndroid Build Coastguard Worker               RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false>
1713*795d594fSAndroid Build Coastguard Worker                       visitor(this, ref, nullptr, nullptr);
1714*795d594fSAndroid Build Coastguard Worker               ref->VisitRefsForCompaction</*kFetchObjSize*/false>(visitor,
1715*795d594fSAndroid Build Coastguard Worker                                                                   MemberOffset(0),
1716*795d594fSAndroid Build Coastguard Worker                                                                   MemberOffset(-1));
1717*795d594fSAndroid Build Coastguard Worker               // Remember for next round.
1718*795d594fSAndroid Build Coastguard Worker               found_obj = obj;
1719*795d594fSAndroid Build Coastguard Worker             });
1720*795d594fSAndroid Build Coastguard Worker     // found_obj may have been updated in VisitMarkedRange. Visit the last found
1721*795d594fSAndroid Build Coastguard Worker     // object.
1722*795d594fSAndroid Build Coastguard Worker     DCHECK_GT(reinterpret_cast<uint8_t*>(found_obj), pre_compact_addr);
1723*795d594fSAndroid Build Coastguard Worker     DCHECK_LT(reinterpret_cast<uintptr_t>(found_obj), page_end);
1724*795d594fSAndroid Build Coastguard Worker     ptrdiff_t diff = reinterpret_cast<uint8_t*>(found_obj) - pre_compact_addr;
1725*795d594fSAndroid Build Coastguard Worker     mirror::Object* dest_obj = reinterpret_cast<mirror::Object*>(dest + diff);
1726*795d594fSAndroid Build Coastguard Worker     VerifyObject(dest_obj, verify_obj_callback);
1727*795d594fSAndroid Build Coastguard Worker     RefsUpdateVisitor</*kCheckBegin*/ false, /*kCheckEnd*/ true> visitor(
1728*795d594fSAndroid Build Coastguard Worker         this, dest_obj, nullptr, dest_page_end);
1729*795d594fSAndroid Build Coastguard Worker     // The last object could overlap with the next page, and if it happens to be
1730*795d594fSAndroid Build Coastguard Worker     // a class, then we may access something (like static-field offsets) which is
1731*795d594fSAndroid Build Coastguard Worker     // on the next page. Therefore, use the from-space reference.
1732*795d594fSAndroid Build Coastguard Worker     mirror::Object* obj = GetFromSpaceAddr(found_obj);
1733*795d594fSAndroid Build Coastguard Worker     obj->VisitRefsForCompaction</*kFetchObjSize*/ false>(
1734*795d594fSAndroid Build Coastguard Worker         visitor, MemberOffset(0), MemberOffset(page_end - reinterpret_cast<uintptr_t>(found_obj)));
1735*795d594fSAndroid Build Coastguard Worker   }
1736*795d594fSAndroid Build Coastguard Worker }
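
// Illustrative layout sketch (not from the original source) of a black page
// handled by SlideBlackPage() above: the first chunk of live words is copied
// directly, and any later TLAB chunks are located via the mark bitmap.
//
//   pre_compact_page                                  pre_compact_page_end
//   |<- first chunk (first_chunk_size) ->|<- chunks found via bitmap ->|
//
// first_obj may start before pre_compact_page, and next_page_first_obj may
// start in this page and spill into the next one.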
1737*795d594fSAndroid Build Coastguard Worker 
1738*795d594fSAndroid Build Coastguard Worker template <uint32_t kYieldMax = 5, uint64_t kSleepUs = 10>
1739*795d594fSAndroid Build Coastguard Worker static void BackOff(uint32_t i) {
1740*795d594fSAndroid Build Coastguard Worker   // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
1741*795d594fSAndroid Build Coastguard Worker   if (i <= kYieldMax) {
1742*795d594fSAndroid Build Coastguard Worker     sched_yield();
1743*795d594fSAndroid Build Coastguard Worker   } else {
1744*795d594fSAndroid Build Coastguard Worker     // nanosleep is not in the async-signal-safe list, but bionic implements it
1745*795d594fSAndroid Build Coastguard Worker     // with a pure system call, so it should be fine.
1746*795d594fSAndroid Build Coastguard Worker     NanoSleep(kSleepUs * 1000 * (i - kYieldMax));
1747*795d594fSAndroid Build Coastguard Worker   }
1748*795d594fSAndroid Build Coastguard Worker }
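
// A hedged usage sketch (not part of the original source): the ioctl helpers
// below drive BackOff() from bounded retry loops, roughly like this
// (TryContendedOp is a hypothetical stand-in):
//
//   uint32_t attempt = 0;
//   while (!TryContendedOp()) {
//     BackOff(attempt++);  // sched_yield() first, then increasingly long sleeps.
//   }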
1749*795d594fSAndroid Build Coastguard Worker 
1750*795d594fSAndroid Build Coastguard Worker size_t MarkCompact::ZeropageIoctl(void* addr,
1751*795d594fSAndroid Build Coastguard Worker                                   size_t length,
1752*795d594fSAndroid Build Coastguard Worker                                   bool tolerate_eexist,
1753*795d594fSAndroid Build Coastguard Worker                                   bool tolerate_enoent) {
1754*795d594fSAndroid Build Coastguard Worker   int32_t backoff_count = -1;
1755*795d594fSAndroid Build Coastguard Worker   int32_t max_backoff = 10;  // max native priority.
1756*795d594fSAndroid Build Coastguard Worker   struct uffdio_zeropage uffd_zeropage;
1757*795d594fSAndroid Build Coastguard Worker   DCHECK(IsAlignedParam(addr, gPageSize));
1758*795d594fSAndroid Build Coastguard Worker   uffd_zeropage.range.start = reinterpret_cast<uintptr_t>(addr);
1759*795d594fSAndroid Build Coastguard Worker   uffd_zeropage.range.len = length;
1760*795d594fSAndroid Build Coastguard Worker   uffd_zeropage.mode = gUffdSupportsMmapTrylock ? UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK : 0;
1761*795d594fSAndroid Build Coastguard Worker   while (true) {
1762*795d594fSAndroid Build Coastguard Worker     uffd_zeropage.zeropage = 0;
1763*795d594fSAndroid Build Coastguard Worker     int ret = ioctl(uffd_, UFFDIO_ZEROPAGE, &uffd_zeropage);
1764*795d594fSAndroid Build Coastguard Worker     if (ret == 0) {
1765*795d594fSAndroid Build Coastguard Worker       DCHECK_EQ(uffd_zeropage.zeropage, static_cast<ssize_t>(length));
1766*795d594fSAndroid Build Coastguard Worker       return length;
1767*795d594fSAndroid Build Coastguard Worker     } else if (errno == EAGAIN) {
1768*795d594fSAndroid Build Coastguard Worker       if (uffd_zeropage.zeropage > 0) {
1769*795d594fSAndroid Build Coastguard Worker         // Contention was observed after acquiring mmap_lock. But the first page
1770*795d594fSAndroid Build Coastguard Worker         // is already done, which is what we care about.
1771*795d594fSAndroid Build Coastguard Worker         DCHECK(IsAlignedParam(uffd_zeropage.zeropage, gPageSize));
1772*795d594fSAndroid Build Coastguard Worker         DCHECK_GE(uffd_zeropage.zeropage, static_cast<ssize_t>(gPageSize));
1773*795d594fSAndroid Build Coastguard Worker         return uffd_zeropage.zeropage;
1774*795d594fSAndroid Build Coastguard Worker       } else if (uffd_zeropage.zeropage < 0) {
1775*795d594fSAndroid Build Coastguard Worker         // mmap_read_trylock() failed due to contention. Back-off and retry.
1776*795d594fSAndroid Build Coastguard Worker         DCHECK_EQ(uffd_zeropage.zeropage, -EAGAIN);
1777*795d594fSAndroid Build Coastguard Worker         if (backoff_count == -1) {
1778*795d594fSAndroid Build Coastguard Worker           int prio = Thread::Current()->GetNativePriority();
1779*795d594fSAndroid Build Coastguard Worker           DCHECK(prio > 0 && prio <= 10) << prio;
1780*795d594fSAndroid Build Coastguard Worker           max_backoff -= prio;
1781*795d594fSAndroid Build Coastguard Worker           backoff_count = 0;
1782*795d594fSAndroid Build Coastguard Worker         }
1783*795d594fSAndroid Build Coastguard Worker         if (backoff_count < max_backoff) {
1784*795d594fSAndroid Build Coastguard Worker           // Using 3 to align 'normal' priority threads with sleep.
1785*795d594fSAndroid Build Coastguard Worker           BackOff</*kYieldMax=*/3, /*kSleepUs=*/1000>(backoff_count++);
1786*795d594fSAndroid Build Coastguard Worker         } else {
1787*795d594fSAndroid Build Coastguard Worker           uffd_zeropage.mode = 0;
1788*795d594fSAndroid Build Coastguard Worker         }
1789*795d594fSAndroid Build Coastguard Worker       }
1790*795d594fSAndroid Build Coastguard Worker     } else if (tolerate_eexist && errno == EEXIST) {
1791*795d594fSAndroid Build Coastguard Worker       // Ioctl returns the number of bytes it mapped. The page on which EEXIST occurred
1792*795d594fSAndroid Build Coastguard Worker       // wouldn't be included in it.
1793*795d594fSAndroid Build Coastguard Worker       return uffd_zeropage.zeropage > 0 ? uffd_zeropage.zeropage + gPageSize : gPageSize;
1794*795d594fSAndroid Build Coastguard Worker     } else {
1795*795d594fSAndroid Build Coastguard Worker       CHECK(tolerate_enoent && errno == ENOENT)
1796*795d594fSAndroid Build Coastguard Worker           << "ioctl_userfaultfd: zeropage failed: " << strerror(errno) << ". addr:" << addr;
1797*795d594fSAndroid Build Coastguard Worker       return 0;
1798*795d594fSAndroid Build Coastguard Worker     }
1799*795d594fSAndroid Build Coastguard Worker   }
1800*795d594fSAndroid Build Coastguard Worker }
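
// A hedged usage sketch (not part of the original source): zero-filling one
// faulting to-space page via the helper above. 'fault_page' is assumed to be
// a gPageSize-aligned address inside a range registered with uffd_.
//
//   size_t mapped = ZeropageIoctl(fault_page,
//                                 gPageSize,
//                                 /*tolerate_eexist=*/true,
//                                 /*tolerate_enoent=*/false);
//   DCHECK_EQ(mapped, gPageSize);  // An EEXIST page also counts as done.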
1801*795d594fSAndroid Build Coastguard Worker 
1802*795d594fSAndroid Build Coastguard Worker size_t MarkCompact::CopyIoctl(
1803*795d594fSAndroid Build Coastguard Worker     void* dst, void* buffer, size_t length, bool return_on_contention, bool tolerate_enoent) {
1804*795d594fSAndroid Build Coastguard Worker   int32_t backoff_count = -1;
1805*795d594fSAndroid Build Coastguard Worker   int32_t max_backoff = 10;  // max native priority.
1806*795d594fSAndroid Build Coastguard Worker   struct uffdio_copy uffd_copy;
1807*795d594fSAndroid Build Coastguard Worker   uffd_copy.mode = gUffdSupportsMmapTrylock ? UFFDIO_COPY_MODE_MMAP_TRYLOCK : 0;
1808*795d594fSAndroid Build Coastguard Worker   uffd_copy.src = reinterpret_cast<uintptr_t>(buffer);
1809*795d594fSAndroid Build Coastguard Worker   uffd_copy.dst = reinterpret_cast<uintptr_t>(dst);
1810*795d594fSAndroid Build Coastguard Worker   uffd_copy.len = length;
1811*795d594fSAndroid Build Coastguard Worker   uffd_copy.copy = 0;
1812*795d594fSAndroid Build Coastguard Worker   while (true) {
1813*795d594fSAndroid Build Coastguard Worker     int ret = ioctl(uffd_, UFFDIO_COPY, &uffd_copy);
1814*795d594fSAndroid Build Coastguard Worker     if (ret == 0) {
1815*795d594fSAndroid Build Coastguard Worker       DCHECK_EQ(uffd_copy.copy, static_cast<ssize_t>(length));
1816*795d594fSAndroid Build Coastguard Worker       break;
1817*795d594fSAndroid Build Coastguard Worker     } else if (errno == EAGAIN) {
1818*795d594fSAndroid Build Coastguard Worker       // Contention observed.
1819*795d594fSAndroid Build Coastguard Worker       DCHECK_NE(uffd_copy.copy, 0);
1820*795d594fSAndroid Build Coastguard Worker       if (uffd_copy.copy > 0) {
1821*795d594fSAndroid Build Coastguard Worker         // Contention was observed after acquiring mmap_lock.
1822*795d594fSAndroid Build Coastguard Worker         DCHECK(IsAlignedParam(uffd_copy.copy, gPageSize));
1823*795d594fSAndroid Build Coastguard Worker         DCHECK_GE(uffd_copy.copy, static_cast<ssize_t>(gPageSize));
1824*795d594fSAndroid Build Coastguard Worker         break;
1825*795d594fSAndroid Build Coastguard Worker       } else {
1826*795d594fSAndroid Build Coastguard Worker         // mmap_read_trylock() failed due to contention.
1827*795d594fSAndroid Build Coastguard Worker         DCHECK_EQ(uffd_copy.copy, -EAGAIN);
1828*795d594fSAndroid Build Coastguard Worker         uffd_copy.copy = 0;
1829*795d594fSAndroid Build Coastguard Worker         if (return_on_contention) {
1830*795d594fSAndroid Build Coastguard Worker           break;
1831*795d594fSAndroid Build Coastguard Worker         }
1832*795d594fSAndroid Build Coastguard Worker       }
1833*795d594fSAndroid Build Coastguard Worker       if (backoff_count == -1) {
1834*795d594fSAndroid Build Coastguard Worker         int prio = Thread::Current()->GetNativePriority();
1835*795d594fSAndroid Build Coastguard Worker         DCHECK(prio > 0 && prio <= 10) << prio;
1836*795d594fSAndroid Build Coastguard Worker         max_backoff -= prio;
1837*795d594fSAndroid Build Coastguard Worker         backoff_count = 0;
1838*795d594fSAndroid Build Coastguard Worker       }
1839*795d594fSAndroid Build Coastguard Worker       if (backoff_count < max_backoff) {
1840*795d594fSAndroid Build Coastguard Worker         // Using 3 to align 'normal' priority threads with sleep.
1841*795d594fSAndroid Build Coastguard Worker         BackOff</*kYieldMax=*/3, /*kSleepUs=*/1000>(backoff_count++);
1842*795d594fSAndroid Build Coastguard Worker       } else {
        uffd_copy.mode = 0;
      }
    } else if (errno == EEXIST) {
      DCHECK_NE(uffd_copy.copy, 0);
      if (uffd_copy.copy < 0) {
        uffd_copy.copy = 0;
      }
      // Ioctl returns the number of bytes it mapped. The page on which EEXIST occurred
      // wouldn't be included in it.
      uffd_copy.copy += gPageSize;
      break;
    } else {
      CHECK(tolerate_enoent && errno == ENOENT)
          << "ioctl_userfaultfd: copy failed: " << strerror(errno) << ". src:" << buffer
          << " dst:" << dst;
      return uffd_copy.copy > 0 ? uffd_copy.copy : 0;
    }
  }
  return uffd_copy.copy;
}

template <int kMode, typename CompactionFn>
bool MarkCompact::DoPageCompactionWithStateChange(size_t page_idx,
                                                  uint8_t* to_space_page,
                                                  uint8_t* page,
                                                  bool map_immediately,
                                                  CompactionFn func) {
  uint32_t expected_state = static_cast<uint8_t>(PageState::kUnprocessed);
  uint32_t desired_state = static_cast<uint8_t>(map_immediately ? PageState::kProcessingAndMapping :
                                                                  PageState::kProcessing);
  // In the concurrent case (kMode != kFallbackMode) we need to ensure that the update
  // to moving_pages_status_[page_idx] is released before the contents of the page are
  // made accessible to other threads.
  //
  // We need acquire ordering here to ensure that when the CAS fails, another thread
  // has completed processing the page, which is guaranteed by the release below.
  if (kMode == kFallbackMode || moving_pages_status_[page_idx].compare_exchange_strong(
                                    expected_state, desired_state, std::memory_order_acquire)) {
    func();
    if (kMode == kCopyMode) {
      if (map_immediately) {
        CopyIoctl(to_space_page,
                  page,
                  gPageSize,
                  /*return_on_contention=*/false,
                  /*tolerate_enoent=*/false);
        // Store is sufficient as no other thread could modify the status at this
        // point. Relaxed order is sufficient as the ioctl will act as a fence.
        moving_pages_status_[page_idx].store(static_cast<uint8_t>(PageState::kProcessedAndMapped),
                                             std::memory_order_relaxed);
      } else {
        // Add the src page's index in the status word.
        DCHECK(from_space_map_.HasAddress(page));
        DCHECK_LE(static_cast<size_t>(page - from_space_begin_),
                  std::numeric_limits<uint32_t>::max());
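        // The status word packs the from-space offset (page-aligned, so its
        // low bits are free) together with the PageState in the low
        // kPageStateMask bits; MapMovingSpacePages decodes the offset with
        // '& ~kPageStateMask'.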
        uint32_t store_val = page - from_space_begin_;
        DCHECK_EQ(store_val & kPageStateMask, 0u);
        store_val |= static_cast<uint8_t>(PageState::kProcessed);
        // Store is sufficient as no other thread would modify the status at this point.
        moving_pages_status_[page_idx].store(store_val, std::memory_order_release);
      }
    }
    return true;
  } else {
    // Only the GC thread could have set the state to Processed.
    DCHECK_NE(expected_state, static_cast<uint8_t>(PageState::kProcessed));
    return false;
  }
}

bool MarkCompact::FreeFromSpacePages(size_t cur_page_idx, int mode, size_t end_idx_for_mapping) {
  // Thanks to sliding compaction, bump-pointer allocations, and reverse
  // compaction (see CompactMovingSpace), the logic here is pretty simple: find
  // the to-space page up to which compaction has finished; all the from-space
  // pages corresponding to the pages from there onwards can be freed. There
  // are some corner cases to be taken care of, which are described below.
  size_t idx = last_checked_reclaim_page_idx_;
  // Find the to-space page up to which the corresponding from-space pages can be
  // freed.
  for (; idx > cur_page_idx; idx--) {
    PageState state = GetMovingPageState(idx - 1);
    if (state == PageState::kMutatorProcessing) {
      // Some mutator is working on the page.
      break;
    }
    DCHECK(state >= PageState::kProcessed ||
           (state == PageState::kUnprocessed &&
            (mode == kFallbackMode || idx > moving_first_objs_count_)));
  }
  DCHECK_LE(idx, last_checked_reclaim_page_idx_);
  if (idx == last_checked_reclaim_page_idx_) {
    // Nothing to do.
    return false;
  }

  uint8_t* reclaim_begin;
  uint8_t* idx_addr;
  // Calculate the first from-space page to be freed using 'idx'. If the
  // first-object of the idx'th to-space page started before the corresponding
  // from-space page, which is almost always the case in the compaction portion
  // of the moving-space, then it indicates that the subsequent pages that are
  // yet to be compacted will need the from-space pages. Therefore, find the page
  // (from the already compacted pages) whose first-object is different from
  // ours. All the from-space pages starting from that one are safe to be
  // removed. Please note that this iteration is not expected to be long in
  // normal cases as objects are smaller than page size.
  if (idx >= moving_first_objs_count_) {
    // Black-allocated portion of the moving-space.
    idx_addr = black_allocations_begin_ + (idx - moving_first_objs_count_) * gPageSize;
    reclaim_begin = idx_addr;
    mirror::Object* first_obj = first_objs_moving_space_[idx].AsMirrorPtr();
    if (first_obj != nullptr && reinterpret_cast<uint8_t*>(first_obj) < reclaim_begin) {
      size_t idx_len = moving_first_objs_count_ + black_page_count_;
      for (size_t i = idx + 1; i < idx_len; i++) {
        mirror::Object* obj = first_objs_moving_space_[i].AsMirrorPtr();
        // A null first-object indicates that the corresponding to-space page is
        // not used yet. So we can compute its from-space page and use that.
        if (obj != first_obj) {
          reclaim_begin = obj != nullptr
                          ? AlignUp(reinterpret_cast<uint8_t*>(obj), gPageSize)
                          : (black_allocations_begin_ + (i - moving_first_objs_count_) * gPageSize);
          break;
        }
      }
    }
  } else {
    DCHECK_GE(pre_compact_offset_moving_space_[idx], 0u);
    idx_addr = moving_space_begin_ + idx * gPageSize;
    if (idx_addr >= black_dense_end_) {
      idx_addr = moving_space_begin_ + pre_compact_offset_moving_space_[idx] * kAlignment;
    }
    reclaim_begin = idx_addr;
    DCHECK_LE(reclaim_begin, black_allocations_begin_);
    mirror::Object* first_obj = first_objs_moving_space_[idx].AsMirrorPtr();
    if (first_obj != nullptr) {
      if (reinterpret_cast<uint8_t*>(first_obj) < reclaim_begin) {
        DCHECK_LT(idx, moving_first_objs_count_);
        mirror::Object* obj = first_obj;
        for (size_t i = idx + 1; i < moving_first_objs_count_; i++) {
          obj = first_objs_moving_space_[i].AsMirrorPtr();
          if (obj == nullptr) {
            reclaim_begin = moving_space_begin_ + i * gPageSize;
            break;
          } else if (first_obj != obj) {
            DCHECK_LT(first_obj, obj);
            DCHECK_LT(reclaim_begin, reinterpret_cast<uint8_t*>(obj));
            reclaim_begin = reinterpret_cast<uint8_t*>(obj);
            break;
          }
        }
        if (obj == first_obj) {
          reclaim_begin = black_allocations_begin_;
        }
      }
    }
    reclaim_begin = AlignUp(reclaim_begin, gPageSize);
  }

  DCHECK_NE(reclaim_begin, nullptr);
  DCHECK_ALIGNED_PARAM(reclaim_begin, gPageSize);
  DCHECK_ALIGNED_PARAM(last_reclaimed_page_, gPageSize);
  // Check if the 'class_after_obj_map_' map allows pages to be freed.
  for (; class_after_obj_iter_ != class_after_obj_map_.rend(); class_after_obj_iter_++) {
    mirror::Object* klass = class_after_obj_iter_->first.AsMirrorPtr();
    mirror::Class* from_klass = static_cast<mirror::Class*>(GetFromSpaceAddr(klass));
    // Check with the class' end to ensure that, if required, the entire class survives.
    uint8_t* klass_end = reinterpret_cast<uint8_t*>(klass) + from_klass->SizeOf<kVerifyNone>();
    DCHECK_LE(klass_end, last_reclaimed_page_);
    if (reinterpret_cast<uint8_t*>(klass_end) >= reclaim_begin) {
      // Found a class which is in the reclaim range.
      if (reinterpret_cast<uint8_t*>(class_after_obj_iter_->second.AsMirrorPtr()) < idx_addr) {
        // Its lowest-address object is not compacted yet. Reclaim starting from
        // the end of this class.
        reclaim_begin = AlignUp(klass_end, gPageSize);
      } else {
        // Continue consuming pairs wherein the lowest address object has already
        // been compacted.
        continue;
      }
    }
    // All the remaining class (and thereby corresponding object) addresses are
    // lower than the reclaim range.
    break;
  }
  bool all_mapped = mode == kFallbackMode;
  ssize_t size = last_reclaimed_page_ - reclaim_begin;
  if (size > kMinFromSpaceMadviseSize) {
    // Map all the pages in the range.
    if (mode == kCopyMode && cur_page_idx < end_idx_for_mapping) {
      if (MapMovingSpacePages(cur_page_idx,
                              end_idx_for_mapping,
                              /*from_fault=*/false,
                              /*return_on_contention=*/true,
                              /*tolerate_enoent=*/false) == end_idx_for_mapping - cur_page_idx) {
        all_mapped = true;
      }
    } else {
      // This is for the black-allocation pages, so that madvise is not missed.
      all_mapped = true;
    }
    // If not all pages are mapped, then take it as a hint that mmap_lock is
    // contended and hence don't madvise, as that also needs the same lock.
    if (all_mapped) {
      // Retain a few pages for subsequent compactions.
      const ssize_t gBufferPages = 4 * gPageSize;
      DCHECK_LT(gBufferPages, kMinFromSpaceMadviseSize);
      size -= gBufferPages;
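      // last_reclaimed_page_ holds a pre-compact moving-space address; adding
      // from_space_slide_diff_ translates it to the from-space mapping, which
      // is what actually gets madvised away.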
      uint8_t* addr = last_reclaimed_page_ - size;
      CHECK_EQ(madvise(addr + from_space_slide_diff_, size, MADV_DONTNEED), 0)
          << "madvise of from-space failed: " << strerror(errno);
      last_reclaimed_page_ = addr;
      cur_reclaimable_page_ = addr;
    }
  }
  last_reclaimable_page_ = std::min(reclaim_begin, last_reclaimable_page_);
  last_checked_reclaim_page_idx_ = idx;
  return all_mapped;
}

template <int kMode>
void MarkCompact::CompactMovingSpace(uint8_t* page) {
  // For every page we have a starting object, which may have started in some
  // preceding page, and an offset within that object from where we must start
  // copying.
  // Consult the live-words bitmap to copy all contiguously live words at a
  // time. These words may constitute multiple objects. To avoid the need for
  // consulting the mark-bitmap to find where the next live object starts, we
  // use the object-size returned by VisitRefsForCompaction.
  //
  // We do the compaction in reverse direction so that the pages containing
  // TLAB and latest allocations are processed first.
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  size_t page_status_arr_len = moving_first_objs_count_ + black_page_count_;
  size_t idx = page_status_arr_len;
  size_t black_dense_end_idx = (black_dense_end_ - moving_space_begin_) / gPageSize;
  uint8_t* to_space_end = moving_space_begin_ + page_status_arr_len * gPageSize;
  uint8_t* pre_compact_page = black_allocations_begin_ + (black_page_count_ * gPageSize);

  DCHECK(IsAlignedParam(pre_compact_page, gPageSize));

  // These variables are maintained by FreeFromSpacePages().
  last_reclaimed_page_ = pre_compact_page;
  last_reclaimable_page_ = last_reclaimed_page_;
  cur_reclaimable_page_ = last_reclaimed_page_;
  last_checked_reclaim_page_idx_ = idx;
  class_after_obj_iter_ = class_after_obj_map_.rbegin();
  // Allocated-black pages.
  mirror::Object* next_page_first_obj = nullptr;
  while (idx > moving_first_objs_count_) {
    idx--;
    pre_compact_page -= gPageSize;
    to_space_end -= gPageSize;
    if (kMode == kFallbackMode) {
      page = to_space_end;
    }
    mirror::Object* first_obj = first_objs_moving_space_[idx].AsMirrorPtr();
    uint32_t first_chunk_size = black_alloc_pages_first_chunk_size_[idx];
    if (first_obj != nullptr) {
      DoPageCompactionWithStateChange<kMode>(idx,
                                             to_space_end,
                                             page,
                                             /*map_immediately=*/true,
                                             [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
                                               SlideBlackPage(first_obj,
                                                              next_page_first_obj,
                                                              first_chunk_size,
                                                              pre_compact_page,
                                                              page,
                                                              kMode == kCopyMode);
                                             });
      // We are sliding here, so no point attempting to madvise for every
      // page. Wait for enough pages to be done.
      if (idx % DivideByPageSize(kMinFromSpaceMadviseSize) == 0) {
        FreeFromSpacePages(idx, kMode, /*end_idx_for_mapping=*/0);
      }
    }
    next_page_first_obj = first_obj;
  }
  DCHECK_EQ(pre_compact_page, black_allocations_begin_);
  // Reserved page to be used if we can't find any reclaimable page for processing.
  uint8_t* reserve_page = page;
  size_t end_idx_for_mapping = idx;
  while (idx > black_dense_end_idx) {
    idx--;
    to_space_end -= gPageSize;
    if (kMode == kFallbackMode) {
      page = to_space_end;
    } else {
      DCHECK_EQ(kMode, kCopyMode);
      if (cur_reclaimable_page_ > last_reclaimable_page_) {
        cur_reclaimable_page_ -= gPageSize;
        page = cur_reclaimable_page_ + from_space_slide_diff_;
      } else {
        page = reserve_page;
      }
    }
    mirror::Object* first_obj = first_objs_moving_space_[idx].AsMirrorPtr();
    bool success = DoPageCompactionWithStateChange<kMode>(
        idx,
        to_space_end,
        page,
        /*map_immediately=*/page == reserve_page,
        [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
          CompactPage(first_obj, pre_compact_offset_moving_space_[idx], page, kMode == kCopyMode);
        });
    if (kMode == kCopyMode && (!success || page == reserve_page) && end_idx_for_mapping - idx > 1) {
      // Map the pages at the following addresses, as they can't be mapped together
      // with the yet-to-be-compacted pages: their src-side pages won't be contiguous.
      MapMovingSpacePages(idx + 1,
                          end_idx_for_mapping,
                          /*from_fault=*/false,
                          /*return_on_contention=*/true,
                          /*tolerate_enoent=*/false);
    }
    if (FreeFromSpacePages(idx, kMode, end_idx_for_mapping)) {
      end_idx_for_mapping = idx;
    }
  }
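  // Black-dense portion of the space (below black_dense_end_): objects here
  // are not moved. References are updated in the from-space copy of each
  // page, which is then mapped back in place (memcpy'd in fallback mode).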
  while (idx > 0) {
    idx--;
    to_space_end -= gPageSize;
    mirror::Object* first_obj = first_objs_moving_space_[idx].AsMirrorPtr();
    if (first_obj != nullptr) {
      DoPageCompactionWithStateChange<kMode>(
          idx,
          to_space_end,
          to_space_end + from_space_slide_diff_,
          /*map_immediately=*/false,
          [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
            UpdateNonMovingPage(
                first_obj, to_space_end, from_space_slide_diff_, moving_space_bitmap_);
            if (kMode == kFallbackMode) {
              memcpy(to_space_end, to_space_end + from_space_slide_diff_, gPageSize);
            }
          });
    } else {
      // The page has no reachable object on it. Just declare it mapped.
      // Mutators shouldn't step on this page, which is asserted in the sigbus
      // handler.
      DCHECK_EQ(moving_pages_status_[idx].load(std::memory_order_relaxed),
                static_cast<uint8_t>(PageState::kUnprocessed));
      moving_pages_status_[idx].store(static_cast<uint8_t>(PageState::kProcessedAndMapped),
                                      std::memory_order_release);
    }
    if (FreeFromSpacePages(idx, kMode, end_idx_for_mapping)) {
      end_idx_for_mapping = idx;
    }
  }
  // Map one last time to finish anything left.
  if (kMode == kCopyMode && end_idx_for_mapping > 0) {
    MapMovingSpacePages(idx,
                        end_idx_for_mapping,
                        /*from_fault=*/false,
                        /*return_on_contention=*/false,
                        /*tolerate_enoent=*/false);
  }
  DCHECK_EQ(to_space_end, bump_pointer_space_->Begin());
}

size_t MarkCompact::MapMovingSpacePages(size_t start_idx,
                                        size_t arr_len,
                                        bool from_fault,
                                        bool return_on_contention,
                                        bool tolerate_enoent) {
  DCHECK_LT(start_idx, arr_len);
  size_t arr_idx = start_idx;
  bool wait_for_unmapped = false;
  while (arr_idx < arr_len) {
    size_t map_count = 0;
    uint32_t cur_state = moving_pages_status_[arr_idx].load(std::memory_order_acquire);
    // Find a contiguous range that can be mapped with a single ioctl.
    for (uint32_t i = arr_idx, from_page = cur_state & ~kPageStateMask; i < arr_len;
         i++, map_count++, from_page += gPageSize) {
      uint32_t s = moving_pages_status_[i].load(std::memory_order_acquire);
      uint32_t cur_from_page = s & ~kPageStateMask;
      if (GetPageStateFromWord(s) != PageState::kProcessed || cur_from_page != from_page) {
        break;
      }
    }

    if (map_count == 0) {
      if (from_fault) {
        bool mapped = GetPageStateFromWord(cur_state) == PageState::kProcessedAndMapped;
        return mapped ? 1 : 0;
      }
      // Skip the pages that this thread cannot map.
      for (; arr_idx < arr_len; arr_idx++) {
        PageState s = GetMovingPageState(arr_idx);
        if (s == PageState::kProcessed) {
          break;
        } else if (s != PageState::kProcessedAndMapped) {
          wait_for_unmapped = true;
        }
      }
    } else {
      uint32_t from_space_offset = cur_state & ~kPageStateMask;
      uint8_t* to_space_start = moving_space_begin_ + arr_idx * gPageSize;
      uint8_t* from_space_start = from_space_begin_ + from_space_offset;
      DCHECK_ALIGNED_PARAM(to_space_start, gPageSize);
      DCHECK_ALIGNED_PARAM(from_space_start, gPageSize);
      size_t mapped_len = CopyIoctl(to_space_start,
                                    from_space_start,
                                    map_count * gPageSize,
                                    return_on_contention,
                                    tolerate_enoent);
      for (size_t l = 0; l < mapped_len; l += gPageSize, arr_idx++) {
        // Store is sufficient as anyone storing is doing it with the same value.
        moving_pages_status_[arr_idx].store(static_cast<uint8_t>(PageState::kProcessedAndMapped),
                                            std::memory_order_release);
      }
      if (from_fault) {
        return DivideByPageSize(mapped_len);
      }
      // The COPY ioctl can also return a smaller length if a page was found to
      // be already mapped. But that doesn't count as contention.
      if (return_on_contention && DivideByPageSize(mapped_len) < map_count && errno != EEXIST) {
        return arr_idx - start_idx;
      }
    }
  }
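  // Some pages in [start_idx, arr_len) were being processed/mapped by other
  // threads. The caller expects the whole range to be mapped on return, so
  // spin (with backoff) until those threads finish.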
  if (wait_for_unmapped) {
    for (size_t i = start_idx; i < arr_len; i++) {
      PageState s = GetMovingPageState(i);
      DCHECK_GT(s, PageState::kProcessed);
      uint32_t backoff_count = 0;
      while (s != PageState::kProcessedAndMapped) {
        BackOff(backoff_count++);
        s = GetMovingPageState(i);
      }
    }
  }
  return arr_len - start_idx;
}

void MarkCompact::UpdateNonMovingPage(mirror::Object* first,
                                      uint8_t* page,
                                      ptrdiff_t from_space_diff,
                                      accounting::ContinuousSpaceBitmap* bitmap) {
  DCHECK_LT(reinterpret_cast<uint8_t*>(first), page + gPageSize);
  // For every object found in the page, visit the previous object. This ensures
  // that we can visit without checking page-end boundary.
  // Call VisitRefsForCompaction with from-space read-barrier as the klass object and
  // super-class loads require it.
  // TODO: Set kVisitNativeRoots to false once we implement concurrent
  // compaction.
  mirror::Object* curr_obj = first;
  uint8_t* from_page = page + from_space_diff;
  uint8_t* from_page_end = from_page + gPageSize;
  bitmap->VisitMarkedRange(
      reinterpret_cast<uintptr_t>(first) + mirror::kObjectHeaderSize,
      reinterpret_cast<uintptr_t>(page + gPageSize),
      [&](mirror::Object* next_obj) {
        mirror::Object* from_obj = reinterpret_cast<mirror::Object*>(
            reinterpret_cast<uint8_t*>(curr_obj) + from_space_diff);
        if (reinterpret_cast<uint8_t*>(curr_obj) < page) {
          RefsUpdateVisitor</*kCheckBegin*/ true, /*kCheckEnd*/ false> visitor(
              this, from_obj, from_page, from_page_end);
          MemberOffset begin_offset(page - reinterpret_cast<uint8_t*>(curr_obj));
          // Native roots shouldn't be visited as they are done when this
          // object's beginning was visited in the preceding page.
          from_obj->VisitRefsForCompaction</*kFetchObjSize*/ false, /*kVisitNativeRoots*/ false>(
              visitor, begin_offset, MemberOffset(-1));
        } else {
          RefsUpdateVisitor</*kCheckBegin*/ false, /*kCheckEnd*/ false> visitor(
              this, from_obj, from_page, from_page_end);
          from_obj->VisitRefsForCompaction</*kFetchObjSize*/ false>(
              visitor, MemberOffset(0), MemberOffset(-1));
        }
        curr_obj = next_obj;
      });

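  // curr_obj is now the last marked object on the page, which the lambda above
  // never handled (each callback visits the previous object). Visit it here,
  // clamping at the page end via end_offset since it may extend past the page.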
  mirror::Object* from_obj =
      reinterpret_cast<mirror::Object*>(reinterpret_cast<uint8_t*>(curr_obj) + from_space_diff);
  MemberOffset end_offset(page + gPageSize - reinterpret_cast<uint8_t*>(curr_obj));
  if (reinterpret_cast<uint8_t*>(curr_obj) < page) {
    RefsUpdateVisitor</*kCheckBegin*/ true, /*kCheckEnd*/ true> visitor(
        this, from_obj, from_page, from_page_end);
    from_obj->VisitRefsForCompaction</*kFetchObjSize*/ false, /*kVisitNativeRoots*/ false>(
        visitor, MemberOffset(page - reinterpret_cast<uint8_t*>(curr_obj)), end_offset);
  } else {
    RefsUpdateVisitor</*kCheckBegin*/ false, /*kCheckEnd*/ true> visitor(
        this, from_obj, from_page, from_page_end);
    from_obj->VisitRefsForCompaction</*kFetchObjSize*/ false>(visitor, MemberOffset(0), end_offset);
  }
}

void MarkCompact::UpdateNonMovingSpace() {
  TimingLogger::ScopedTiming t("(Paused)UpdateNonMovingSpace", GetTimings());
  // Iterating in reverse ensures that the class pointer in objects which span
  // across more than one page gets updated in the end. This is necessary for
  // VisitRefsForCompaction() to work correctly.
  // TODO: If and when we make non-moving space update concurrent, implement a
  // mechanism to remember class pointers for such objects off-heap and pass it
  // to VisitRefsForCompaction().
  uint8_t* page = non_moving_space_->Begin() + non_moving_first_objs_count_ * gPageSize;
  for (ssize_t i = non_moving_first_objs_count_ - 1; i >= 0; i--) {
    mirror::Object* obj = first_objs_non_moving_space_[i].AsMirrorPtr();
    page -= gPageSize;
    // null means there are no objects on the page whose references need updating.
    if (obj != nullptr) {
      UpdateNonMovingPage(obj, page, /*from_space_diff=*/0, non_moving_space_bitmap_);
    }
  }
}

void MarkCompact::UpdateMovingSpaceBlackAllocations() {
  // For sliding black pages, we need the first-object, which overlaps with the
  // first byte of the page. Additionally, we compute the size of the first chunk
  // of black objects. This will suffice for most black pages. Unlike compaction
  // pages, here we don't need to pre-compute the offset within the first-obj
  // from where sliding has to start. That can be calculated using the pre-compact
  // address of the page. Therefore, to save space, we store the first chunk's
  // size in the black_alloc_pages_first_chunk_size_ array.
  // For the pages which may have holes after the first chunk, which could happen
  // if a new TLAB starts in the middle of the page, we mark the objects in
  // the mark-bitmap. So, if the first-chunk size is smaller than gPageSize,
  // then we use the mark-bitmap for the remainder of the page.
  uint8_t* const begin = bump_pointer_space_->Begin();
  uint8_t* black_allocs = black_allocations_begin_;
  DCHECK_LE(begin, black_allocs);
  size_t consumed_blocks_count = 0;
  size_t first_block_size;
  // Needed only for debug at the end of the function. Hopefully the compiler
  // will eliminate it otherwise.
  size_t num_blocks = 0;
  // Get the list of all blocks allocated in the bump-pointer space.
  std::vector<size_t>* block_sizes = bump_pointer_space_->GetBlockSizes(thread_running_gc_,
                                                                        &first_block_size);
  DCHECK_LE(first_block_size, static_cast<size_t>(black_allocs - begin));
  if (block_sizes != nullptr) {
    size_t black_page_idx = moving_first_objs_count_;
    uint8_t* block_end = begin + first_block_size;
    uint32_t remaining_chunk_size = 0;
    uint32_t first_chunk_size = 0;
    mirror::Object* first_obj = nullptr;
    num_blocks = block_sizes->size();
    for (size_t block_size : *block_sizes) {
      block_end += block_size;
      // Skip the blocks that are prior to the black allocations. These will be
      // merged with the main-block later.
      if (black_allocs >= block_end) {
        consumed_blocks_count++;
        continue;
      }
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(black_allocs);
      bool set_mark_bit = remaining_chunk_size > 0;
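      // A non-zero remaining_chunk_size carried over from the previous block
      // means this block starts mid-page (e.g. a fresh TLAB), so its objects
      // need mark-bits for the sliding code to locate them past the page's
      // first chunk (see the function-header comment).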
      // We don't know how many objects are allocated in the current block. When we hit
      // a null assume it's the end. This works as every block is expected to
      // have objects allocated linearly using bump-pointer.
      // BumpPointerSpace::Walk() also works similarly.
      while (black_allocs < block_end
             && obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
        // Try to keep instructions which access the class instance together to
        // avoid reloading the pointer from the object.
        size_t obj_size = obj->SizeOf();
        bytes_scanned_ += obj_size;
        obj_size = RoundUp(obj_size, kAlignment);
        UpdateClassAfterObjectMap(obj);
        if (first_obj == nullptr) {
          first_obj = obj;
        }
        // We only need the mark-bitmap in the pages wherein a new TLAB starts in
        // the middle of the page.
        if (set_mark_bit) {
          moving_space_bitmap_->Set(obj);
        }
        // Handle objects which cross the page boundary, including objects larger
        // than page size.
        if (remaining_chunk_size + obj_size >= gPageSize) {
          set_mark_bit = false;
          first_chunk_size += gPageSize - remaining_chunk_size;
          remaining_chunk_size += obj_size;
          // We should not store first-object and remaining_chunk_size if there were
          // unused bytes before this TLAB, in which case we must have already
          // stored the values (below).
          if (black_alloc_pages_first_chunk_size_[black_page_idx] == 0) {
            black_alloc_pages_first_chunk_size_[black_page_idx] = first_chunk_size;
            first_objs_moving_space_[black_page_idx].Assign(first_obj);
          }
          black_page_idx++;
          remaining_chunk_size -= gPageSize;
          // Consume an object larger than page size.
          while (remaining_chunk_size >= gPageSize) {
            black_alloc_pages_first_chunk_size_[black_page_idx] = gPageSize;
            first_objs_moving_space_[black_page_idx].Assign(obj);
            black_page_idx++;
            remaining_chunk_size -= gPageSize;
          }
          first_obj = remaining_chunk_size > 0 ? obj : nullptr;
          first_chunk_size = remaining_chunk_size;
        } else {
          DCHECK_LE(first_chunk_size, remaining_chunk_size);
          first_chunk_size += obj_size;
          remaining_chunk_size += obj_size;
        }
        black_allocs += obj_size;
        obj = reinterpret_cast<mirror::Object*>(black_allocs);
      }
      DCHECK_LE(black_allocs, block_end);
      DCHECK_LT(remaining_chunk_size, gPageSize);
      // Consume the unallocated portion of the block.
      if (black_allocs < block_end) {
        // The first-chunk of the current page ends here. Store it.
        if (first_chunk_size > 0 && black_alloc_pages_first_chunk_size_[black_page_idx] == 0) {
          black_alloc_pages_first_chunk_size_[black_page_idx] = first_chunk_size;
          first_objs_moving_space_[black_page_idx].Assign(first_obj);
        }
        first_chunk_size = 0;
        first_obj = nullptr;
2452*795d594fSAndroid Build Coastguard Worker         size_t page_remaining = gPageSize - remaining_chunk_size;
2453*795d594fSAndroid Build Coastguard Worker         size_t block_remaining = block_end - black_allocs;
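        // Illustrative arithmetic (assuming 4KB pages, purely as an example):
        // with 1KB already consumed on this page, page_remaining is 3KB. A
        // block_remaining of 11KB then covers the rest of this page plus two
        // full pages (11KB - 3KB = 8KB), so black_page_idx advances by
        // 1 + 8KB/4KB = 3 and remaining_chunk_size becomes
        // ModuloPageSize(8KB) = 0.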
        if (page_remaining <= block_remaining) {
          block_remaining -= page_remaining;
          // Current page and the subsequent empty pages in the block.
          black_page_idx += 1 + DivideByPageSize(block_remaining);
          remaining_chunk_size = ModuloPageSize(block_remaining);
        } else {
          remaining_chunk_size += block_remaining;
        }
        black_allocs = block_end;
      }
    }
    if (black_page_idx < DivideByPageSize(bump_pointer_space_->Size())) {
      // Store the leftover first-chunk, if any, and update the page index.
      if (black_alloc_pages_first_chunk_size_[black_page_idx] > 0) {
        black_page_idx++;
      } else if (first_chunk_size > 0) {
        black_alloc_pages_first_chunk_size_[black_page_idx] = first_chunk_size;
        first_objs_moving_space_[black_page_idx].Assign(first_obj);
        black_page_idx++;
      }
    }
    black_page_count_ = black_page_idx - moving_first_objs_count_;
    delete block_sizes;
  }
  // Update the bump-pointer space by consuming all the pre-black blocks into
  // the main one.
  bump_pointer_space_->SetBlockSizes(thread_running_gc_,
                                     post_compact_end_ - begin,
                                     consumed_blocks_count);
  if (kIsDebugBuild) {
    size_t moving_space_size = bump_pointer_space_->Size();
    size_t los_size = 0;
    if (heap_->GetLargeObjectsSpace()) {
      los_size = heap_->GetLargeObjectsSpace()->GetBytesAllocated();
    }
    // The moving-space size is already updated to the post-compact size in
    // SetBlockSizes above. Also, bytes-allocated has already been adjusted with
    // the large-object space's freed bytes in Sweep(), but not with the
    // moving-space freed bytes.
    CHECK_GE(heap_->GetBytesAllocated() - black_objs_slide_diff_, moving_space_size + los_size)
        << " moving-space size:" << moving_space_size
        << " moving-space bytes-freed:" << black_objs_slide_diff_
        << " large-object-space size:" << los_size
        << " large-object-space bytes-freed:" << GetCurrentIteration()->GetFreedLargeObjectBytes()
        << " num-tlabs-merged:" << consumed_blocks_count
        << " main-block-size:" << (post_compact_end_ - begin)
        << " total-tlabs-moving-space:" << num_blocks;
  }
}

void MarkCompact::UpdateNonMovingSpaceBlackAllocations() {
  accounting::ObjectStack* stack = heap_->GetAllocationStack();
  const StackReference<mirror::Object>* limit = stack->End();
  uint8_t* const space_begin = non_moving_space_->Begin();
  for (StackReference<mirror::Object>* it = stack->Begin(); it != limit; ++it) {
    mirror::Object* obj = it->AsMirrorPtr();
    if (obj != nullptr && non_moving_space_bitmap_->HasAddress(obj)) {
      non_moving_space_bitmap_->Set(obj);
      // Clear so that we don't try to set the bit again in the next GC cycle.
      it->Clear();
      size_t idx = DivideByPageSize(reinterpret_cast<uint8_t*>(obj) - space_begin);
      uint8_t* page_begin = AlignDown(reinterpret_cast<uint8_t*>(obj), gPageSize);
      mirror::Object* first_obj = first_objs_non_moving_space_[idx].AsMirrorPtr();
      if (first_obj == nullptr
          || (obj < first_obj && reinterpret_cast<uint8_t*>(first_obj) > page_begin)) {
        first_objs_non_moving_space_[idx].Assign(obj);
      }
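      // If the object extends past its page, also record it as the
      // first-object of every subsequent page it overlaps, so that each page
      // can later be processed independently.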
      mirror::Object* next_page_first_obj = first_objs_non_moving_space_[++idx].AsMirrorPtr();
      uint8_t* next_page_begin = page_begin + gPageSize;
      if (next_page_first_obj == nullptr
          || reinterpret_cast<uint8_t*>(next_page_first_obj) > next_page_begin) {
        size_t obj_size = RoundUp(obj->SizeOf<kDefaultVerifyFlags>(), kAlignment);
        uint8_t* obj_end = reinterpret_cast<uint8_t*>(obj) + obj_size;
        while (next_page_begin < obj_end) {
          first_objs_non_moving_space_[idx++].Assign(obj);
          next_page_begin += gPageSize;
        }
      }
      // Update the first-objs count in case we went past non_moving_first_objs_count_.
      non_moving_first_objs_count_ = std::max(non_moving_first_objs_count_, idx);
    }
  }
}

class MarkCompact::ImmuneSpaceUpdateObjVisitor {
 public:
  explicit ImmuneSpaceUpdateObjVisitor(MarkCompact* collector) : collector_(collector) {}

  void operator()(mirror::Object* obj) const ALWAYS_INLINE REQUIRES(Locks::mutator_lock_) {
    RefsUpdateVisitor</*kCheckBegin*/false, /*kCheckEnd*/false> visitor(collector_,
                                                                        obj,
                                                                        /*begin_*/nullptr,
                                                                        /*end_*/nullptr);
    obj->VisitRefsForCompaction</*kFetchObjSize*/ false>(
        visitor, MemberOffset(0), MemberOffset(-1));
  }

  static void Callback(mirror::Object* obj, void* arg) REQUIRES(Locks::mutator_lock_) {
    reinterpret_cast<ImmuneSpaceUpdateObjVisitor*>(arg)->operator()(obj);
  }

 private:
  MarkCompact* const collector_;
};

class MarkCompact::ClassLoaderRootsUpdater : public ClassLoaderVisitor {
 public:
  explicit ClassLoaderRootsUpdater(MarkCompact* collector)
      : collector_(collector),
        moving_space_begin_(collector->black_dense_end_),
        moving_space_end_(collector->moving_space_end_) {}

  void Visit(ObjPtr<mirror::ClassLoader> class_loader) override
      REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) {
    ClassTable* const class_table = class_loader->GetClassTable();
    if (class_table != nullptr) {
      // Classes are updated concurrently.
      class_table->VisitRoots(*this, /*skip_classes=*/true);
    }
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const ALWAYS_INLINE
      REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const ALWAYS_INLINE
      REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
    collector_->UpdateRoot(
        root, moving_space_begin_, moving_space_end_, RootInfo(RootType::kRootVMInternal));
  }

 private:
  MarkCompact* collector_;
  uint8_t* const moving_space_begin_;
  uint8_t* const moving_space_end_;
};

class MarkCompact::LinearAllocPageUpdater {
 public:
  explicit LinearAllocPageUpdater(MarkCompact* collector)
      : collector_(collector),
        moving_space_begin_(collector->black_dense_end_),
        moving_space_end_(collector->moving_space_end_),
        last_page_touched_(false) {}

  // Update a page in a multi-object arena.
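  // Such a page is, roughly, a run of [TrackingHeader][payload] records, each
  // rounded up to LinearAlloc::kAlignment, with a zero-sized header
  // terminating the arena. first_obj points at the header of the first object
  // overlapping page_begin, which may start on an earlier page.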
  void MultiObjectArena(uint8_t* page_begin, uint8_t* first_obj)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(first_obj != nullptr);
    DCHECK_ALIGNED_PARAM(page_begin, gPageSize);
    uint8_t* page_end = page_begin + gPageSize;
    uint32_t obj_size;
    for (uint8_t* byte = first_obj; byte < page_end;) {
      TrackingHeader* header = reinterpret_cast<TrackingHeader*>(byte);
      obj_size = header->GetSize();
      if (UNLIKELY(obj_size == 0)) {
        // No more objects in this page to visit.
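        // The page counts as touched only if the terminating (zero-size)
        // header itself lies on this page, since reading it faulted the page in.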
        last_page_touched_ = byte >= page_begin;
        return;
      }
      uint8_t* obj = byte + sizeof(TrackingHeader);
      uint8_t* obj_end = byte + obj_size;
      if (header->Is16Aligned()) {
        obj = AlignUp(obj, 16);
      }
      uint8_t* begin_boundary = std::max(obj, page_begin);
      uint8_t* end_boundary = std::min(obj_end, page_end);
      if (begin_boundary < end_boundary) {
        VisitObject(header->GetKind(), obj, begin_boundary, end_boundary);
      }
      if (ArenaAllocator::IsRunningOnMemoryTool()) {
        obj_size += ArenaAllocator::kMemoryToolRedZoneBytes;
      }
      byte += RoundUp(obj_size, LinearAlloc::kAlignment);
    }
    last_page_touched_ = true;
  }

  // This version is only used for cases where the entire page is filled with
  // GC-roots, e.g. the class-table and intern-table.
  void SingleObjectArena(uint8_t* page_begin, size_t page_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    static_assert(sizeof(uint32_t) == sizeof(GcRoot<mirror::Object>));
    DCHECK_ALIGNED(page_begin, kAlignment);
    // The least-significant bits are used by the class-table.
    static constexpr uint32_t kMask = kObjectAlignment - 1;
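    // Each non-zero word packs a compressed reference with tag bits in the low
    // bits, i.e. word == (compressed_ref & ~kMask) | tag. The tag bits are
    // stripped before updating the root and restored afterwards.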
    size_t num_roots = page_size / sizeof(GcRoot<mirror::Object>);
    uint32_t* root_ptr = reinterpret_cast<uint32_t*>(page_begin);
    for (size_t i = 0; i < num_roots; root_ptr++, i++) {
      uint32_t word = *root_ptr;
      if (word != 0) {
        uint32_t lsbs = word & kMask;
        word &= ~kMask;
        VisitRootIfNonNull(reinterpret_cast<mirror::CompressedReference<mirror::Object>*>(&word));
        *root_ptr = word | lsbs;
        last_page_touched_ = true;
      }
    }
  }

  bool WasLastPageTouched() const { return last_page_touched_; }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
    mirror::Object* old_ref = root->AsMirrorPtr();
    DCHECK_NE(old_ref, nullptr);
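    // Three cases: a reference outside the moving space stays unchanged; one
    // allocated after black_allocations_begin_ is relocated via
    // PostCompactBlackObjAddr(); otherwise, a marked (live) object is mapped
    // to its post-compact address via the live-words bitmap.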
    if (MarkCompact::HasAddress(old_ref, moving_space_begin_, moving_space_end_)) {
      mirror::Object* new_ref = old_ref;
      if (reinterpret_cast<uint8_t*>(old_ref) >= collector_->black_allocations_begin_) {
        new_ref = collector_->PostCompactBlackObjAddr(old_ref);
      } else if (collector_->live_words_bitmap_->Test(old_ref)) {
        DCHECK(collector_->moving_space_bitmap_->Test(old_ref))
            << "ref:" << old_ref << " root:" << root;
        new_ref = collector_->PostCompactOldObjAddr(old_ref);
      }
      if (old_ref != new_ref) {
        root->Assign(new_ref);
      }
    }
  }

 private:
  void VisitObject(LinearAllocKind kind,
                   void* obj,
                   uint8_t* start_boundary,
                   uint8_t* end_boundary) const ALWAYS_INLINE
      REQUIRES_SHARED(Locks::mutator_lock_) {
    switch (kind) {
      case LinearAllocKind::kNoGCRoots:
        break;
      case LinearAllocKind::kGCRootArray:
        {
          GcRoot<mirror::Object>* root = reinterpret_cast<GcRoot<mirror::Object>*>(start_boundary);
          GcRoot<mirror::Object>* last = reinterpret_cast<GcRoot<mirror::Object>*>(end_boundary);
          for (; root < last; root++) {
            VisitRootIfNonNull(root->AddressWithoutBarrier());
          }
        }
        break;
      case LinearAllocKind::kArtMethodArray:
        {
          LengthPrefixedArray<ArtMethod>* array = static_cast<LengthPrefixedArray<ArtMethod>*>(obj);
          // Old methods are clobbered in debug builds. Check the size to confirm
          // whether the array has any GC roots to visit. See
          // ClassLinker::LinkMethodsHelper::ClobberOldMethods().
          if (array->size() > 0) {
            if (collector_->pointer_size_ == PointerSize::k64) {
              ArtMethod::VisitArrayRoots<PointerSize::k64>(
                  *this, start_boundary, end_boundary, array);
            } else {
              DCHECK_EQ(collector_->pointer_size_, PointerSize::k32);
              ArtMethod::VisitArrayRoots<PointerSize::k32>(
                  *this, start_boundary, end_boundary, array);
            }
          }
        }
        break;
      case LinearAllocKind::kArtMethod:
        ArtMethod::VisitRoots(*this, start_boundary, end_boundary, static_cast<ArtMethod*>(obj));
        break;
      case LinearAllocKind::kArtFieldArray:
        ArtField::VisitArrayRoots(*this,
                                  start_boundary,
                                  end_boundary,
                                  static_cast<LengthPrefixedArray<ArtField>*>(obj));
        break;
      case LinearAllocKind::kDexCacheArray:
        {
          mirror::DexCachePair<mirror::Object>* first =
              reinterpret_cast<mirror::DexCachePair<mirror::Object>*>(start_boundary);
          mirror::DexCachePair<mirror::Object>* last =
              reinterpret_cast<mirror::DexCachePair<mirror::Object>*>(end_boundary);
          mirror::DexCache::VisitDexCachePairRoots(*this, first, last);
        }
    }
  }

  MarkCompact* const collector_;
  // Cache to speed up checking whether a GC-root is in the moving space.
  uint8_t* const moving_space_begin_;
  uint8_t* const moving_space_end_;
  // Whether the last page was touched.
  bool last_page_touched_ = false;
};

void MarkCompact::UpdateClassTableClasses(Runtime* runtime, bool immune_class_table_only) {
  // If the process is debuggable then redefinition is allowed, which means
  // pre-zygote-fork class-tables may have pointers to classes in the moving
  // space. So visit classes from class-sets that are not in the linear-alloc
  // arena-pool.
  if (UNLIKELY(runtime->IsJavaDebuggableAtInit())) {
    ClassLinker* linker = runtime->GetClassLinker();
    ClassLoaderRootsUpdater updater(this);
    GcVisitedArenaPool* pool = static_cast<GcVisitedArenaPool*>(runtime->GetLinearAllocArenaPool());
    auto cond = [this, pool, immune_class_table_only](ClassTable::ClassSet& set) -> bool {
      if (!set.empty()) {
        return immune_class_table_only ?
               immune_spaces_.ContainsObject(reinterpret_cast<mirror::Object*>(&*set.begin())) :
               !pool->Contains(reinterpret_cast<void*>(&*set.begin()));
      }
      return false;
    };
    linker->VisitClassTables([cond, &updater](ClassTable* table)
                                 REQUIRES_SHARED(Locks::mutator_lock_) {
                               table->VisitClassesIfConditionMet(cond, updater);
                             });
    ReaderMutexLock rmu(thread_running_gc_, *Locks::classlinker_classes_lock_);
    linker->GetBootClassTable()->VisitClassesIfConditionMet(cond, updater);
  }
}

void MarkCompact::CompactionPause() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime* runtime = Runtime::Current();
  non_moving_space_bitmap_ = non_moving_space_->GetLiveBitmap();
  if (kIsDebugBuild) {
    DCHECK_EQ(thread_running_gc_, Thread::Current());
    // TODO(Simulator): Test that this should not operate on the simulated
    // stack when the simulator supports mark-compact.
    stack_low_addr_ = thread_running_gc_->GetStackEnd<kNativeStackType>();
    stack_high_addr_ = reinterpret_cast<char*>(stack_low_addr_)
                       + thread_running_gc_->GetUsableStackSize<kNativeStackType>();
  }
  {
    TimingLogger::ScopedTiming t2("(Paused)UpdateCompactionDataStructures", GetTimings());
    ReaderMutexLock rmu(thread_running_gc_, *Locks::heap_bitmap_lock_);
    // Refresh data-structures to catch up on allocations that may have
    // happened since the marking-phase pause.
    // There could be several TLABs that got allocated since the marking pause.
    // We don't want to compact them and instead update the TLAB info in TLS
    // and let mutators continue to use the TLABs.
    // We need to set all the bits in the live-words bitmap corresponding to
    // allocated objects. Also, we need to find the objects that overlap
    // page-begin boundaries. Unlike objects allocated before
    // black_allocations_begin_, which can be identified via the mark-bitmap,
    // we can get this info only by walking the space past
    // black_allocations_begin_, which involves fetching the object size.
    // TODO: We can reduce the time spent on this in a pause by performing one
    // round of this concurrently prior to the pause.
    UpdateMovingSpaceBlackAllocations();
    // Iterate over the allocation_stack_; for every object in the non-moving
    // space:
    // 1. Mark the object in the live bitmap.
    // 2. Erase the object from the allocation stack.
    // 3. In the corresponding page, update the first-object vector if needed.
    UpdateNonMovingSpaceBlackAllocations();
    // This store is visible to mutators (or uffd worker threads) as the
    // mutator lock's unlock guarantees that.
    compacting_ = true;
    // Start updating roots and system weaks now.
    heap_->GetReferenceProcessor()->UpdateRoots(this);
  }
  {
    // TODO: Immune-space updating has to happen either before or after
    // remapping pre-compact pages to from-space. And depending on when it's
    // done, we have to invoke VisitRefsForCompaction() with or without
    // read-barrier.
    TimingLogger::ScopedTiming t2("(Paused)UpdateImmuneSpaces", GetTimings());
    accounting::CardTable* const card_table = heap_->GetCardTable();
    for (auto& space : immune_spaces_.GetSpaces()) {
      DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
      accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      // Having a zygote-space indicates that the first zygote fork has taken
      // place and that the classes/dex-caches in immune-spaces may have
      // allocations (ArtMethod/ArtField arrays, dex-cache array, etc.) in
      // private-anonymous mappings that are not visited via userfaultfd.
      // Visit them here.
      ImmuneSpaceUpdateObjVisitor visitor(this);
      if (table != nullptr) {
        table->ProcessCards();
        table->VisitObjects(ImmuneSpaceUpdateObjVisitor::Callback, &visitor);
      } else {
        WriterMutexLock wmu(thread_running_gc_, *Locks::heap_bitmap_lock_);
        card_table->Scan<false>(
            live_bitmap,
            space->Begin(),
            space->Limit(),
            visitor,
            accounting::CardTable::kCardDirty - 1);
      }
    }
  }

  {
    TimingLogger::ScopedTiming t2("(Paused)UpdateRoots", GetTimings());
    runtime->VisitConcurrentRoots(this, kVisitRootFlagAllRoots);
    runtime->VisitNonThreadRoots(this);
    {
      ClassLinker* linker = runtime->GetClassLinker();
      ClassLoaderRootsUpdater updater(this);
      ReaderMutexLock rmu(thread_running_gc_, *Locks::classlinker_classes_lock_);
      linker->VisitClassLoaders(&updater);
      linker->GetBootClassTable()->VisitRoots(updater, /*skip_classes=*/true);
    }
    SweepSystemWeaks(thread_running_gc_, runtime, /*paused=*/true);

    bool has_zygote_space = heap_->HasZygoteSpace();
    GcVisitedArenaPool* arena_pool =
        static_cast<GcVisitedArenaPool*>(runtime->GetLinearAllocArenaPool());
    // Update immune/pre-zygote class-tables in case class redefinition took
    // place. Pre-zygote class-tables that are not in immune spaces are updated
    // below if we are in fallback-mode or if there is no zygote space. So in
    // that case only visit class-tables that are in immune-spaces.
    UpdateClassTableClasses(runtime, uffd_ == kFallbackMode || !has_zygote_space);

    // Acquire the arena-pool's lock, which should be released after the pool
    // is userfaultfd-registered. This is to ensure that no new arenas are
    // allocated and used in between. Since they would not be captured in
    // linear_alloc_arenas_ below, we would miss updating their pages. The same
    // reason also applies to new allocations within an existing arena, which
    // may change last_byte.
    // Since we are in a STW pause, this shouldn't happen anyway, but holding
    // the lock confirms it.
    // TODO (b/305779657): Replace with ExclusiveTryLock() and assert that it
    // doesn't fail once it is available for ReaderWriterMutex.
    WriterMutexLock pool_wmu(thread_running_gc_, arena_pool->GetLock());

    // TODO: Find out why it's not sufficient to visit native roots of immune
    // spaces, and why all the pre-zygote fork arenas have to be linearly
    // updated. Is it possible that some native root starts getting pointed to
    // by some object in the moving space after fork? Or are we missing a
    // write-barrier somewhere when a native root is updated?
    auto arena_visitor = [this](uint8_t* page_begin, uint8_t* first_obj, size_t page_size)
                             REQUIRES_SHARED(Locks::mutator_lock_) {
                           LinearAllocPageUpdater updater(this);
                           if (first_obj != nullptr) {
                             updater.MultiObjectArena(page_begin, first_obj);
                           } else {
                             updater.SingleObjectArena(page_begin, page_size);
                           }
                         };
    if (uffd_ == kFallbackMode || (!has_zygote_space && runtime->IsZygote())) {
      // Besides fallback-mode, visit the linear-alloc space in the pause for
      // zygote processes prior to the first fork (that's when the zygote space
      // gets created).
      if (kIsDebugBuild && IsValidFd(uffd_)) {
        // All arenas allocated so far are expected to be pre-zygote fork.
        arena_pool->ForEachAllocatedArena(
            [](const TrackedArena& arena)
                REQUIRES_SHARED(Locks::mutator_lock_) { CHECK(arena.IsPreZygoteForkArena()); });
      }
      arena_pool->VisitRoots(arena_visitor);
    } else {
      // Inform the arena-pool that compaction is going on, so the TrackedArena
      // objects corresponding to the arenas that are freed shouldn't be
      // deleted immediately. We will do that in FinishPhase(). This is to
      // avoid an ABA problem.
      arena_pool->DeferArenaFreeing();
      arena_pool->ForEachAllocatedArena(
          [this, arena_visitor, has_zygote_space](const TrackedArena& arena)
              REQUIRES_SHARED(Locks::mutator_lock_) {
            // The pre-zygote fork arenas are not visited concurrently in the
            // zygote children processes. The native roots of the dirty objects
            // are visited during the immune-space visit below.
            if (!arena.IsPreZygoteForkArena()) {
              uint8_t* last_byte = arena.GetLastUsedByte();
              auto ret = linear_alloc_arenas_.insert({&arena, last_byte});
              CHECK(ret.second);
            } else if (!arena.IsSingleObjectArena() || !has_zygote_space) {
              // The pre-zygote class-table and intern-table don't need to be
              // updated.
              // TODO: Explore the possibility of using /proc/self/pagemap to
              // fetch which pages in these arenas are private-dirty and then
              // only visit those pages. To optimize it further, we can keep
              // all pre-zygote arenas in a single memory range so that just
              // one read from pagemap is sufficient.
              arena.VisitRoots(arena_visitor);
            }
          });
    }
    // Release order w.r.t. the mutator threads' SIGBUS-handler loads.
    sigbus_in_progress_count_[0].store(0, std::memory_order_relaxed);
    sigbus_in_progress_count_[1].store(0, std::memory_order_release);
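    // These stores pair with the acquire CAS in SigbusHandler's
    // ScopedInProgressCount, so a mutator that faults afterwards observes all
    // the compaction data-structures set up above.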
    KernelPreparation();
  }

  UpdateNonMovingSpace();
  // Fallback mode.
  if (uffd_ == kFallbackMode) {
    CompactMovingSpace<kFallbackMode>(nullptr);

    int32_t freed_bytes = black_objs_slide_diff_;
    bump_pointer_space_->RecordFree(freed_objects_, freed_bytes);
    RecordFree(ObjectBytePair(freed_objects_, freed_bytes));
  } else {
    DCHECK_EQ(compaction_buffer_counter_.load(std::memory_order_relaxed), 1);
  }
  stack_low_addr_ = nullptr;
}

void MarkCompact::KernelPrepareRangeForUffd(uint8_t* to_addr, uint8_t* from_addr, size_t map_size) {
  int mremap_flags = MREMAP_MAYMOVE | MREMAP_FIXED;
  if (gHaveMremapDontunmap) {
    mremap_flags |= MREMAP_DONTUNMAP;
  }
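  // With MREMAP_DONTUNMAP the source range stays mapped (but empty of pages)
  // after the move, so it can serve as from-space without the mmap below.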

  void* ret = mremap(to_addr, map_size, map_size, mremap_flags, from_addr);
  CHECK_EQ(ret, static_cast<void*>(from_addr))
      << "mremap to move pages failed: " << strerror(errno)
      << ". space-addr=" << reinterpret_cast<void*>(to_addr) << " size=" << PrettySize(map_size);

  if (!gHaveMremapDontunmap) {
    // Without MREMAP_DONTUNMAP the source mapping is unmapped by mremap, so
    // mmap the moving space again.
    // Use MAP_FIXED_NOREPLACE so that if someone else reserves the 'to_addr'
    // mapping in the meantime (which can happen when MREMAP_DONTUNMAP isn't
    // available), we fail instead of unmapping someone else's mapping and
    // causing crashes elsewhere.
    int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE;
    ret = mmap(to_addr, map_size, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
    CHECK_EQ(ret, static_cast<void*>(to_addr))
        << "mmap for moving space failed: " << strerror(errno);
  }
}

void MarkCompact::KernelPreparation() {
  TimingLogger::ScopedTiming t("(Paused)KernelPreparation", GetTimings());
  uint8_t* moving_space_begin = bump_pointer_space_->Begin();
  size_t moving_space_size = bump_pointer_space_->Capacity();
  size_t moving_space_register_sz = (moving_first_objs_count_ + black_page_count_) * gPageSize;
  DCHECK_LE(moving_space_register_sz, moving_space_size);
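  // Only the used portion of the space (pre-compact first-obj pages plus
  // black-alloc pages) is registered with userfaultfd; see the anon_vma note
  // below for why the rest of the space is deliberately left unregistered.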

  KernelPrepareRangeForUffd(moving_space_begin, from_space_begin_, moving_space_size);

  if (IsValidFd(uffd_)) {
    if (moving_space_register_sz > 0) {
      // mremap clears the 'anon_vma' field of anonymous mappings. If we
      // uffd-register only the used portion of the space, then the vma gets
      // split (between used and unused portions) and, as soon as pages are
      // mapped to the vmas, they get different 'anon_vma's assigned, which
      // ensures that the two vmas cannot merge after we uffd-unregister the
      // used portion. OTOH, registering the entire space avoids the split, but
      // unnecessarily causes userfaults on allocations.
      // By faulting in a page we force the kernel to allocate the 'anon_vma'
      // *before* the vma-split in uffd-register. This ensures that when we
      // unregister the used portion after compaction, the two split vmas
      // merge. This is necessary for the mremap of the next GC cycle to not
      // fail due to having more than one vma in the source range.
      //
      // Fault in an address aligned to the PMD size so that, in case THP is
      // enabled, we don't mistakenly fault in a page in the beginning portion
      // that will be registered with uffd. If the alignment takes us beyond
      // the space, then fault in the first page and madvise it.
      size_t pmd_size = Heap::GetPMDSize();
      uint8_t* fault_in_addr = AlignUp(moving_space_begin + moving_space_register_sz, pmd_size);
      if (bump_pointer_space_->Contains(reinterpret_cast<mirror::Object*>(fault_in_addr))) {
        *const_cast<volatile uint8_t*>(fault_in_addr) = 0;
      } else {
        DCHECK_ALIGNED_PARAM(moving_space_begin, gPageSize);
        *const_cast<volatile uint8_t*>(moving_space_begin) = 0;
        madvise(moving_space_begin, pmd_size, MADV_DONTNEED);
      }
      // Register the moving space with userfaultfd.
      RegisterUffd(moving_space_begin, moving_space_register_sz);
      // madvise ensures that if any page gets mapped (only possible if some
      // thread is reading the page(s) without trying to make sense of its
      // contents, as we hold the mutator-lock exclusively) between mremap and
      // uffd-registration, then it gets zapped so that the map is empty and
      // ready for userfaults. If we could mremap after uffd-registration (like
      // in the case of the linear-alloc space below) then we wouldn't need it.
      // But since we don't register the entire space, we can't do that.
      madvise(moving_space_begin, moving_space_register_sz, MADV_DONTNEED);
    }
    // Prepare linear-alloc for concurrent compaction.
    for (auto& data : linear_alloc_spaces_data_) {
      DCHECK_EQ(static_cast<ssize_t>(data.shadow_.Size()), data.end_ - data.begin_);
      // There could be threads running in suspended mode when the compaction
      // pause is being executed. In order to make the userfaultfd setup
      // atomic, the registration has to be done *before* moving the pages to
      // the shadow map.
      RegisterUffd(data.begin_, data.shadow_.Size());
      KernelPrepareRangeForUffd(data.begin_, data.shadow_.Begin(), data.shadow_.Size());
    }
  }
}

bool MarkCompact::SigbusHandler(siginfo_t* info) {
  class ScopedInProgressCount {
   public:
    explicit ScopedInProgressCount(MarkCompact* collector) : collector_(collector) {
      // Increment the count only if compaction is not done yet.
      for (idx_ = 0; idx_ < 2; idx_++) {
        SigbusCounterType prev =
            collector_->sigbus_in_progress_count_[idx_].load(std::memory_order_relaxed);
        while ((prev & kSigbusCounterCompactionDoneMask) == 0) {
          if (collector_->sigbus_in_progress_count_[idx_].compare_exchange_strong(
                  prev, prev + 1, std::memory_order_acquire)) {
            DCHECK_LT(prev, kSigbusCounterCompactionDoneMask - 1);
            return;
          }
        }
      }
    }

    bool TolerateEnoent() const { return idx_ == 1; }

    bool IsCompactionDone() const { return idx_ == 2; }

    ~ScopedInProgressCount() {
      if (idx_ < 2) {
        collector_->sigbus_in_progress_count_[idx_].fetch_sub(1, std::memory_order_release);
      }
    }

   private:
    MarkCompact* const collector_;
    uint8_t idx_;
  };
3068*795d594fSAndroid Build Coastguard Worker 
3069*795d594fSAndroid Build Coastguard Worker   if (info->si_code != BUS_ADRERR) {
3070*795d594fSAndroid Build Coastguard Worker     // Userfaultfd raises SIGBUS with BUS_ADRERR. All other causes can't be
3071*795d594fSAndroid Build Coastguard Worker     // handled here.
3072*795d594fSAndroid Build Coastguard Worker     return false;
3073*795d594fSAndroid Build Coastguard Worker   }
3074*795d594fSAndroid Build Coastguard Worker 
3075*795d594fSAndroid Build Coastguard Worker   ScopedInProgressCount spc(this);
3076*795d594fSAndroid Build Coastguard Worker   uint8_t* fault_page = AlignDown(reinterpret_cast<uint8_t*>(info->si_addr), gPageSize);
3077*795d594fSAndroid Build Coastguard Worker   if (!spc.IsCompactionDone()) {
3078*795d594fSAndroid Build Coastguard Worker     if (HasAddress(reinterpret_cast<mirror::Object*>(fault_page))) {
3079*795d594fSAndroid Build Coastguard Worker       Thread* self = Thread::Current();
3080*795d594fSAndroid Build Coastguard Worker       Locks::mutator_lock_->AssertSharedHeld(self);
3081*795d594fSAndroid Build Coastguard Worker       size_t nr_moving_space_used_pages = moving_first_objs_count_ + black_page_count_;
3082*795d594fSAndroid Build Coastguard Worker       ConcurrentlyProcessMovingPage(fault_page,
3083*795d594fSAndroid Build Coastguard Worker                                     self->GetThreadLocalGcBuffer(),
3084*795d594fSAndroid Build Coastguard Worker                                     nr_moving_space_used_pages,
3085*795d594fSAndroid Build Coastguard Worker                                     spc.TolerateEnoent());
3086*795d594fSAndroid Build Coastguard Worker       return true;
3087*795d594fSAndroid Build Coastguard Worker     } else {
3088*795d594fSAndroid Build Coastguard Worker       // Find the linear-alloc space containing fault-addr
3089*795d594fSAndroid Build Coastguard Worker       for (auto& data : linear_alloc_spaces_data_) {
3090*795d594fSAndroid Build Coastguard Worker         if (data.begin_ <= fault_page && data.end_ > fault_page) {
3091*795d594fSAndroid Build Coastguard Worker           ConcurrentlyProcessLinearAllocPage(fault_page, spc.TolerateEnoent());
3092*795d594fSAndroid Build Coastguard Worker           return true;
3093*795d594fSAndroid Build Coastguard Worker         }
3094*795d594fSAndroid Build Coastguard Worker       }
3095*795d594fSAndroid Build Coastguard Worker       // Fault address doesn't belong to either moving-space or linear-alloc.
3096*795d594fSAndroid Build Coastguard Worker       return false;
3097*795d594fSAndroid Build Coastguard Worker     }
3098*795d594fSAndroid Build Coastguard Worker   } else {
3099*795d594fSAndroid Build Coastguard Worker     // We may spuriously get SIGBUS fault, which was initiated before the
3100*795d594fSAndroid Build Coastguard Worker     // compaction was finished, but ends up here. In that case, if the fault
3101*795d594fSAndroid Build Coastguard Worker     // address is valid then consider it handled.
3102*795d594fSAndroid Build Coastguard Worker     return HasAddress(reinterpret_cast<mirror::Object*>(fault_page)) ||
3103*795d594fSAndroid Build Coastguard Worker            linear_alloc_spaces_data_.end() !=
3104*795d594fSAndroid Build Coastguard Worker                std::find_if(linear_alloc_spaces_data_.begin(),
3105*795d594fSAndroid Build Coastguard Worker                             linear_alloc_spaces_data_.end(),
3106*795d594fSAndroid Build Coastguard Worker                             [fault_page](const LinearAllocSpaceData& data) {
3107*795d594fSAndroid Build Coastguard Worker                               return data.begin_ <= fault_page && data.end_ > fault_page;
3108*795d594fSAndroid Build Coastguard Worker                             });
3109*795d594fSAndroid Build Coastguard Worker   }
3110*795d594fSAndroid Build Coastguard Worker }
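
// Illustrative sketch (not ART's actual signal plumbing, and assuming
// <signal.h> is available here): SigbusHandler() above can only inspect
// si_addr/si_code if the process installs its SIGBUS handler with SA_SIGINFO.
[[maybe_unused]] static void InstallSigbusHandlerSketch() {
  struct sigaction act = {};
  act.sa_flags = SA_SIGINFO;
  act.sa_sigaction = [](int, siginfo_t* info ATTRIBUTE_UNUSED, void*) {
    // A real handler would dispatch to the runtime, which eventually calls
    // MarkCompact::SigbusHandler(info), and fall back to the default action
    // when that returns false.
  };
  sigaction(SIGBUS, &act, nullptr);
}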

void MarkCompact::ConcurrentlyProcessMovingPage(uint8_t* fault_page,
                                                uint8_t* buf,
                                                size_t nr_moving_space_used_pages,
                                                bool tolerate_enoent) {
  Thread* self = Thread::Current();
  uint8_t* unused_space_begin =
      bump_pointer_space_->Begin() + nr_moving_space_used_pages * gPageSize;
  DCHECK(IsAlignedParam(unused_space_begin, gPageSize));
  if (fault_page >= unused_space_begin) {
    // There is a race which allows more than one thread to install a
    // zero-page. But we can tolerate that. So absorb the EEXIST returned by
    // the ioctl and move on.
    ZeropageIoctl(fault_page, gPageSize, /*tolerate_eexist=*/true, tolerate_enoent);
    return;
  }
  size_t page_idx = DivideByPageSize(fault_page - bump_pointer_space_->Begin());
  DCHECK_LT(page_idx, moving_first_objs_count_ + black_page_count_);
  mirror::Object* first_obj = first_objs_moving_space_[page_idx].AsMirrorPtr();
  if (first_obj == nullptr) {
    DCHECK_GT(fault_page, post_compact_end_);
    // Install zero-pages in the entire remaining TLAB to avoid multiple ioctl invocations.
    uint8_t* end = AlignDown(self->GetTlabEnd(), gPageSize);
    if (fault_page < self->GetTlabStart() || fault_page >= end) {
      end = fault_page + gPageSize;
    }
    size_t end_idx = page_idx + DivideByPageSize(end - fault_page);
    size_t length = 0;
    for (size_t idx = page_idx; idx < end_idx; idx++, length += gPageSize) {
      uint32_t cur_state = moving_pages_status_[idx].load(std::memory_order_acquire);
      if (cur_state != static_cast<uint8_t>(PageState::kUnprocessed)) {
        DCHECK_EQ(cur_state, static_cast<uint8_t>(PageState::kProcessedAndMapped));
        break;
      }
    }
    if (length > 0) {
      length = ZeropageIoctl(fault_page, length, /*tolerate_eexist=*/true, tolerate_enoent);
      for (size_t len = 0, idx = page_idx; len < length; idx++, len += gPageSize) {
        moving_pages_status_[idx].store(static_cast<uint8_t>(PageState::kProcessedAndMapped),
                                        std::memory_order_release);
      }
    }
    return;
  }

  uint32_t raw_state = moving_pages_status_[page_idx].load(std::memory_order_acquire);
  uint32_t backoff_count = 0;
  PageState state;
  while (true) {
    state = GetPageStateFromWord(raw_state);
    if (state == PageState::kProcessing || state == PageState::kMutatorProcessing ||
        state == PageState::kProcessingAndMapping || state == PageState::kProcessedAndMapping) {
      // Wait for the page to be mapped (by the gc-thread or some mutator) before
      // returning. The wait is not expected to be long as the read state indicates
      // that the other thread is actively working on the page.
      BackOff(backoff_count++);
      raw_state = moving_pages_status_[page_idx].load(std::memory_order_acquire);
    } else if (state == PageState::kProcessedAndMapped) {
      // Nothing to do.
      break;
    } else {
      // Acquire order to ensure we don't start writing to a page, which could
      // be shared, before the CAS is successful.
      if (state == PageState::kUnprocessed &&
          moving_pages_status_[page_idx].compare_exchange_strong(
              raw_state,
              static_cast<uint8_t>(PageState::kMutatorProcessing),
              std::memory_order_acquire)) {
        if (fault_page < black_dense_end_) {
          UpdateNonMovingPage(first_obj, fault_page, from_space_slide_diff_, moving_space_bitmap_);
          buf = fault_page + from_space_slide_diff_;
        } else {
          if (UNLIKELY(buf == nullptr)) {
            uint16_t idx = compaction_buffer_counter_.fetch_add(1, std::memory_order_relaxed);
            // The buffer-map is one page bigger as the first buffer is used by the GC-thread.
            CHECK_LE(idx, kMutatorCompactionBufferCount);
            buf = compaction_buffers_map_.Begin() + idx * gPageSize;
            DCHECK(compaction_buffers_map_.HasAddress(buf));
            self->SetThreadLocalGcBuffer(buf);
          }

          if (fault_page < post_compact_end_) {
            // The page has to be compacted.
            CompactPage(first_obj,
                        pre_compact_offset_moving_space_[page_idx],
                        buf,
                        /*needs_memset_zero=*/true);
          } else {
            DCHECK_NE(first_obj, nullptr);
            DCHECK_GT(pre_compact_offset_moving_space_[page_idx], 0u);
            uint8_t* pre_compact_page = black_allocations_begin_ + (fault_page - post_compact_end_);
            uint32_t first_chunk_size = black_alloc_pages_first_chunk_size_[page_idx];
            mirror::Object* next_page_first_obj = nullptr;
            if (page_idx + 1 < moving_first_objs_count_ + black_page_count_) {
              next_page_first_obj = first_objs_moving_space_[page_idx + 1].AsMirrorPtr();
            }
            DCHECK(IsAlignedParam(pre_compact_page, gPageSize));
            SlideBlackPage(first_obj,
                           next_page_first_obj,
                           first_chunk_size,
                           pre_compact_page,
                           buf,
                           /*needs_memset_zero=*/true);
          }
        }
        // Nobody else would simultaneously modify this page's state so an
        // atomic store is sufficient. Use 'release' order to guarantee that
        // loads/stores to the page are finished before this store. Since the
        // mutator used its own buffer for the processing, there is no reason to
        // put its index in the status of the page. Also, the mutator is going
        // to immediately map the page, so that info is not needed.
        moving_pages_status_[page_idx].store(static_cast<uint8_t>(PageState::kProcessedAndMapping),
                                             std::memory_order_release);
        CopyIoctl(fault_page, buf, gPageSize, /*return_on_contention=*/false, tolerate_enoent);
        // Store is sufficient as no other thread modifies the status at this stage.
        moving_pages_status_[page_idx].store(static_cast<uint8_t>(PageState::kProcessedAndMapped),
                                             std::memory_order_release);
        break;
      }
      state = GetPageStateFromWord(raw_state);
      if (state == PageState::kProcessed) {
        size_t arr_len = moving_first_objs_count_ + black_page_count_;
        // The page is processed but not mapped. We should map it. The release
        // order used in MapMovingSpacePages will ensure that the increment to
        // moving_compaction_in_progress is done first.
        if (MapMovingSpacePages(page_idx,
                                arr_len,
                                /*from_fault=*/true,
                                /*return_on_contention=*/false,
                                tolerate_enoent) > 0) {
          break;
        }
        raw_state = moving_pages_status_[page_idx].load(std::memory_order_acquire);
      }
    }
  }
}
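
// Rough lifecycle of a moving-space page as exercised above (PageState is
// defined in mark_compact.h): a faulting mutator takes kUnprocessed ->
// kMutatorProcessing -> kProcessedAndMapping -> kProcessedAndMapped, while a
// page already prepared by the GC thread goes kProcessed ->
// kProcessedAndMapped via MapMovingSpacePages(). Every other in-between state
// means some thread is already working on the page, so the mutator just backs
// off until the page is mapped.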

bool MarkCompact::MapUpdatedLinearAllocPages(uint8_t* start_page,
                                             uint8_t* start_shadow_page,
                                             Atomic<PageState>* state,
                                             size_t length,
                                             bool free_pages,
                                             bool single_ioctl,
                                             bool tolerate_enoent) {
  DCHECK_ALIGNED_PARAM(length, gPageSize);
  Atomic<PageState>* madv_state = state;
  size_t madv_len = length;
  uint8_t* madv_start = start_shadow_page;
  bool check_state_for_madv = false;
  uint8_t* end_page = start_page + length;
  while (start_page < end_page) {
    size_t map_len = 0;
    // Find a contiguous range of pages that we can map in a single ioctl.
    for (Atomic<PageState>* cur_state = state;
         map_len < length && cur_state->load(std::memory_order_acquire) == PageState::kProcessed;
         map_len += gPageSize, cur_state++) {
      // No body.
    }

    if (map_len == 0) {
      if (single_ioctl) {
        return state->load(std::memory_order_relaxed) == PageState::kProcessedAndMapped;
      }
      // Skip all the pages that this thread can't map.
      while (length > 0) {
        PageState s = state->load(std::memory_order_relaxed);
        if (s == PageState::kProcessed) {
          break;
        }
        // If we find any page which is being processed or mapped (only possible by mutator(s))
        // then we need to re-check the page-state and, if needed, wait for the state to change
        // to 'mapped', before the shadow pages are reclaimed.
        check_state_for_madv |= s > PageState::kUnprocessed && s < PageState::kProcessedAndMapped;
        state++;
        length -= gPageSize;
        start_shadow_page += gPageSize;
        start_page += gPageSize;
      }
    } else {
      map_len = CopyIoctl(start_page,
                          start_shadow_page,
                          map_len,
                          /*return_on_contention=*/false,
                          tolerate_enoent);
      DCHECK_NE(map_len, 0u);
      // Declare that the pages are ready to be accessed. Store is sufficient
      // as any thread will be storing the same value.
      for (size_t l = 0; l < map_len; l += gPageSize, state++) {
        PageState s = state->load(std::memory_order_relaxed);
        DCHECK(s == PageState::kProcessed || s == PageState::kProcessedAndMapped) << "state:" << s;
        state->store(PageState::kProcessedAndMapped, std::memory_order_release);
      }
      if (single_ioctl) {
        break;
      }
      start_page += map_len;
      start_shadow_page += map_len;
      length -= map_len;
      // state is already updated above.
    }
  }
  if (free_pages) {
    if (check_state_for_madv) {
      // Wait until all the pages are mapped before releasing them. This check
      // is needed only if some mutators were found concurrently mapping pages
      // earlier.
      for (size_t l = 0; l < madv_len; l += gPageSize, madv_state++) {
        uint32_t backoff_count = 0;
        PageState s = madv_state->load(std::memory_order_relaxed);
        while (s > PageState::kUnprocessed && s < PageState::kProcessedAndMapped) {
          BackOff(backoff_count++);
          s = madv_state->load(std::memory_order_relaxed);
        }
      }
    }
    ZeroAndReleaseMemory(madv_start, madv_len);
  }
  return true;
}
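
// Illustrative sketch (not ART's CopyIoctl(), which additionally handles
// contention and error tolerance): the UFFDIO_COPY request that presumably
// underlies it. Field names come from <linux/userfaultfd.h>; on partial
// success the kernel reports the number of bytes it mapped in 'copy'.
[[maybe_unused]] static size_t UffdCopySketch(int uffd, uint8_t* dst, uint8_t* src, size_t len) {
  struct uffdio_copy copy = {};
  copy.dst = reinterpret_cast<uintptr_t>(dst);
  copy.src = reinterpret_cast<uintptr_t>(src);
  copy.len = len;
  copy.mode = 0;
  if (ioctl(uffd, UFFDIO_COPY, &copy) == 0) {
    return len;  // The whole range was copied and mapped atomically.
  }
  // On EAGAIN and similar errors, 'copy.copy' holds the partially mapped
  // length, if any.
  return copy.copy > 0 ? static_cast<size_t>(copy.copy) : 0u;
}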

void MarkCompact::ConcurrentlyProcessLinearAllocPage(uint8_t* fault_page, bool tolerate_enoent) {
  auto arena_iter = linear_alloc_arenas_.end();
  {
    TrackedArena temp_arena(fault_page);
    arena_iter = linear_alloc_arenas_.upper_bound(&temp_arena);
    arena_iter = arena_iter != linear_alloc_arenas_.begin() ? std::prev(arena_iter)
                                                            : linear_alloc_arenas_.end();
  }
  // Unlike ProcessLinearAlloc(), we don't need to hold the arena-pool's lock
  // here, because a thread accessing the page (and thereby causing this
  // userfault) confirms that nobody can delete the corresponding arena and
  // release its pages.
  // NOTE: A memory range may be recycled several times during a compaction
  // cycle, potentially causing a userfault on the same page several times.
  // That's not a problem, as all of them (except possibly the first one) only
  // require us to map a zero-page, which we do without updating the
  // 'state_arr'.
  if (arena_iter == linear_alloc_arenas_.end() ||
      arena_iter->first->IsWaitingForDeletion() ||
      arena_iter->second <= fault_page) {
    // Fault page isn't in any of the arenas that existed before we started
    // compaction. So map a zeropage and return.
    ZeropageIoctl(fault_page, gPageSize, /*tolerate_eexist=*/true, tolerate_enoent);
  } else {
    // Find the linear-alloc space containing the fault-page.
    LinearAllocSpaceData* space_data = nullptr;
    for (auto& data : linear_alloc_spaces_data_) {
      if (data.begin_ <= fault_page && fault_page < data.end_) {
        space_data = &data;
        break;
      }
    }
    DCHECK_NE(space_data, nullptr);
    ptrdiff_t diff = space_data->shadow_.Begin() - space_data->begin_;
    size_t page_idx = DivideByPageSize(fault_page - space_data->begin_);
    Atomic<PageState>* state_arr =
        reinterpret_cast<Atomic<PageState>*>(space_data->page_status_map_.Begin());
    PageState state = state_arr[page_idx].load(std::memory_order_acquire);
    uint32_t backoff_count = 0;
    while (true) {
      switch (state) {
        case PageState::kUnprocessed: {
          // Acquire order to ensure we don't start writing to the shadow map,
          // which is shared, before the CAS is successful.
          if (state_arr[page_idx].compare_exchange_strong(
                  state, PageState::kProcessing, std::memory_order_acquire)) {
            LinearAllocPageUpdater updater(this);
            uint8_t* first_obj = arena_iter->first->GetFirstObject(fault_page);
            // A null first_obj indicates that it's a page from an arena for the
            // intern-table/class-table. So a first object isn't required.
            if (first_obj != nullptr) {
              updater.MultiObjectArena(fault_page + diff, first_obj + diff);
            } else {
              updater.SingleObjectArena(fault_page + diff, gPageSize);
            }
            if (updater.WasLastPageTouched()) {
              state_arr[page_idx].store(PageState::kProcessed, std::memory_order_release);
              state = PageState::kProcessed;
              continue;
            } else {
              // If the page wasn't touched, then it is empty and most likely
              // not present on the shadow side. Furthermore, since the shadow
              // is also userfaultfd-registered, a copy ioctl would fail as the
              // copy-from-user in the kernel would itself cause a userfault.
              // Instead, just map a zeropage, which is not only correct but
              // also efficient as it avoids an unnecessary memcpy in the
              // kernel.
              if (ZeropageIoctl(fault_page,
                                gPageSize,
                                /*tolerate_eexist=*/false,
                                tolerate_enoent)) {
                state_arr[page_idx].store(PageState::kProcessedAndMapped,
                                          std::memory_order_release);
              }
              return;
            }
          }
        }
          continue;
        case PageState::kProcessed:
          // Map as many pages as possible in a single ioctl, without spending
          // time freeing pages.
          if (MapUpdatedLinearAllocPages(fault_page,
                                         fault_page + diff,
                                         state_arr + page_idx,
                                         space_data->end_ - fault_page,
                                         /*free_pages=*/false,
                                         /*single_ioctl=*/true,
                                         tolerate_enoent)) {
            return;
          }
          // fault_page was not mapped by this thread (some other thread claimed
          // it). Wait for it to be mapped before returning.
          FALLTHROUGH_INTENDED;
        case PageState::kProcessing:
        case PageState::kProcessingAndMapping:
        case PageState::kProcessedAndMapping:
          // Wait for the page to be mapped before returning.
          BackOff(backoff_count++);
          state = state_arr[page_idx].load(std::memory_order_acquire);
          continue;
        case PageState::kMutatorProcessing:
          LOG(FATAL) << "Unreachable";
          UNREACHABLE();
        case PageState::kProcessedAndMapped:
          // Somebody else took care of the page.
          return;
      }
      break;
    }
  }
}
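
// Illustrative sketch (not ART's ZeropageIoctl(), which additionally handles
// EEXIST/ENOENT tolerance and retries): the UFFDIO_ZEROPAGE request that
// presumably underlies it. It atomically maps the kernel's shared zero-page
// into the faulting range, so no user-space buffer or memcpy is involved.
[[maybe_unused]] static bool UffdZeropageSketch(int uffd, uint8_t* addr, size_t len) {
  struct uffdio_zeropage zeropage = {};
  zeropage.range.start = reinterpret_cast<uintptr_t>(addr);
  zeropage.range.len = len;
  zeropage.mode = 0;
  return ioctl(uffd, UFFDIO_ZEROPAGE, &zeropage) == 0;
}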

void MarkCompact::ProcessLinearAlloc() {
  GcVisitedArenaPool* arena_pool =
      static_cast<GcVisitedArenaPool*>(Runtime::Current()->GetLinearAllocArenaPool());
  DCHECK_EQ(thread_running_gc_, Thread::Current());
  uint8_t* unmapped_range_start = nullptr;
  uint8_t* unmapped_range_end = nullptr;
  // Pointer to the linear-alloc space containing the current arena in the loop
  // below. Also helps in ensuring that two arenas, which are contiguous in
  // address space but are from different linear-alloc spaces, are not coalesced
  // into one range for mapping purposes.
  LinearAllocSpaceData* space_data = nullptr;
  Atomic<PageState>* state_arr = nullptr;
  ptrdiff_t diff = 0;

  auto map_pages = [&]() {
    DCHECK_NE(diff, 0);
    DCHECK_NE(space_data, nullptr);
    DCHECK_GE(unmapped_range_start, space_data->begin_);
    DCHECK_LT(unmapped_range_start, space_data->end_);
    DCHECK_GT(unmapped_range_end, space_data->begin_);
    DCHECK_LE(unmapped_range_end, space_data->end_);
    DCHECK_LT(unmapped_range_start, unmapped_range_end);
    DCHECK_ALIGNED_PARAM(unmapped_range_end - unmapped_range_start, gPageSize);
    size_t page_idx = DivideByPageSize(unmapped_range_start - space_data->begin_);
    MapUpdatedLinearAllocPages(unmapped_range_start,
                               unmapped_range_start + diff,
                               state_arr + page_idx,
                               unmapped_range_end - unmapped_range_start,
                               /*free_pages=*/true,
                               /*single_ioctl=*/false,
                               /*tolerate_enoent=*/false);
  };
  for (auto& pair : linear_alloc_arenas_) {
    const TrackedArena* arena = pair.first;
    size_t arena_size = arena->Size();
    uint8_t* arena_begin = arena->Begin();
    // linear_alloc_arenas_ is sorted on arena-begin. So we will get all arenas
    // in that order.
    DCHECK_LE(unmapped_range_end, arena_begin);
    DCHECK(space_data == nullptr || arena_begin > space_data->begin_)
        << "space-begin:" << static_cast<void*>(space_data->begin_)
        << " arena-begin:" << static_cast<void*>(arena_begin);
    if (space_data == nullptr || space_data->end_ <= arena_begin) {
      // Map the processed arenas as we are switching to another space.
      if (space_data != nullptr && unmapped_range_end != nullptr) {
        map_pages();
        unmapped_range_end = nullptr;
      }
      // Find the linear-alloc space containing the arena.
      LinearAllocSpaceData* curr_space_data = space_data;
      for (auto& data : linear_alloc_spaces_data_) {
        if (data.begin_ <= arena_begin && arena_begin < data.end_) {
          // Since arenas are sorted, the next space should be higher in address
          // order than the current one.
          DCHECK(space_data == nullptr || data.begin_ >= space_data->end_);
          diff = data.shadow_.Begin() - data.begin_;
          state_arr = reinterpret_cast<Atomic<PageState>*>(data.page_status_map_.Begin());
          space_data = &data;
          break;
        }
      }
      CHECK_NE(space_data, curr_space_data)
          << "Couldn't find space for arena-begin:" << static_cast<void*>(arena_begin);
    }
    // Map the processed arenas if we found a hole within the current space.
    if (unmapped_range_end != nullptr && unmapped_range_end < arena_begin) {
      map_pages();
      unmapped_range_end = nullptr;
    }
    if (unmapped_range_end == nullptr) {
      unmapped_range_start = unmapped_range_end = arena_begin;
    }
    DCHECK_NE(unmapped_range_start, nullptr);
    // It's ok to include all arenas in the unmapped range. Since the
    // corresponding state bytes will be kUnprocessed, we will skip calling
    // ioctl and madvise on arenas which are waiting to be deleted.
    unmapped_range_end += arena_size;
    {
      // Acquire the arena-pool's lock (in shared mode) so that the arena being
      // updated does not get deleted at the same time. If this critical section
      // is too long and impacts mutator response time, then we can get rid of
      // this lock by holding onto the memory ranges of all arenas deleted
      // (since the compaction pause) until compaction finishes.
      ReaderMutexLock rmu(thread_running_gc_, arena_pool->GetLock());
      // If any arenas were freed since the compaction pause then skip them when
      // visiting.
      if (arena->IsWaitingForDeletion()) {
        continue;
      }
      uint8_t* last_byte = pair.second;
      DCHECK_ALIGNED_PARAM(last_byte, gPageSize);
      auto visitor = [space_data, last_byte, diff, this, state_arr](
                         uint8_t* page_begin,
                         uint8_t* first_obj,
                         size_t page_size) REQUIRES_SHARED(Locks::mutator_lock_) {
        // No need to process pages past last_byte as they already have updated
        // gc-roots, if any.
        if (page_begin >= last_byte) {
          return;
        }
        LinearAllocPageUpdater updater(this);
        size_t page_idx = DivideByPageSize(page_begin - space_data->begin_);
        DCHECK_LT(page_idx, space_data->page_status_map_.Size());
        PageState expected_state = PageState::kUnprocessed;
        // Acquire order to ensure that we don't start accessing the shadow page,
        // which is shared with other threads, prior to the CAS. Also, for the
        // same reason, we use 'release' order for changing the state to
        // 'processed'.
        if (state_arr[page_idx].compare_exchange_strong(
                expected_state, PageState::kProcessing, std::memory_order_acquire)) {
          // A null first_obj indicates that it's a page from an arena for the
          // intern-table/class-table. So a first object isn't required.
          if (first_obj != nullptr) {
            updater.MultiObjectArena(page_begin + diff, first_obj + diff);
          } else {
            DCHECK_EQ(page_size, gPageSize);
            updater.SingleObjectArena(page_begin + diff, page_size);
          }
          expected_state = PageState::kProcessing;
          // Store is sufficient as no other thread could be modifying it. Use
          // release order to ensure that the writes to the shadow page are
          // committed to memory before the state change becomes visible.
          if (updater.WasLastPageTouched()) {
            state_arr[page_idx].store(PageState::kProcessed, std::memory_order_release);
          } else {
            // See the comment in ConcurrentlyProcessLinearAllocPage() on the
            // same situation.
            ZeropageIoctl(
                page_begin, gPageSize, /*tolerate_eexist=*/false, /*tolerate_enoent=*/false);
            // The ioctl will act as a release fence.
            state_arr[page_idx].store(PageState::kProcessedAndMapped, std::memory_order_release);
          }
        }
      };

      arena->VisitRoots(visitor);
    }
  }
  if (unmapped_range_end > unmapped_range_start) {
    // Map the remaining pages.
    map_pages();
  }
}
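
// Note: coalescing contiguous arenas into a single unmapped range, and flushing
// it via map_pages() only at space boundaries, holes, or the end of the loop,
// lets MapUpdatedLinearAllocPages() cover many arenas with few UFFDIO_COPY
// ioctls instead of paying at least one syscall per arena.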

void MarkCompact::RegisterUffd(void* addr, size_t size) {
  DCHECK(IsValidFd(uffd_));
  struct uffdio_register uffd_register;
  uffd_register.range.start = reinterpret_cast<uintptr_t>(addr);
  uffd_register.range.len = size;
  uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
  CHECK_EQ(ioctl(uffd_, UFFDIO_REGISTER, &uffd_register), 0)
      << "ioctl_userfaultfd: register failed: " << strerror(errno)
      << ". start:" << static_cast<void*>(addr) << " len:" << PrettySize(size);
}
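
// Illustrative sketch (not how this collector actually creates uffd_; the
// helper name is invented): a minimal way to obtain a userfaultfd that reports
// missing pages via SIGBUS, which is the delivery mechanism SigbusHandler()
// relies on.
[[maybe_unused]] static int CreateSigbusUffdSketch() {
  int fd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
  if (fd < 0) {
    return -1;
  }
  struct uffdio_api api = {};
  api.api = UFFD_API;
  // With UFFD_FEATURE_SIGBUS the kernel raises SIGBUS (si_code == BUS_ADRERR)
  // on a missing page instead of queueing an event to be read from the fd.
  api.features = UFFD_FEATURE_SIGBUS;
  if (ioctl(fd, UFFDIO_API, &api) != 0) {
    close(fd);
    return -1;
  }
  return fd;
}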

// TODO: sometimes we may want to tolerate certain error conditions (like ENOMEM
// when we unregister the unused portion of the moving-space). Implement support
// for that.
void MarkCompact::UnregisterUffd(uint8_t* start, size_t len) {
  DCHECK(IsValidFd(uffd_));
  struct uffdio_range range;
  range.start = reinterpret_cast<uintptr_t>(start);
  range.len = len;
  CHECK_EQ(ioctl(uffd_, UFFDIO_UNREGISTER, &range), 0)
      << "ioctl_userfaultfd: unregister failed: " << strerror(errno)
      << ". addr:" << static_cast<void*>(start) << " len:" << PrettySize(len);
}
3609*795d594fSAndroid Build Coastguard Worker 
CompactionPhase()3610*795d594fSAndroid Build Coastguard Worker void MarkCompact::CompactionPhase() {
3611*795d594fSAndroid Build Coastguard Worker   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
3612*795d594fSAndroid Build Coastguard Worker   {
3613*795d594fSAndroid Build Coastguard Worker     int32_t freed_bytes = black_objs_slide_diff_;
3614*795d594fSAndroid Build Coastguard Worker     bump_pointer_space_->RecordFree(freed_objects_, freed_bytes);
3615*795d594fSAndroid Build Coastguard Worker     RecordFree(ObjectBytePair(freed_objects_, freed_bytes));
3616*795d594fSAndroid Build Coastguard Worker   }
3617*795d594fSAndroid Build Coastguard Worker 
3618*795d594fSAndroid Build Coastguard Worker   CompactMovingSpace<kCopyMode>(compaction_buffers_map_.Begin());
3619*795d594fSAndroid Build Coastguard Worker 
3620*795d594fSAndroid Build Coastguard Worker   ProcessLinearAlloc();
3621*795d594fSAndroid Build Coastguard Worker 
3622*795d594fSAndroid Build Coastguard Worker   auto wait_for_compaction_counter = [this](size_t idx) {
3623*795d594fSAndroid Build Coastguard Worker     SigbusCounterType count = sigbus_in_progress_count_[idx].fetch_or(
3624*795d594fSAndroid Build Coastguard Worker         kSigbusCounterCompactionDoneMask, std::memory_order_acq_rel);
3625*795d594fSAndroid Build Coastguard Worker     // Wait for SIGBUS handlers already in play.
3626*795d594fSAndroid Build Coastguard Worker     for (uint32_t i = 0; count > 0; i++) {
3627*795d594fSAndroid Build Coastguard Worker       BackOff(i);
3628*795d594fSAndroid Build Coastguard Worker       count = sigbus_in_progress_count_[idx].load(std::memory_order_acquire);
3629*795d594fSAndroid Build Coastguard Worker       count &= ~kSigbusCounterCompactionDoneMask;
3630*795d594fSAndroid Build Coastguard Worker     }
3631*795d594fSAndroid Build Coastguard Worker   };
3632*795d594fSAndroid Build Coastguard Worker   // Set compaction-done bit in the first counter to indicate that gc-thread
3633*795d594fSAndroid Build Coastguard Worker   // is done compacting and mutators should stop incrementing this counter.
3634*795d594fSAndroid Build Coastguard Worker   // Mutator should tolerate ENOENT after this. This helps avoid priority
3635*795d594fSAndroid Build Coastguard Worker   // inversion in case mutators need to map zero-pages after compaction is
3636*795d594fSAndroid Build Coastguard Worker   // finished but before gc-thread manages to unregister the spaces.
3637*795d594fSAndroid Build Coastguard Worker   wait_for_compaction_counter(0);
3638*795d594fSAndroid Build Coastguard Worker 
3639*795d594fSAndroid Build Coastguard Worker   // Unregister moving-space
3640*795d594fSAndroid Build Coastguard Worker   size_t moving_space_size = bump_pointer_space_->Capacity();
3641*795d594fSAndroid Build Coastguard Worker   size_t used_size = (moving_first_objs_count_ + black_page_count_) * gPageSize;
3642*795d594fSAndroid Build Coastguard Worker   if (used_size > 0) {
3643*795d594fSAndroid Build Coastguard Worker     UnregisterUffd(bump_pointer_space_->Begin(), used_size);
3644*795d594fSAndroid Build Coastguard Worker   }
3645*795d594fSAndroid Build Coastguard Worker   // Unregister linear-alloc spaces
3646*795d594fSAndroid Build Coastguard Worker   for (auto& data : linear_alloc_spaces_data_) {
3647*795d594fSAndroid Build Coastguard Worker     DCHECK_EQ(data.end_ - data.begin_, static_cast<ssize_t>(data.shadow_.Size()));
3648*795d594fSAndroid Build Coastguard Worker     UnregisterUffd(data.begin_, data.shadow_.Size());
3649*795d594fSAndroid Build Coastguard Worker   }

  // Set compaction-done bit in the second counter to indicate that gc-thread
  // is done unregistering the spaces and therefore mutators, if in SIGBUS,
  // should return without attempting to map the faulted page. When the mutator
  // accesses the address again, it will succeed. Once this counter is 0,
  // the gc-thread can safely initialize/madvise the data structures.
  wait_for_compaction_counter(1);

  // Release all of the memory taken by moving-space's from-map
  from_space_map_.MadviseDontNeedAndZero();
  // mprotect(PROT_NONE) all maps except to-space in debug-mode to catch any unexpected accesses.
  DCHECK_EQ(mprotect(from_space_begin_, moving_space_size, PROT_NONE), 0)
      << "mprotect(PROT_NONE) for from-space failed: " << strerror(errno);

  // madvise the linear-alloc spaces' page-status arrays. Note that we don't
  // need to madvise the shadow-map as the pages from it were reclaimed in
  // ProcessLinearAlloc() after arenas were mapped.
  for (auto& data : linear_alloc_spaces_data_) {
    data.page_status_map_.MadviseDontNeedAndZero();
  }
}

template <size_t kBufferSize>
class MarkCompact::ThreadRootsVisitor : public RootVisitor {
 public:
  explicit ThreadRootsVisitor(MarkCompact* mark_compact, Thread* const self)
        : mark_compact_(mark_compact), self_(self) {}

  ~ThreadRootsVisitor() {
    Flush();
  }

  void VisitRoots(mirror::Object*** roots,
                  size_t count,
                  [[maybe_unused]] const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; i++) {
      mirror::Object* obj = *roots[i];
      if (mark_compact_->MarkObjectNonNullNoPush</*kParallel*/true>(obj)) {
        Push(obj);
      }
    }
  }

  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  [[maybe_unused]] const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
    for (size_t i = 0; i < count; i++) {
      mirror::Object* obj = roots[i]->AsMirrorPtr();
      if (mark_compact_->MarkObjectNonNullNoPush</*kParallel*/true>(obj)) {
        Push(obj);
      }
    }
  }

 private:
  void Flush() REQUIRES_SHARED(Locks::mutator_lock_)
               REQUIRES(Locks::heap_bitmap_lock_) {
    StackReference<mirror::Object>* start;
    StackReference<mirror::Object>* end;
    {
      MutexLock mu(self_, mark_compact_->lock_);
      // Loop here because even after expanding once it may not be sufficient to
      // accommodate all references. It's almost impossible, but there is no harm
      // in implementing it this way.
      while (!mark_compact_->mark_stack_->BumpBack(idx_, &start, &end)) {
        mark_compact_->ExpandMarkStack();
      }
    }
    while (idx_ > 0) {
      *start++ = roots_[--idx_];
    }
    DCHECK_EQ(start, end);
  }

  void Push(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
                                 REQUIRES(Locks::heap_bitmap_lock_) {
    if (UNLIKELY(idx_ >= kBufferSize)) {
      Flush();
    }
    roots_[idx_++].Assign(obj);
  }

  StackReference<mirror::Object> roots_[kBufferSize];
  size_t idx_ = 0;
  MarkCompact* const mark_compact_;
  Thread* const self_;
};
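
// ThreadRootsVisitor batches up to kBufferSize roots in a stack-allocated
// buffer and takes mark_compact_->lock_ only when flushing the batch to the
// shared mark stack, which keeps lock contention low when many threads run
// the checkpoint below concurrently.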

class MarkCompact::CheckpointMarkThreadRoots : public Closure {
 public:
  explicit CheckpointMarkThreadRoots(MarkCompact* mark_compact) : mark_compact_(mark_compact) {}

  void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
    ScopedTrace trace("Marking thread roots");
    // Note: self is not necessarily equal to thread since thread may be
    // suspended.
    Thread* const self = Thread::Current();
    CHECK(thread == self
          || thread->IsSuspended()
          || thread->GetState() == ThreadState::kWaitingPerformingGc)
        << thread->GetState() << " thread " << thread << " self " << self;
    {
      ThreadRootsVisitor</*kBufferSize*/ 20> visitor(mark_compact_, self);
      thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
    }
    // Clear page-buffer to prepare for compaction phase.
    thread->SetThreadLocalGcBuffer(nullptr);

    // If thread is a running mutator, then act on behalf of the garbage
    // collector. See the code in ThreadList::RunCheckpoint.
    mark_compact_->GetBarrier().Pass(self);
  }

 private:
  MarkCompact* const mark_compact_;
};

void MarkCompact::MarkRootsCheckpoint(Thread* self, Runtime* runtime) {
  // We revoke TLABs later during the paused round of marking.
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CheckpointMarkThreadRoots check_point(this);
  ThreadList* thread_list = runtime->GetThreadList();
  gc_barrier_.Init(self, 0);
  // Request the checkpoint be run on all threads, returning a count of the
  // threads that must run through the barrier, including self.
  size_t barrier_count = thread_list->RunCheckpoint(&check_point);
  // Release locks then wait for all mutator threads to pass the barrier.
  // If there are no threads to wait on, which implies that all the checkpoint
  // functions have finished, then there is no need to release the locks.
  if (barrier_count == 0) {
    return;
  }
  Locks::heap_bitmap_lock_->ExclusiveUnlock(self);
  Locks::mutator_lock_->SharedUnlock(self);
  {
    ScopedThreadStateChange tsc(self, ThreadState::kWaitingForCheckPointsToRun);
    gc_barrier_.Increment(self, barrier_count);
  }
  Locks::mutator_lock_->SharedLock(self);
  Locks::heap_bitmap_lock_->ExclusiveLock(self);
}

void MarkCompact::MarkNonThreadRoots(Runtime* runtime) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  runtime->VisitNonThreadRoots(this);
}

void MarkCompact::MarkConcurrentRoots(VisitRootFlags flags, Runtime* runtime) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  runtime->VisitConcurrentRoots(this, flags);
}

void MarkCompact::RevokeAllThreadLocalBuffers() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  bump_pointer_space_->RevokeAllThreadLocalBuffers();
}

class MarkCompact::ScanObjectVisitor {
 public:
  explicit ScanObjectVisitor(MarkCompact* const mark_compact) ALWAYS_INLINE
      : mark_compact_(mark_compact) {}

  void operator()(ObjPtr<mirror::Object> obj) const
      ALWAYS_INLINE
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    mark_compact_->ScanObject</*kUpdateLiveWords*/ false>(obj.Ptr());
  }

 private:
  MarkCompact* const mark_compact_;
};

void MarkCompact::UpdateAndMarkModUnion() {
  accounting::CardTable* const card_table = heap_->GetCardTable();
  for (const auto& space : immune_spaces_.GetSpaces()) {
    const char* name = space->IsZygoteSpace()
        ? "UpdateAndMarkZygoteModUnionTable"
        : "UpdateAndMarkImageModUnionTable";
    DCHECK(space->IsZygoteSpace() || space->IsImageSpace()) << *space;
    TimingLogger::ScopedTiming t(name, GetTimings());
    accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
    if (table != nullptr) {
      // UpdateAndMarkReferences() doesn't visit Reference-type objects. But
      // that's fine because these objects are immutable enough (referent can
      // only be cleared) and hence the only referents they can have are intra-space.
      table->UpdateAndMarkReferences(this);
    } else {
      // No mod-union table, scan all dirty/aged cards in the corresponding
      // card-table. This can only occur for app images.
      card_table->Scan</*kClearCard*/ false>(space->GetMarkBitmap(),
                                             space->Begin(),
                                             space->End(),
                                             ScanObjectVisitor(this),
                                             gc::accounting::CardTable::kCardAged);
    }
  }
}

void MarkCompact::MarkReachableObjects() {
  UpdateAndMarkModUnion();
  // Recursively mark all the non-image bits set in the mark bitmap.
  ProcessMarkStack();
}

void MarkCompact::ScanDirtyObjects(bool paused, uint8_t minimum_age) {
  accounting::CardTable* card_table = heap_->GetCardTable();
  for (const auto& space : heap_->GetContinuousSpaces()) {
    const char* name = nullptr;
    switch (space->GetGcRetentionPolicy()) {
    case space::kGcRetentionPolicyNeverCollect:
      name = paused ? "(Paused)ScanGrayImmuneSpaceObjects" : "ScanGrayImmuneSpaceObjects";
      break;
    case space::kGcRetentionPolicyFullCollect:
      name = paused ? "(Paused)ScanGrayZygoteSpaceObjects" : "ScanGrayZygoteSpaceObjects";
      break;
    case space::kGcRetentionPolicyAlwaysCollect:
      name = paused ? "(Paused)ScanGrayAllocSpaceObjects" : "ScanGrayAllocSpaceObjects";
      break;
    }
    TimingLogger::ScopedTiming t(name, GetTimings());
    card_table->Scan</*kClearCard*/ false>(
        space->GetMarkBitmap(), space->Begin(), space->End(), ScanObjectVisitor(this), minimum_age);
  }
}

void MarkCompact::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
  ScanDirtyObjects(paused, minimum_age);
  ProcessMarkStack();
}

void MarkCompact::MarkRoots(VisitRootFlags flags) {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  Runtime* runtime = Runtime::Current();
  // Make sure that the checkpoint which collects the stack roots is the first
  // one capturing GC roots, as it establishes the address beyond which
  // everything allocated afterwards (during this marking phase) will be
  // considered 'marked'.
  MarkRootsCheckpoint(thread_running_gc_, runtime);
  MarkNonThreadRoots(runtime);
  MarkConcurrentRoots(flags, runtime);
}

void MarkCompact::PreCleanCards() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  CHECK(!Locks::mutator_lock_->IsExclusiveHeld(thread_running_gc_));
  // Age the card-table before thread stack scanning checkpoint in MarkRoots()
  // as it ensures that there are no in-progress write barriers which started
  // prior to aging the card-table.
  PrepareCardTableForMarking(/*clear_alloc_space_cards*/ false);
  MarkRoots(static_cast<VisitRootFlags>(kVisitRootFlagClearRootLog | kVisitRootFlagNewRoots));
  RecursiveMarkDirtyObjects(/*paused*/ false, accounting::CardTable::kCardDirty - 1);
}

// In a concurrent marking algorithm, if we are not using a write/read barrier,
// as in this case, then we need a stop-the-world (STW) round at the end to
// mark objects which were written to while concurrent marking was in progress.
// In order to minimize the pause time, we could take one of two approaches:
// 1. Keep repeating concurrent marking of dirty cards until the time spent goes
// below a threshold.
// 2. Do two rounds concurrently and then attempt a paused one. If we figure
// that it's taking too long, then resume mutators and retry.
//
// Given the non-trivial fixed overhead of running a round (card table and root
// scan), it might be better to go with approach 2.
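//
// Here, the concurrent rounds are the initial marking in this function and
// the dirty-card pass in PreCleanCards(); the final paused round follows in
// the marking pause.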
void MarkCompact::MarkingPhase() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  DCHECK_EQ(thread_running_gc_, Thread::Current());
  WriterMutexLock mu(thread_running_gc_, *Locks::heap_bitmap_lock_);
  MaybeClampGcStructures();
  PrepareCardTableForMarking(/*clear_alloc_space_cards*/ true);
  MarkZygoteLargeObjects();
  MarkRoots(
        static_cast<VisitRootFlags>(kVisitRootFlagAllRoots | kVisitRootFlagStartLoggingNewRoots));
  MarkReachableObjects();
  // Pre-clean dirtied cards to reduce pauses.
  PreCleanCards();

  // Set up reference processing and forward soft references once before
  // enabling the slow path (in MarkingPause).
  ReferenceProcessor* rp = GetHeap()->GetReferenceProcessor();
  bool clear_soft_references = GetCurrentIteration()->GetClearSoftReferences();
  rp->Setup(thread_running_gc_, this, /*concurrent=*/ true, clear_soft_references);
  if (!clear_soft_references) {
    // Forward as many SoftReferences as possible before inhibiting reference access.
    rp->ForwardSoftReferences(GetTimings());
  }
}

class MarkCompact::RefFieldsVisitor {
 public:
  ALWAYS_INLINE explicit RefFieldsVisitor(MarkCompact* const mark_compact)
    : mark_compact_(mark_compact) {}

  ALWAYS_INLINE void operator()(mirror::Object* obj,
                                MemberOffset offset,
                                [[maybe_unused]] bool is_static) const
      REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_compact_->MarkObject(obj->GetFieldObject<mirror::Object>(offset), obj, offset);
  }

  void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const ALWAYS_INLINE
      REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
    mark_compact_->DelayReferenceReferent(klass, ref);
  }

  void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const ALWAYS_INLINE
      REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!root->IsNull()) {
      VisitRoot(root);
    }
  }

  void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckLocks) {
      Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
      Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
    }
    mark_compact_->MarkObject(root->AsMirrorPtr());
  }

 private:
  MarkCompact* const mark_compact_;
};

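// Returns the number of live bytes covered by the bitmap words making up the
// vector-word at chunk_idx; each set bit in the live-words bitmap represents
// kAlignment bytes of live data.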
template <size_t kAlignment>
size_t MarkCompact::LiveWordsBitmap<kAlignment>::LiveBytesInBitmapWord(size_t chunk_idx) const {
  const size_t index = chunk_idx * kBitmapWordsPerVectorWord;
  size_t words = 0;
  for (uint32_t i = 0; i < kBitmapWordsPerVectorWord; i++) {
    words += POPCOUNT(Bitmap::Begin()[index + i]);
  }
  return words * kAlignment;
}

void MarkCompact::UpdateLivenessInfo(mirror::Object* obj, size_t obj_size) {
  DCHECK(obj != nullptr);
  DCHECK_EQ(obj_size, obj->SizeOf<kDefaultVerifyFlags>());
  uintptr_t obj_begin = reinterpret_cast<uintptr_t>(obj);
  UpdateClassAfterObjectMap(obj);
  size_t size = RoundUp(obj_size, kAlignment);
  uintptr_t bit_index = live_words_bitmap_->SetLiveWords(obj_begin, size);
  size_t chunk_idx = (obj_begin - live_words_bitmap_->Begin()) / kOffsetChunkSize;
  // Compute the bit-index within the chunk-info vector word.
  bit_index %= kBitsPerVectorWord;
  size_t first_chunk_portion = std::min(size, (kBitsPerVectorWord - bit_index) * kAlignment);

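  // Worked example with hypothetical numbers: if kOffsetChunkSize were 4KB and
  // a 10KB (aligned) object started 1KB into chunk N, then first_chunk_portion
  // would be 3KB (credited to chunk N just below), the loop would credit the
  // full 4KB to chunk N+1, and the trailing 3KB would be added to chunk N+2
  // after the loop.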
  chunk_info_vec_[chunk_idx++] += first_chunk_portion;
  DCHECK_LE(first_chunk_portion, size);
  for (size -= first_chunk_portion; size > kOffsetChunkSize; size -= kOffsetChunkSize) {
    DCHECK_EQ(chunk_info_vec_[chunk_idx], 0u);
    chunk_info_vec_[chunk_idx++] = kOffsetChunkSize;
  }
  chunk_info_vec_[chunk_idx] += size;
  freed_objects_--;
}

template <bool kUpdateLiveWords>
void MarkCompact::ScanObject(mirror::Object* obj) {
  mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
  // TODO(lokeshgidra): Remove the following condition once b/373609505 is fixed.
  if (UNLIKELY(klass == nullptr)) {
    // It has been seen in the ConcurrentCopying GC that, after a small wait,
    // reloading the class pointer yields a valid class object. So as a
    // workaround, we can continue execution and log an error that this happened.
    for (size_t i = 0; i < 1000; i++) {
      // Wait for 1ms at a time. Don't wait for more than 1 second in total.
      usleep(1000);
      klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
      if (klass != nullptr) {
        std::ostringstream oss;
        klass->DumpClass(oss, mirror::Class::kDumpClassFullDetail);
        LOG(FATAL_WITHOUT_ABORT) << "klass pointer for obj: " << obj
                                 << " found to be null first. Reloading after " << i
                                 << " iterations of 1ms sleep fetched klass: " << oss.str();
        break;
      }
    }

    if (UNLIKELY(klass == nullptr)) {
      // It must be heap corruption.
      LOG(FATAL_WITHOUT_ABORT) << "klass pointer for obj: " << obj << " found to be null.";
    }
    heap_->GetVerification()->LogHeapCorruption(
        obj, mirror::Object::ClassOffset(), klass, /*fatal=*/true);
  }
  // The size of `obj` is used both here (to update `bytes_scanned_`) and in
  // `UpdateLivenessInfo`. As fetching this value can be expensive, do it once
  // here and pass that information to `UpdateLivenessInfo`.
  size_t obj_size = obj->SizeOf<kDefaultVerifyFlags>();
  bytes_scanned_ += obj_size;

  RefFieldsVisitor visitor(this);
  DCHECK(IsMarked(obj)) << "Scanning marked object " << obj << "\n" << heap_->DumpSpaces();
  if (kUpdateLiveWords && HasAddress(obj)) {
    UpdateLivenessInfo(obj, obj_size);
  }
  obj->VisitReferences(visitor, visitor);
}

// Scan anything that's on the mark stack.
void MarkCompact::ProcessMarkStack() {
  TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
  // TODO: try prefetch like in CMS
  while (!mark_stack_->IsEmpty()) {
    mirror::Object* obj = mark_stack_->PopBack();
    DCHECK(obj != nullptr);
    ScanObject</*kUpdateLiveWords*/ true>(obj);
  }
}

void MarkCompact::ExpandMarkStack() {
  const size_t new_size = mark_stack_->Capacity() * 2;
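  // Resizing the stack replaces its backing storage and discards its contents,
  // so save the current entries first and re-push them afterwards.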
  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(),
                                                   mark_stack_->End());
  mark_stack_->Resize(new_size);
  for (auto& ref : temp) {
    mark_stack_->PushBack(ref.AsMirrorPtr());
  }
  DCHECK(!mark_stack_->IsFull());
}

inline void MarkCompact::PushOnMarkStack(mirror::Object* obj) {
  if (UNLIKELY(mark_stack_->IsFull())) {
    ExpandMarkStack();
  }
  mark_stack_->PushBack(obj);
}

inline void MarkCompact::MarkObjectNonNull(mirror::Object* obj,
                                           mirror::Object* holder,
                                           MemberOffset offset) {
  DCHECK(obj != nullptr);
  if (MarkObjectNonNullNoPush</*kParallel*/false>(obj, holder, offset)) {
    PushOnMarkStack(obj);
  }
}

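// Returns true if obj was newly marked and therefore needs to be pushed on
// the mark stack by the caller; returns false for already-marked objects and
// for objects in immune or large-object spaces, which are never pushed.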
template <bool kParallel>
inline bool MarkCompact::MarkObjectNonNullNoPush(mirror::Object* obj,
                                                 mirror::Object* holder,
                                                 MemberOffset offset) {
  // We expect most of the references to be in bump-pointer space, so try that
  // first to keep the cost of this function minimal.
  if (LIKELY(HasAddress(obj))) {
    return kParallel ? !moving_space_bitmap_->AtomicTestAndSet(obj)
                     : !moving_space_bitmap_->Set(obj);
  } else if (non_moving_space_bitmap_->HasAddress(obj)) {
    return kParallel ? !non_moving_space_bitmap_->AtomicTestAndSet(obj)
                     : !non_moving_space_bitmap_->Set(obj);
  } else if (immune_spaces_.ContainsObject(obj)) {
    DCHECK(IsMarked(obj) != nullptr);
    return false;
  } else {
    // Must be a large-object space, otherwise it's a case of heap corruption.
    if (!IsAlignedParam(obj, space::LargeObjectSpace::ObjectAlignment())) {
      // Objects in large-object space are aligned to the large-object alignment.
      // So if we have an object which doesn't belong to any space and is not
      // aligned to that boundary either, then it's memory corruption.
      // TODO: implement protect/unprotect in bump-pointer space.
      heap_->GetVerification()->LogHeapCorruption(holder, offset, obj, /*fatal*/ true);
    }
    DCHECK_NE(heap_->GetLargeObjectsSpace(), nullptr)
        << "ref=" << obj
        << " doesn't belong to any of the spaces and large object space doesn't exist";
    accounting::LargeObjectBitmap* los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
    DCHECK(los_bitmap->HasAddress(obj));
    if (kParallel) {
      los_bitmap->AtomicTestAndSet(obj);
    } else {
      los_bitmap->Set(obj);
    }
    // We only have strings and primitive arrays in large object space. So
    // there is no reason to push into mark-stack.
    DCHECK(obj->IsString() || (obj->IsArrayInstance() && !obj->IsObjectArray()));
    return false;
  }
}

inline void MarkCompact::MarkObject(mirror::Object* obj,
                                    mirror::Object* holder,
                                    MemberOffset offset) {
  if (obj != nullptr) {
    MarkObjectNonNull(obj, holder, offset);
  }
}

mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
  MarkObject(obj, nullptr, MemberOffset(0));
  return obj;
}

void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj,
                                    [[maybe_unused]] bool do_atomic_update) {
  MarkObject(obj->AsMirrorPtr(), nullptr, MemberOffset(0));
}

void MarkCompact::VisitRoots(mirror::Object*** roots,
                             size_t count,
                             const RootInfo& info) {
  if (compacting_) {
    uint8_t* moving_space_begin = black_dense_end_;
    uint8_t* moving_space_end = moving_space_end_;
    for (size_t i = 0; i < count; ++i) {
      UpdateRoot(roots[i], moving_space_begin, moving_space_end, info);
    }
  } else {
    for (size_t i = 0; i < count; ++i) {
      MarkObjectNonNull(*roots[i]);
    }
  }
}

void MarkCompact::VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                             size_t count,
                             const RootInfo& info) {
  // TODO: do we need to check if the root is null or not?
  if (compacting_) {
    uint8_t* moving_space_begin = black_dense_end_;
    uint8_t* moving_space_end = moving_space_end_;
    for (size_t i = 0; i < count; ++i) {
      UpdateRoot(roots[i], moving_space_begin, moving_space_end, info);
    }
  } else {
    for (size_t i = 0; i < count; ++i) {
      MarkObjectNonNull(roots[i]->AsMirrorPtr());
    }
  }
}

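// Returns the object itself if it is marked (or immune), null if it is not,
// and, while compaction is in progress, the post-compact address for marked
// objects in the moving space that are going to be slid or compacted.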
mirror::Object* MarkCompact::IsMarked(mirror::Object* obj) {
  if (HasAddress(obj)) {
    const bool is_black = reinterpret_cast<uint8_t*>(obj) >= black_allocations_begin_;
    if (compacting_) {
      if (is_black) {
        return PostCompactBlackObjAddr(obj);
      } else if (moving_space_bitmap_->Test(obj)) {
        if (reinterpret_cast<uint8_t*>(obj) < black_dense_end_) {
          return obj;
        } else {
          return PostCompactOldObjAddr(obj);
        }
      } else {
        return nullptr;
      }
    }
    return (is_black || moving_space_bitmap_->Test(obj)) ? obj : nullptr;
  } else if (non_moving_space_bitmap_->HasAddress(obj)) {
    if (non_moving_space_bitmap_->Test(obj)) {
      return obj;
    }
  } else if (immune_spaces_.ContainsObject(obj)) {
    return obj;
  } else {
    DCHECK(heap_->GetLargeObjectsSpace())
        << "ref=" << obj
        << " doesn't belong to any of the spaces and large object space doesn't exist";
    accounting::LargeObjectBitmap* los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
    if (los_bitmap->HasAddress(obj)) {
      DCHECK(IsAlignedParam(obj, space::LargeObjectSpace::ObjectAlignment()));
      if (los_bitmap->Test(obj)) {
        return obj;
      }
    } else {
      // The given obj is not in any of the known spaces, so return null. This
      // could happen for instance in interpreter caches wherein a concurrent
      // update to the cache could result in obj being a non-reference. This is
      // tolerable because SweepInterpreterCaches only updates if the given
      // object has moved, which can't be the case for a non-reference.
      return nullptr;
    }
  }
  return marking_done_ && IsOnAllocStack(obj) ? obj : nullptr;
}

bool MarkCompact::IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj,
                                              [[maybe_unused]] bool do_atomic_update) {
  mirror::Object* ref = obj->AsMirrorPtr();
  if (ref == nullptr) {
    return true;
  }
  return IsMarked(ref);
}

// Process the 'referent' field in a java.lang.ref.Reference. If the referent
// has not yet been marked, put it on the appropriate list in the heap for later
// processing.
void MarkCompact::DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                         ObjPtr<mirror::Reference> ref) {
  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}

void MarkCompact::FinishPhase() {
  GetCurrentIteration()->SetScannedBytes(bytes_scanned_);
  bool is_zygote = Runtime::Current()->IsZygote();
  compacting_ = false;
  marking_done_ = false;

  ZeroAndReleaseMemory(compaction_buffers_map_.Begin(), compaction_buffers_map_.Size());
  info_map_.MadviseDontNeedAndZero();
  live_words_bitmap_->ClearBitmap();
  if (moving_space_begin_ == black_dense_end_) {
    moving_space_bitmap_->Clear();
  } else {
    DCHECK_LT(moving_space_begin_, black_dense_end_);
    DCHECK_LE(black_dense_end_, moving_space_end_);
    moving_space_bitmap_->ClearRange(reinterpret_cast<mirror::Object*>(black_dense_end_),
                                     reinterpret_cast<mirror::Object*>(moving_space_end_));
  }
  bump_pointer_space_->SetBlackDenseRegionSize(black_dense_end_ - moving_space_begin_);

  if (UNLIKELY(is_zygote && IsValidFd(uffd_))) {
    // This unregisters all ranges as a side-effect.
    close(uffd_);
    uffd_ = kFdUnused;
    uffd_initialized_ = false;
  }
  CHECK(mark_stack_->IsEmpty());  // Ensure that the mark stack is empty.
  mark_stack_->Reset();
  DCHECK_EQ(thread_running_gc_, Thread::Current());
  if (kIsDebugBuild) {
    MutexLock mu(thread_running_gc_, lock_);
    if (updated_roots_.get() != nullptr) {
      updated_roots_->clear();
    }
  }
  class_after_obj_map_.clear();
  linear_alloc_arenas_.clear();
  {
    ReaderMutexLock mu(thread_running_gc_, *Locks::mutator_lock_);
    WriterMutexLock mu2(thread_running_gc_, *Locks::heap_bitmap_lock_);
    heap_->ClearMarkedObjects();
  }
  GcVisitedArenaPool* arena_pool =
      static_cast<GcVisitedArenaPool*>(Runtime::Current()->GetLinearAllocArenaPool());
  arena_pool->DeleteUnusedArenas();
}

}  // namespace collector
}  // namespace gc
}  // namespace art