xref: /aosp_15_r20/external/scudo/standalone/primary32.h (revision 76559068c068bd27e82aff38fac3bfc865233bca)
1 //===-- primary32.h ---------------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #ifndef SCUDO_PRIMARY32_H_
10 #define SCUDO_PRIMARY32_H_
11 
12 #include "allocator_common.h"
13 #include "bytemap.h"
14 #include "common.h"
15 #include "list.h"
16 #include "local_cache.h"
17 #include "options.h"
18 #include "release.h"
19 #include "report.h"
20 #include "stats.h"
21 #include "string_utils.h"
22 #include "thread_annotations.h"
23 
24 namespace scudo {
25 
26 // SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
27 //
28 // It maps Regions of 2^RegionSizeLog bytes, aligned on a 2^RegionSizeLog-byte
29 // boundary, and keeps a bytemap of the mappable address space to track the
30 // size class each Region is associated with.
31 //
32 // Mapped regions are split into equally sized Blocks according to the size
33 // class they belong to, and the associated pointers are shuffled to prevent any
34 // predictable address pattern (the predictability increases with the block
35 // size).
36 //
37 // Regions for size class 0 are special and used to hold TransferBatches, which
38 // transfer arrays of pointers from the global size-class freelist to the
39 // thread-specific freelist for said class, and back.
40 //
41 // Memory used by this allocator is never unmapped but can be partially
42 // reclaimed if the platform allows for it.
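//
// As a rough sketch of the bookkeeping, assume a RegionSizeLog of 18, i.e.
// 256 KiB Regions (an illustrative value only; the real one comes from the
// Config):
//
//   uptr P = 0x5abcd123;                     // a block handed out to the user
//   uptr RegionIndex = P >> 18;              // 0x16af, see computeRegionId()
//   u8 Entry = PossibleRegions[RegionIndex]; // ClassId + 1, or 0 if unmapped
//
// The bytemap entry is enough to recover the size class of any pointer that
// falls within a mapped Region.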
43 
44 template <typename Config> class SizeClassAllocator32 {
45 public:
46   typedef typename Config::CompactPtrT CompactPtrT;
47   typedef typename Config::SizeClassMap SizeClassMap;
48   static const uptr GroupSizeLog = Config::getGroupSizeLog();
49   // The bytemap can only track UINT8_MAX - 1 classes.
50   static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
51   // Regions should be large enough to hold the largest Block.
52   static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize,
53                 "");
54   typedef SizeClassAllocator32<Config> ThisT;
55   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
56   typedef TransferBatch<ThisT> TransferBatchT;
57   typedef BatchGroup<ThisT> BatchGroupT;
58 
59   static uptr getSizeByClassId(uptr ClassId) {
60     return (ClassId == SizeClassMap::BatchClassId)
61                ? Max(sizeof(BatchGroupT), sizeof(TransferBatchT))
62                : SizeClassMap::getSizeByClassId(ClassId);
63   }
64 
65   static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
66 
67   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
68     if (SCUDO_FUCHSIA)
69       reportError("SizeClassAllocator32 is not supported on Fuchsia");
70 
71     if (SCUDO_TRUSTY)
72       reportError("SizeClassAllocator32 is not supported on Trusty");
73 
74     DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
75     PossibleRegions.init();
76     u32 Seed;
77     const u64 Time = getMonotonicTimeFast();
78     if (!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed)))
79       Seed = static_cast<u32>(
80           Time ^ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
81     for (uptr I = 0; I < NumClasses; I++) {
82       SizeClassInfo *Sci = getSizeClassInfo(I);
83       Sci->RandState = getRandomU32(&Seed);
84       // Sci->MaxRegionIndex is already initialized to 0.
85       Sci->MinRegionIndex = NumRegions;
86       Sci->ReleaseInfo.LastReleaseAtNs = Time;
87     }
88 
89     // The default value in the primary config takes precedence over the argument.
90     if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
91       ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
92     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
93   }
94 
95   void unmapTestOnly() {
96     {
97       ScopedLock L(RegionsStashMutex);
98       while (NumberOfStashedRegions > 0) {
99         unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
100               RegionSize);
101       }
102     }
103 
104     uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
105     for (uptr I = 0; I < NumClasses; I++) {
106       SizeClassInfo *Sci = getSizeClassInfo(I);
107       ScopedLock L(Sci->Mutex);
108       if (Sci->MinRegionIndex < MinRegionIndex)
109         MinRegionIndex = Sci->MinRegionIndex;
110       if (Sci->MaxRegionIndex > MaxRegionIndex)
111         MaxRegionIndex = Sci->MaxRegionIndex;
112       *Sci = {};
113     }
114 
115     ScopedLock L(ByteMapMutex);
116     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
117       if (PossibleRegions[I])
118         unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
119     PossibleRegions.unmapTestOnly();
120   }
121 
122   // When all blocks are freed, the blocks in the freelists have to add up to `AllocatedUser`.
123   void verifyAllBlocksAreReleasedTestOnly() {
124     // `BatchGroup` and `TransferBatch` also use the blocks from BatchClass.
125     uptr BatchClassUsedInFreeLists = 0;
126     for (uptr I = 0; I < NumClasses; I++) {
127       // We have to count BatchClassUsedInFreeLists in other regions first.
128       if (I == SizeClassMap::BatchClassId)
129         continue;
130       SizeClassInfo *Sci = getSizeClassInfo(I);
131       ScopedLock L1(Sci->Mutex);
132       uptr TotalBlocks = 0;
133       for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
134         // `BG::Batches` are `TransferBatches`. +1 for `BatchGroup`.
135         BatchClassUsedInFreeLists += BG.Batches.size() + 1;
136         for (const auto &It : BG.Batches)
137           TotalBlocks += It.getCount();
138       }
139 
140       const uptr BlockSize = getSizeByClassId(I);
141       DCHECK_EQ(TotalBlocks, Sci->AllocatedUser / BlockSize);
142       DCHECK_EQ(Sci->FreeListInfo.PushedBlocks, Sci->FreeListInfo.PoppedBlocks);
143     }
144 
145     SizeClassInfo *Sci = getSizeClassInfo(SizeClassMap::BatchClassId);
146     ScopedLock L1(Sci->Mutex);
147     uptr TotalBlocks = 0;
148     for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
149       if (LIKELY(!BG.Batches.empty())) {
150         for (const auto &It : BG.Batches)
151           TotalBlocks += It.getCount();
152       } else {
153         // `BatchGroup` with empty freelist doesn't have `TransferBatch` record
154         // itself.
155         ++TotalBlocks;
156       }
157     }
158 
159     const uptr BlockSize = getSizeByClassId(SizeClassMap::BatchClassId);
160     DCHECK_EQ(TotalBlocks + BatchClassUsedInFreeLists,
161               Sci->AllocatedUser / BlockSize);
162     const uptr BlocksInUse =
163         Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
164     DCHECK_EQ(BlocksInUse, BatchClassUsedInFreeLists);
165   }
166 
167   CompactPtrT compactPtr(UNUSED uptr ClassId, uptr Ptr) const {
168     return static_cast<CompactPtrT>(Ptr);
169   }
170 
171   void *decompactPtr(UNUSED uptr ClassId, CompactPtrT CompactPtr) const {
172     return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
173   }
174 
175   uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
176     const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
177     return CompactPtr & ~Mask;
178   }
179 
180   uptr decompactGroupBase(uptr CompactPtrGroupBase) {
181     return CompactPtrGroupBase;
182   }
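  // Illustrative sketch: CompactPtrT is simply the raw address here (see
  // compactPtr() above), so assuming GroupSizeLog == 18 (256 KiB groups, an
  // assumed value for this example), a block at 0x5abcd123 belongs to the
  // group whose base is
  //
  //   compactPtrGroupBase(0x5abcd123) == 0x5abcd123 & ~0x3ffff == 0x5abc0000
  //
  // and decompactGroupBase() returns that base unchanged.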
183 
184   ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
185     const uptr PageSize = getPageSizeCached();
186     return BlockSize < PageSize / 16U;
187   }
188 
189   ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
190     const uptr PageSize = getPageSizeCached();
191     return BlockSize > PageSize;
192   }
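  // For example, with a cached page size of 4096 bytes (a common value, used
  // here only for illustration), blocks smaller than 256 bytes (4096 / 16)
  // count as "small" and blocks larger than 4096 bytes count as "large";
  // sizes in between are neither.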
193 
194   u16 popBlocks(CacheT *C, uptr ClassId, CompactPtrT *ToArray,
195                 const u16 MaxBlockCount) {
196     DCHECK_LT(ClassId, NumClasses);
197     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
198     ScopedLock L(Sci->Mutex);
199 
200     u16 PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
201     if (UNLIKELY(PopCount == 0)) {
202       if (UNLIKELY(!populateFreeList(C, ClassId, Sci)))
203         return 0U;
204       PopCount = popBlocksImpl(C, ClassId, Sci, ToArray, MaxBlockCount);
205       DCHECK_NE(PopCount, 0U);
206     }
207 
208     return PopCount;
209   }
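  // A minimal usage sketch: the per-thread cache layer would typically refill
  // along these lines, with `Allocator`, `C` and `ClassId` assumed to be set
  // up elsewhere:
  //
  //   CompactPtrT Refill[16];
  //   const u16 Got = Allocator->popBlocks(C, ClassId, Refill,
  //                                        /*MaxBlockCount=*/16U);
  //   // Got == 0 only if a new region could not be populated; otherwise the
  //   // first Got entries hold compact pointers, ready for decompactPtr().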
210 
211   // Push the array of free blocks to the designated batch group.
212   void pushBlocks(CacheT *C, uptr ClassId, CompactPtrT *Array, u32 Size) {
213     DCHECK_LT(ClassId, NumClasses);
214     DCHECK_GT(Size, 0);
215 
216     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
217     if (ClassId == SizeClassMap::BatchClassId) {
218       ScopedLock L(Sci->Mutex);
219       pushBatchClassBlocks(Sci, Array, Size);
220       return;
221     }
222 
223     // TODO(chiahungduan): Consider not doing grouping if the group size is not
224     // greater than the block size with a certain scale.
225 
226     // Sort the blocks so that blocks belonging to the same group can be pushed
227     // together.
228     bool SameGroup = true;
229     for (u32 I = 1; I < Size; ++I) {
230       if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
231         SameGroup = false;
232       CompactPtrT Cur = Array[I];
233       u32 J = I;
234       while (J > 0 &&
235              compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
236         Array[J] = Array[J - 1];
237         --J;
238       }
239       Array[J] = Cur;
240     }
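    // A small worked example of the insertion sort above, with made-up
    // addresses and 256 KiB groups: the input {0x30040010, 0x30000020,
    // 0x30040030} has group bases {0x30040000, 0x30000000, 0x30040000}, so
    // SameGroup becomes false and the array is reordered to
    // {0x30000020, 0x30040010, 0x30040030}: blocks of the same group end up
    // adjacent and groups appear in ascending order of their base.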
241 
242     ScopedLock L(Sci->Mutex);
243     pushBlocksImpl(C, ClassId, Sci, Array, Size, SameGroup);
244   }
245 
246   void disable() NO_THREAD_SAFETY_ANALYSIS {
247     // The BatchClassId must be locked last since other classes can use it.
248     for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--) {
249       if (static_cast<uptr>(I) == SizeClassMap::BatchClassId)
250         continue;
251       getSizeClassInfo(static_cast<uptr>(I))->Mutex.lock();
252     }
253     getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.lock();
254     RegionsStashMutex.lock();
255     ByteMapMutex.lock();
256   }
257 
258   void enable() NO_THREAD_SAFETY_ANALYSIS {
259     ByteMapMutex.unlock();
260     RegionsStashMutex.unlock();
261     getSizeClassInfo(SizeClassMap::BatchClassId)->Mutex.unlock();
262     for (uptr I = 0; I < NumClasses; I++) {
263       if (I == SizeClassMap::BatchClassId)
264         continue;
265       getSizeClassInfo(I)->Mutex.unlock();
266     }
267   }
268 
269   template <typename F> void iterateOverBlocks(F Callback) {
270     uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
271     for (uptr I = 0; I < NumClasses; I++) {
272       SizeClassInfo *Sci = getSizeClassInfo(I);
273       // TODO: The call of `iterateOverBlocks` requires disabling
274       // SizeClassAllocator32. We may consider locking each region on demand
275       // only.
276       Sci->Mutex.assertHeld();
277       if (Sci->MinRegionIndex < MinRegionIndex)
278         MinRegionIndex = Sci->MinRegionIndex;
279       if (Sci->MaxRegionIndex > MaxRegionIndex)
280         MaxRegionIndex = Sci->MaxRegionIndex;
281     }
282 
283     // SizeClassAllocator32 is disabled, i.e., ByteMapMutex is held.
284     ByteMapMutex.assertHeld();
285 
286     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
287       if (PossibleRegions[I] &&
288           (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
289         const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
290         const uptr From = I * RegionSize;
291         const uptr To = From + (RegionSize / BlockSize) * BlockSize;
292         for (uptr Block = From; Block < To; Block += BlockSize)
293           Callback(Block);
294       }
295     }
296   }
297 
298   void getStats(ScopedString *Str) {
299     // TODO(kostyak): get the RSS per region.
300     uptr TotalMapped = 0;
301     uptr PoppedBlocks = 0;
302     uptr PushedBlocks = 0;
303     for (uptr I = 0; I < NumClasses; I++) {
304       SizeClassInfo *Sci = getSizeClassInfo(I);
305       ScopedLock L(Sci->Mutex);
306       TotalMapped += Sci->AllocatedUser;
307       PoppedBlocks += Sci->FreeListInfo.PoppedBlocks;
308       PushedBlocks += Sci->FreeListInfo.PushedBlocks;
309     }
310     Str->append("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
311                 "remains %zu\n",
312                 TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
313     for (uptr I = 0; I < NumClasses; I++) {
314       SizeClassInfo *Sci = getSizeClassInfo(I);
315       ScopedLock L(Sci->Mutex);
316       getStats(Str, I, Sci);
317     }
318   }
319 
320   void getFragmentationInfo(ScopedString *Str) {
321     Str->append(
322         "Fragmentation Stats: SizeClassAllocator32: page size = %zu bytes\n",
323         getPageSizeCached());
324 
325     for (uptr I = 1; I < NumClasses; I++) {
326       SizeClassInfo *Sci = getSizeClassInfo(I);
327       ScopedLock L(Sci->Mutex);
328       getSizeClassFragmentationInfo(Sci, I, Str);
329     }
330   }
331 
332   void getMemoryGroupFragmentationInfo(ScopedString *Str) {
333     // Each region is also a memory group because region size is the same as
334     // group size.
335     getFragmentationInfo(Str);
336   }
337 
338   bool setOption(Option O, sptr Value) {
339     if (O == Option::ReleaseInterval) {
340       const s32 Interval = Max(
341           Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
342           Config::getMinReleaseToOsIntervalMs());
343       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
344       return true;
345     }
346     // Not supported by the Primary, but not an error either.
347     return true;
348   }
349 
350   uptr tryReleaseToOS(uptr ClassId, ReleaseToOS ReleaseType) {
351     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
352     // TODO: Once we have separate locks like primary64, we may consider using
353     // tryLock() as well.
354     ScopedLock L(Sci->Mutex);
355     return releaseToOSMaybe(Sci, ClassId, ReleaseType);
356   }
357 
358   uptr releaseToOS(ReleaseToOS ReleaseType) {
359     uptr TotalReleasedBytes = 0;
360     for (uptr I = 0; I < NumClasses; I++) {
361       if (I == SizeClassMap::BatchClassId)
362         continue;
363       SizeClassInfo *Sci = getSizeClassInfo(I);
364       ScopedLock L(Sci->Mutex);
365       TotalReleasedBytes += releaseToOSMaybe(Sci, I, ReleaseType);
366     }
367     return TotalReleasedBytes;
368   }
369 
370   const char *getRegionInfoArrayAddress() const { return nullptr; }
371   static uptr getRegionInfoArraySize() { return 0; }
372 
373   static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
374                                     UNUSED uptr Ptr) {
375     return {};
376   }
377 
378   AtomicOptions Options;
379 
380 private:
381   static const uptr NumClasses = SizeClassMap::NumClasses;
382   static const uptr RegionSize = 1UL << Config::getRegionSizeLog();
383   static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >>
384                                  Config::getRegionSizeLog();
385   static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
386   typedef FlatByteMap<NumRegions> ByteMap;
387 
388   struct ReleaseToOsInfo {
389     uptr BytesInFreeListAtLastCheckpoint;
390     uptr RangesReleased;
391     uptr LastReleasedBytes;
392     u64 LastReleaseAtNs;
393   };
394 
395   struct BlocksInfo {
396     SinglyLinkedList<BatchGroupT> BlockList = {};
397     uptr PoppedBlocks = 0;
398     uptr PushedBlocks = 0;
399   };
400 
401   struct alignas(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
402     HybridMutex Mutex;
403     BlocksInfo FreeListInfo GUARDED_BY(Mutex);
404     uptr CurrentRegion GUARDED_BY(Mutex);
405     uptr CurrentRegionAllocated GUARDED_BY(Mutex);
406     u32 RandState;
407     uptr AllocatedUser GUARDED_BY(Mutex);
408     // Lowest & highest region index allocated for this size class, to avoid
409     // looping through the whole NumRegions.
410     uptr MinRegionIndex GUARDED_BY(Mutex);
411     uptr MaxRegionIndex GUARDED_BY(Mutex);
412     ReleaseToOsInfo ReleaseInfo GUARDED_BY(Mutex);
413   };
414   static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
415 
416   uptr computeRegionId(uptr Mem) {
417     const uptr Id = Mem >> Config::getRegionSizeLog();
418     CHECK_LT(Id, NumRegions);
419     return Id;
420   }
421 
422   uptr allocateRegionSlow() {
423     uptr MapSize = 2 * RegionSize;
424     const uptr MapBase = reinterpret_cast<uptr>(
425         map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
426     if (!MapBase)
427       return 0;
428     const uptr MapEnd = MapBase + MapSize;
429     uptr Region = MapBase;
430     if (isAligned(Region, RegionSize)) {
431       ScopedLock L(RegionsStashMutex);
432       if (NumberOfStashedRegions < MaxStashedRegions)
433         RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
434       else
435         MapSize = RegionSize;
436     } else {
437       Region = roundUp(MapBase, RegionSize);
438       unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
439       MapSize = RegionSize;
440     }
441     const uptr End = Region + MapSize;
442     if (End != MapEnd)
443       unmap(reinterpret_cast<void *>(End), MapEnd - End);
444 
445     DCHECK_EQ(Region % RegionSize, 0U);
446     static_assert(Config::getRegionSizeLog() == GroupSizeLog,
447                   "Memory group should be the same size as Region");
448 
449     return Region;
450   }
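  // For illustration, assume RegionSize is 256 KiB and map() happens to return
  // 0x40021000 for the 512 KiB request (addresses are made up): the base is
  // not Region-aligned, so Region becomes roundUp(0x40021000, 0x40000) ==
  // 0x40040000, the 0x1f000 bytes below it are unmapped, MapSize shrinks to a
  // single RegionSize, and the tail above 0x40080000 is unmapped as well. Had
  // the base been aligned, the second Region would instead have been stashed
  // (stash space permitting) for a later allocateRegion() call.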
451 
452   uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) REQUIRES(Sci->Mutex) {
453     DCHECK_LT(ClassId, NumClasses);
454     uptr Region = 0;
455     {
456       ScopedLock L(RegionsStashMutex);
457       if (NumberOfStashedRegions > 0)
458         Region = RegionsStash[--NumberOfStashedRegions];
459     }
460     if (!Region)
461       Region = allocateRegionSlow();
462     if (LIKELY(Region)) {
463       // Sci->Mutex is held by the caller, updating the Min/Max is safe.
464       const uptr RegionIndex = computeRegionId(Region);
465       if (RegionIndex < Sci->MinRegionIndex)
466         Sci->MinRegionIndex = RegionIndex;
467       if (RegionIndex > Sci->MaxRegionIndex)
468         Sci->MaxRegionIndex = RegionIndex;
469       ScopedLock L(ByteMapMutex);
470       PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
471     }
472     return Region;
473   }
474 
475   SizeClassInfo *getSizeClassInfo(uptr ClassId) {
476     DCHECK_LT(ClassId, NumClasses);
477     return &SizeClassInfoArray[ClassId];
478   }
479 
480   void pushBatchClassBlocks(SizeClassInfo *Sci, CompactPtrT *Array, u32 Size)
481       REQUIRES(Sci->Mutex) {
482     DCHECK_EQ(Sci, getSizeClassInfo(SizeClassMap::BatchClassId));
483 
484     // Free blocks are recorded by TransferBatches in the freelist for all
485     // size classes. In addition, TransferBatches are themselves allocated from
486     // BatchClassId. In order not to use an additional block to record the free
487     // blocks of BatchClassId, they are self-contained, i.e., a TransferBatch
488     // records the address of its own block. See the figure below:
489     //
490     // TransferBatch at 0xABCD
491     // +----------------------------+
492     // | Free blocks' addr          |
493     // | +------+------+------+     |
494     // | |0xABCD|...   |...   |     |
495     // | +------+------+------+     |
496     // +----------------------------+
497     //
498     // When we allocate all the free blocks in the TransferBatch, the block used
499     // by TransferBatch is also free for use. We don't need to recycle the
500     // TransferBatch. Note that the correctness is maintained by the invariant,
501     //
502     //   Each popBlocks() request returns the entire TransferBatch. Returning
503     //   part of the blocks in a TransferBatch is invalid.
504     //
505     // This ensures that TransferBatch won't leak the address itself while it's
506     // still holding other valid data.
507     //
508     // Besides, BatchGroup is also allocated from BatchClassId and has its
509     // address recorded in the TransferBatch too. To maintain the correctness,
510     //
511     //   The address of BatchGroup is always recorded in the last TransferBatch
512     //   in the freelist (which also implies that the freelist should only be
513     //   updated with push_front). Once the last TransferBatch is popped,
514     //   the block used by BatchGroup is also free for use.
515     //
516     // With this approach, the blocks used by BatchGroup and TransferBatch are
517     // reusable and don't need additional space for them.
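    //
    // As a hedged sketch of the first push into an empty BatchClass freelist,
    // say three BatchClass blocks A, B and C are pushed (names made up, C
    // being the last array element): C is turned into the BatchGroup, B
    // becomes the first TransferBatch and records both its own address and
    // C's, and A is then appended to that TransferBatch. No extra memory is
    // requested at any point; every bookkeeping structure lives inside one of
    // the pushed blocks.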
518 
519     Sci->FreeListInfo.PushedBlocks += Size;
520     BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
521 
522     if (BG == nullptr) {
523       // Construct `BatchGroup` on the last element.
524       BG = reinterpret_cast<BatchGroupT *>(
525           decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
526       --Size;
527       BG->Batches.clear();
528       // BatchClass doesn't use memory groups. Use `0` to indicate there's no
529       // memory group here.
530       BG->CompactPtrGroupBase = 0;
531       BG->BytesInBGAtLastCheckpoint = 0;
532       BG->MaxCachedPerBatch =
533           CacheT::getMaxCached(getSizeByClassId(SizeClassMap::BatchClassId));
534 
535       Sci->FreeListInfo.BlockList.push_front(BG);
536     }
537 
538     if (UNLIKELY(Size == 0))
539       return;
540 
541     // This happens in two cases:
542     //   1. We just allocated a new `BatchGroup`.
543     //   2. Only one block is pushed when the freelist is empty.
544     if (BG->Batches.empty()) {
545       // Construct the `TransferBatch` on the last element.
546       TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(
547           decompactPtr(SizeClassMap::BatchClassId, Array[Size - 1]));
548       TB->clear();
549       // As mentioned above, addresses of `TransferBatch` and `BatchGroup` are
550       // recorded in the TransferBatch.
551       TB->add(Array[Size - 1]);
552       TB->add(
553           compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(BG)));
554       --Size;
555       BG->Batches.push_front(TB);
556     }
557 
558     TransferBatchT *CurBatch = BG->Batches.front();
559     DCHECK_NE(CurBatch, nullptr);
560 
561     for (u32 I = 0; I < Size;) {
562       u16 UnusedSlots =
563           static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
564       if (UnusedSlots == 0) {
565         CurBatch = reinterpret_cast<TransferBatchT *>(
566             decompactPtr(SizeClassMap::BatchClassId, Array[I]));
567         CurBatch->clear();
568         // Self-contained
569         CurBatch->add(Array[I]);
570         ++I;
571         // TODO(chiahungduan): Avoid the use of push_back() in `Batches` of
572         // BatchClassId.
573         BG->Batches.push_front(CurBatch);
574         UnusedSlots = static_cast<u16>(BG->MaxCachedPerBatch - 1);
575       }
576       // `UnusedSlots` is u16 so the result will also fit in u16.
577       const u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
578       CurBatch->appendFromArray(&Array[I], AppendSize);
579       I += AppendSize;
580     }
581   }
582   // Push the blocks to their batch group. The layout will be like,
583   //
584   // FreeListInfo.BlockList -> BG -> BG -> BG
585   //                            |     |     |
586   //                            v     v     v
587   //                            TB    TB    TB
588   //                            |
589   //                            v
590   //                            TB
591   //
592   // Each BatchGroup (BG) is associated with a unique group id, and its free
593   // blocks are managed by a list of TransferBatches (TB). To reduce insertion
594   // time, the BGs are kept sorted and the input `Array` is expected to be
595   // sorted as well, which makes the sorted property cheap to maintain.
596   // Pass `SameGroup=true` to indicate that all blocks in the array belong to
597   // the same group; we then skip checking the group id of each block.
598   //
599   // The region mutex needs to be held while calling this method.
600   void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
601                       CompactPtrT *Array, u32 Size, bool SameGroup = false)
602       REQUIRES(Sci->Mutex) {
603     DCHECK_NE(ClassId, SizeClassMap::BatchClassId);
604     DCHECK_GT(Size, 0U);
605 
606     auto CreateGroup = [&](uptr CompactPtrGroupBase) {
607       BatchGroupT *BG =
608           reinterpret_cast<BatchGroupT *>(C->getBatchClassBlock());
609       BG->Batches.clear();
610       TransferBatchT *TB =
611           reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
612       TB->clear();
613 
614       BG->CompactPtrGroupBase = CompactPtrGroupBase;
615       BG->Batches.push_front(TB);
616       BG->BytesInBGAtLastCheckpoint = 0;
617       BG->MaxCachedPerBatch = TransferBatchT::MaxNumCached;
618 
619       return BG;
620     };
621 
622     auto InsertBlocks = [&](BatchGroupT *BG, CompactPtrT *Array, u32 Size) {
623       SinglyLinkedList<TransferBatchT> &Batches = BG->Batches;
624       TransferBatchT *CurBatch = Batches.front();
625       DCHECK_NE(CurBatch, nullptr);
626 
627       for (u32 I = 0; I < Size;) {
628         DCHECK_GE(BG->MaxCachedPerBatch, CurBatch->getCount());
629         u16 UnusedSlots =
630             static_cast<u16>(BG->MaxCachedPerBatch - CurBatch->getCount());
631         if (UnusedSlots == 0) {
632           CurBatch =
633               reinterpret_cast<TransferBatchT *>(C->getBatchClassBlock());
634           CurBatch->clear();
635           Batches.push_front(CurBatch);
636           UnusedSlots = BG->MaxCachedPerBatch;
637         }
638         // `UnusedSlots` is u16 so the result will also fit in u16.
639         u16 AppendSize = static_cast<u16>(Min<u32>(UnusedSlots, Size - I));
640         CurBatch->appendFromArray(&Array[I], AppendSize);
641         I += AppendSize;
642       }
643     };
644 
645     Sci->FreeListInfo.PushedBlocks += Size;
646     BatchGroupT *Cur = Sci->FreeListInfo.BlockList.front();
647 
648     // In the following, `Cur` always points to the BatchGroup for blocks that
649     // will be pushed next. `Prev` is the element right before `Cur`.
650     BatchGroupT *Prev = nullptr;
651 
652     while (Cur != nullptr &&
653            compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
654       Prev = Cur;
655       Cur = Cur->Next;
656     }
657 
658     if (Cur == nullptr ||
659         compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
660       Cur = CreateGroup(compactPtrGroupBase(Array[0]));
661       if (Prev == nullptr)
662         Sci->FreeListInfo.BlockList.push_front(Cur);
663       else
664         Sci->FreeListInfo.BlockList.insert(Prev, Cur);
665     }
666 
667     // All the blocks are from the same group, just push without checking group
668     // id.
669     if (SameGroup) {
670       for (u32 I = 0; I < Size; ++I)
671         DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);
672 
673       InsertBlocks(Cur, Array, Size);
674       return;
675     }
676 
677     // The blocks are sorted by group id. Determine each group's segment and
678     // push the blocks of a segment to their group together.
679     u32 Count = 1;
680     for (u32 I = 1; I < Size; ++I) {
681       if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
682         DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
683         InsertBlocks(Cur, Array + I - Count, Count);
684 
685         while (Cur != nullptr &&
686                compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
687           Prev = Cur;
688           Cur = Cur->Next;
689         }
690 
691         if (Cur == nullptr ||
692             compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
693           Cur = CreateGroup(compactPtrGroupBase(Array[I]));
694           DCHECK_NE(Prev, nullptr);
695           Sci->FreeListInfo.BlockList.insert(Prev, Cur);
696         }
697 
698         Count = 1;
699       } else {
700         ++Count;
701       }
702     }
703 
704     InsertBlocks(Cur, Array + Size - Count, Count);
705   }
706 
707   u16 popBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
708                     CompactPtrT *ToArray, const u16 MaxBlockCount)
709       REQUIRES(Sci->Mutex) {
710     if (Sci->FreeListInfo.BlockList.empty())
711       return 0U;
712 
713     SinglyLinkedList<TransferBatchT> &Batches =
714         Sci->FreeListInfo.BlockList.front()->Batches;
715 
716     if (Batches.empty()) {
717       DCHECK_EQ(ClassId, SizeClassMap::BatchClassId);
718       BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
719       Sci->FreeListInfo.BlockList.pop_front();
720 
721       // The block used by `BatchGroup` is from BatchClassId. Turn the block
722       // into a `TransferBatch` with a single block.
723       TransferBatchT *TB = reinterpret_cast<TransferBatchT *>(BG);
724       ToArray[0] =
725           compactPtr(SizeClassMap::BatchClassId, reinterpret_cast<uptr>(TB));
726       Sci->FreeListInfo.PoppedBlocks += 1;
727       return 1U;
728     }
729 
730     // So far, instead of always filling blocks up to `MaxBlockCount`, we only
731     // examine a single `TransferBatch` to minimize the time spent in the
732     // primary allocator. Besides, the sizes of `TransferBatch` and
733     // `CacheT::getMaxCached()` may also impact the time spent on accessing the
734     // primary allocator.
735     // TODO(chiahungduan): Evaluate if we want to always prepare `MaxBlockCount`
736     // blocks and/or adjust the size of `TransferBatch` according to
737     // `CacheT::getMaxCached()`.
738     TransferBatchT *B = Batches.front();
739     DCHECK_NE(B, nullptr);
740     DCHECK_GT(B->getCount(), 0U);
741 
742     // BatchClassId should always take all blocks in the TransferBatch. Read the
743     // comment in `pushBatchClassBlocks()` for more details.
744     const u16 PopCount = ClassId == SizeClassMap::BatchClassId
745                              ? B->getCount()
746                              : Min(MaxBlockCount, B->getCount());
747     B->moveNToArray(ToArray, PopCount);
748 
749     // TODO(chiahungduan): The deallocation of unused BatchClassId blocks can be
750     // done without holding `Mutex`.
751     if (B->empty()) {
752       Batches.pop_front();
753       // `TransferBatch` of BatchClassId is self-contained, no need to
754       // deallocate. Read the comment in `pushBatchClassBlocks()` for more
755       // details.
756       if (ClassId != SizeClassMap::BatchClassId)
757         C->deallocate(SizeClassMap::BatchClassId, B);
758 
759       if (Batches.empty()) {
760         BatchGroupT *BG = Sci->FreeListInfo.BlockList.front();
761         Sci->FreeListInfo.BlockList.pop_front();
762 
763         // We don't keep a BatchGroup with zero blocks, to avoid empty-checking
764         // while allocating. Note that the block used for constructing the
765         // BatchGroup is recorded as a free block in the last element of
766         // BatchGroup::Batches, which means that once we pop the last
767         // TransferBatch, the block is implicitly deallocated.
768         if (ClassId != SizeClassMap::BatchClassId)
769           C->deallocate(SizeClassMap::BatchClassId, BG);
770       }
771     }
772 
773     Sci->FreeListInfo.PoppedBlocks += PopCount;
774     return PopCount;
775   }
776 
777   NOINLINE bool populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci)
778       REQUIRES(Sci->Mutex) {
779     uptr Region;
780     uptr Offset;
781     // If the size-class currently has a region associated with it, use it. The
782     // newly created blocks will be located after the currently allocated memory
783     // for that region (up to RegionSize). Otherwise, create a new region, where
784     // the new blocks will be carved from the beginning.
785     if (Sci->CurrentRegion) {
786       Region = Sci->CurrentRegion;
787       DCHECK_GT(Sci->CurrentRegionAllocated, 0U);
788       Offset = Sci->CurrentRegionAllocated;
789     } else {
790       DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
791       Region = allocateRegion(Sci, ClassId);
792       if (UNLIKELY(!Region))
793         return false;
794       C->getStats().add(StatMapped, RegionSize);
795       Sci->CurrentRegion = Region;
796       Offset = 0;
797     }
798 
799     const uptr Size = getSizeByClassId(ClassId);
800     const u16 MaxCount = CacheT::getMaxCached(Size);
801     DCHECK_GT(MaxCount, 0U);
802     // The maximum number of blocks we should carve in the region is dictated
803     // by the maximum number of batches we want to fill, and the amount of
804     // memory left in the current region (we use the lower of the two). This
805     // will not be 0 as we ensure that a region can at least hold one block (via
806     // static_assert and at the end of this function).
807     const u32 NumberOfBlocks =
808         Min(MaxNumBatches * MaxCount,
809             static_cast<u32>((RegionSize - Offset) / Size));
810     DCHECK_GT(NumberOfBlocks, 0U);
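    // For instance, with a 256 KiB Region, a fresh Region (Offset == 0), a
    // 32-byte size class and assuming getMaxCached() returns 13 for that size
    // (an assumed value; the real one comes from CacheT), NumberOfBlocks ==
    // Min(4 * 13, 262144 / 32) == 52 on Android, where MaxNumBatches == 4.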
811 
812     constexpr u32 ShuffleArraySize =
813         MaxNumBatches * TransferBatchT::MaxNumCached;
814     // Fill the transfer batches and put them in the size-class freelist. We
815     // need to randomize the blocks for security purposes, so we first fill a
816     // local array that we then shuffle before populating the batches.
817     CompactPtrT ShuffleArray[ShuffleArraySize];
818     DCHECK_LE(NumberOfBlocks, ShuffleArraySize);
819 
820     uptr P = Region + Offset;
821     for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
822       ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
823 
824     if (ClassId != SizeClassMap::BatchClassId) {
825       u32 N = 1;
826       uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
827       for (u32 I = 1; I < NumberOfBlocks; I++) {
828         if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
829           shuffle(ShuffleArray + I - N, N, &Sci->RandState);
830           pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
831                          /*SameGroup=*/true);
832           N = 1;
833           CurGroup = compactPtrGroupBase(ShuffleArray[I]);
834         } else {
835           ++N;
836         }
837       }
838 
839       shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
840       pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
841                      /*SameGroup=*/true);
842     } else {
843       pushBatchClassBlocks(Sci, ShuffleArray, NumberOfBlocks);
844     }
845 
846     // Note that `PushedBlocks` and `PoppedBlocks` are supposed to record only
847     // the requests from `pushBlocks` and `popBlocks`, which are the external
848     // interfaces. `populateFreeList` is an internal interface, so we set the
849     // values back to avoid incorrectly updating the stats.
850     Sci->FreeListInfo.PushedBlocks -= NumberOfBlocks;
851 
852     const uptr AllocatedUser = Size * NumberOfBlocks;
853     C->getStats().add(StatFree, AllocatedUser);
854     DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize);
855     // If there is not enough room in the currently associated region to fit
856     // more blocks, we deassociate the region by resetting CurrentRegion and
857     // CurrentRegionAllocated. Otherwise, update the allocated amount.
858     if (RegionSize - (Sci->CurrentRegionAllocated + AllocatedUser) < Size) {
859       Sci->CurrentRegion = 0;
860       Sci->CurrentRegionAllocated = 0;
861     } else {
862       Sci->CurrentRegionAllocated += AllocatedUser;
863     }
864     Sci->AllocatedUser += AllocatedUser;
865 
866     return true;
867   }
868 
869   void getStats(ScopedString *Str, uptr ClassId, SizeClassInfo *Sci)
870       REQUIRES(Sci->Mutex) {
871     if (Sci->AllocatedUser == 0)
872       return;
873     const uptr BlockSize = getSizeByClassId(ClassId);
874     const uptr InUse =
875         Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
876     const uptr BytesInFreeList = Sci->AllocatedUser - InUse * BlockSize;
877     uptr PushedBytesDelta = 0;
878     if (BytesInFreeList >= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint) {
879       PushedBytesDelta =
880           BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
881     }
882     const uptr AvailableChunks = Sci->AllocatedUser / BlockSize;
883     Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
884                 "inuse: %6zu avail: %6zu releases: %6zu last released: %6zuK "
885                 "latest pushed bytes: %6zuK\n",
886                 ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
887                 Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks,
888                 InUse, AvailableChunks, Sci->ReleaseInfo.RangesReleased,
889                 Sci->ReleaseInfo.LastReleasedBytes >> 10,
890                 PushedBytesDelta >> 10);
891   }
892 
893   void getSizeClassFragmentationInfo(SizeClassInfo *Sci, uptr ClassId,
894                                      ScopedString *Str) REQUIRES(Sci->Mutex) {
895     const uptr BlockSize = getSizeByClassId(ClassId);
896     const uptr First = Sci->MinRegionIndex;
897     const uptr Last = Sci->MaxRegionIndex;
898     const uptr Base = First * RegionSize;
899     const uptr NumberOfRegions = Last - First + 1U;
900     auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
901       ScopedLock L(ByteMapMutex);
902       return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
903     };
904 
905     FragmentationRecorder Recorder;
906     if (!Sci->FreeListInfo.BlockList.empty()) {
907       PageReleaseContext Context =
908           markFreeBlocks(Sci, ClassId, BlockSize, Base, NumberOfRegions,
909                          ReleaseToOS::ForceAll);
910       releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
911     }
912 
913     const uptr PageSize = getPageSizeCached();
914     const uptr TotalBlocks = Sci->AllocatedUser / BlockSize;
915     const uptr InUseBlocks =
916         Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks;
917     uptr AllocatedPagesCount = 0;
918     if (TotalBlocks != 0U) {
919       for (uptr I = 0; I < NumberOfRegions; ++I) {
920         if (SkipRegion(I))
921           continue;
922         AllocatedPagesCount += RegionSize / PageSize;
923       }
924 
925       DCHECK_NE(AllocatedPagesCount, 0U);
926     }
927 
928     DCHECK_GE(AllocatedPagesCount, Recorder.getReleasedPagesCount());
929     const uptr InUsePages =
930         AllocatedPagesCount - Recorder.getReleasedPagesCount();
931     const uptr InUseBytes = InUsePages * PageSize;
932 
933     uptr Integral;
934     uptr Fractional;
935     computePercentage(BlockSize * InUseBlocks, InUseBytes, &Integral,
936                       &Fractional);
937     Str->append("  %02zu (%6zu): inuse/total blocks: %6zu/%6zu inuse/total "
938                 "pages: %6zu/%6zu inuse bytes: %6zuK util: %3zu.%02zu%%\n",
939                 ClassId, BlockSize, InUseBlocks, TotalBlocks, InUsePages,
940                 AllocatedPagesCount, InUseBytes >> 10, Integral, Fractional);
941   }
942 
943   NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
944                                  ReleaseToOS ReleaseType = ReleaseToOS::Normal)
945       REQUIRES(Sci->Mutex) {
946     const uptr BlockSize = getSizeByClassId(ClassId);
947 
948     DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
949     const uptr BytesInFreeList =
950         Sci->AllocatedUser -
951         (Sci->FreeListInfo.PoppedBlocks - Sci->FreeListInfo.PushedBlocks) *
952             BlockSize;
953 
954     if (UNLIKELY(BytesInFreeList == 0))
955       return 0;
956 
957     // ====================================================================== //
958     // 1. Check if we have enough free blocks and if it's worth doing a page
959     // release.
960     // ====================================================================== //
961     if (ReleaseType != ReleaseToOS::ForceAll &&
962         !hasChanceToReleasePages(Sci, BlockSize, BytesInFreeList,
963                                  ReleaseType)) {
964       return 0;
965     }
966 
967     const uptr First = Sci->MinRegionIndex;
968     const uptr Last = Sci->MaxRegionIndex;
969     DCHECK_NE(Last, 0U);
970     DCHECK_LE(First, Last);
971     uptr TotalReleasedBytes = 0;
972     const uptr Base = First * RegionSize;
973     const uptr NumberOfRegions = Last - First + 1U;
974 
975     // ==================================================================== //
976     // 2. Mark the free blocks, so that we can tell which pages are in use by
977     //    querying `PageReleaseContext`.
978     // ==================================================================== //
979     PageReleaseContext Context = markFreeBlocks(Sci, ClassId, BlockSize, Base,
980                                                 NumberOfRegions, ReleaseType);
981     if (!Context.hasBlockMarked())
982       return 0;
983 
984     // ==================================================================== //
985     // 3. Release the unused physical pages back to the OS.
986     // ==================================================================== //
987     ReleaseRecorder Recorder(Base);
988     auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
989       ScopedLock L(ByteMapMutex);
990       return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
991     };
992     releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
993 
994     if (Recorder.getReleasedRangesCount() > 0) {
995       Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
996       Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
997       Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
998       TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
999     }
1000     Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTimeFast();
1001 
1002     return TotalReleasedBytes;
1003   }
1004 
1005   bool hasChanceToReleasePages(SizeClassInfo *Sci, uptr BlockSize,
1006                                uptr BytesInFreeList, ReleaseToOS ReleaseType)
1007       REQUIRES(Sci->Mutex) {
1008     DCHECK_GE(Sci->FreeListInfo.PoppedBlocks, Sci->FreeListInfo.PushedBlocks);
1009     const uptr PageSize = getPageSizeCached();
1010 
1011     if (BytesInFreeList <= Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint)
1012       Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint = BytesInFreeList;
1013 
1014     // Always update `BytesInFreeListAtLastCheckpoint` with the smallest value
1015     // so that we won't underestimate the releasable pages. For example, the
1016     // following is the region usage,
1017     //
1018     //  BytesInFreeListAtLastCheckpoint   AllocatedUser
1019     //                v                         v
1020     //  |--------------------------------------->
1021     //         ^                   ^
1022     //  BytesInFreeList     ReleaseThreshold
1023     //
1024     // In general, if we have collected enough bytes and the amount of free
1025     // bytes meets the ReleaseThreshold, we will try to do page release. If we
1026     // don't update `BytesInFreeListAtLastCheckpoint` when the current
1027     // `BytesInFreeList` is smaller, we may take longer time to wait for enough
1028     // freed blocks because we miss the bytes between
1029     // (BytesInFreeListAtLastCheckpoint - BytesInFreeList).
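    //
    // As a rough numeric sketch with a 4096-byte page: if only 2 KiB worth of
    // blocks were pushed since the last checkpoint, PushedBytesDelta < PageSize
    // and we bail out below. For a small-block class with, say, 1 MiB of
    // AllocatedUser, a Normal release additionally requires at least 64 KiB
    // (AllocatedUser / 16) of newly pushed bytes before a release is attempted.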
1030     const uptr PushedBytesDelta =
1031         BytesInFreeList - Sci->ReleaseInfo.BytesInFreeListAtLastCheckpoint;
1032     if (PushedBytesDelta < PageSize)
1033       return false;
1034 
1035     // Releasing smaller blocks is expensive, so we want to make sure that a
1036     // significant amount of bytes are free, and that there has been a good
1037     // amount of batches pushed to the freelist before attempting to release.
1038     if (isSmallBlock(BlockSize) && ReleaseType == ReleaseToOS::Normal)
1039       if (PushedBytesDelta < Sci->AllocatedUser / 16U)
1040         return false;
1041 
1042     if (ReleaseType == ReleaseToOS::Normal) {
1043       const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
1044       if (IntervalMs < 0)
1045         return false;
1046 
1047       // The constant 8 here was selected by profiling some apps, where the
1048       // number of unreleased pages in the large size classes is around 16
1049       // pages or more. We choose half of that as a heuristic, which also
1050       // avoids doing a page release on every pushBlocks() attempt for large blocks.
1051       const bool ByPassReleaseInterval =
1052           isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize;
1053       if (!ByPassReleaseInterval) {
1054         if (Sci->ReleaseInfo.LastReleaseAtNs +
1055                 static_cast<u64>(IntervalMs) * 1000000 >
1056             getMonotonicTimeFast()) {
1057           // Memory was returned recently.
1058           return false;
1059         }
1060       }
1061     } // if (ReleaseType == ReleaseToOS::Normal)
1062 
1063     return true;
1064   }
1065 
1066   PageReleaseContext markFreeBlocks(SizeClassInfo *Sci, const uptr ClassId,
1067                                     const uptr BlockSize, const uptr Base,
1068                                     const uptr NumberOfRegions,
1069                                     ReleaseToOS ReleaseType)
1070       REQUIRES(Sci->Mutex) {
1071     const uptr PageSize = getPageSizeCached();
1072     const uptr GroupSize = (1UL << GroupSizeLog);
1073     const uptr CurGroupBase =
1074         compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
1075 
1076     PageReleaseContext Context(BlockSize, NumberOfRegions,
1077                                /*ReleaseSize=*/RegionSize);
1078 
1079     auto DecompactPtr = [](CompactPtrT CompactPtr) {
1080       return reinterpret_cast<uptr>(CompactPtr);
1081     };
1082     for (BatchGroupT &BG : Sci->FreeListInfo.BlockList) {
1083       const uptr GroupBase = decompactGroupBase(BG.CompactPtrGroupBase);
1084       // The `GroupSize` may not be divisible by `BlockSize`, which means there
1085       // is unused space at the end of the Region. Exclude that space to avoid
1086       // an unused page map entry.
1087       uptr AllocatedGroupSize = GroupBase == CurGroupBase
1088                                     ? Sci->CurrentRegionAllocated
1089                                     : roundDownSlow(GroupSize, BlockSize);
1090       if (AllocatedGroupSize == 0)
1091         continue;
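      // E.g., for a 96-byte class in a fully used 256 KiB group (values chosen
      // for illustration), AllocatedGroupSize is roundDownSlow(262144, 96) ==
      // 262080, so the trailing 64 bytes that cannot hold a block are excluded
      // from the page release accounting.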
1092 
1093       // TransferBatches are pushed to the front of BG.Batches, so the first
1094       // one may not be full.
1095       const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
1096                              BG.Batches.front()->getCount();
1097       const uptr BytesInBG = NumBlocks * BlockSize;
1098 
1099       if (ReleaseType != ReleaseToOS::ForceAll) {
1100         if (BytesInBG <= BG.BytesInBGAtLastCheckpoint) {
1101           BG.BytesInBGAtLastCheckpoint = BytesInBG;
1102           continue;
1103         }
1104 
1105         const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint;
1106         if (PushedBytesDelta < PageSize)
1107           continue;
1108 
1109         // Given the randomness property, we try to release the pages only if
1110         // the bytes used by free blocks exceed a certain proportion of the
1111         // allocated space.
1112         if (isSmallBlock(BlockSize) && (BytesInBG * 100U) / AllocatedGroupSize <
1113                                            (100U - 1U - BlockSize / 16U)) {
1114           continue;
1115         }
1116       }
1117 
1118       // TODO: Consider updating this after page release if `ReleaseRecorder`
1119       // can tell the released bytes in each group.
1120       BG.BytesInBGAtLastCheckpoint = BytesInBG;
1121 
1122       const uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
1123       const uptr RegionIndex = (GroupBase - Base) / RegionSize;
1124 
1125       if (NumBlocks == MaxContainedBlocks) {
1126         for (const auto &It : BG.Batches)
1127           for (u16 I = 0; I < It.getCount(); ++I)
1128             DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
1129 
1130         const uptr To = GroupBase + AllocatedGroupSize;
1131         Context.markRangeAsAllCounted(GroupBase, To, GroupBase, RegionIndex,
1132                                       AllocatedGroupSize);
1133       } else {
1134         DCHECK_LT(NumBlocks, MaxContainedBlocks);
1135 
1136         // Note that we don't always visit blocks in each BatchGroup, so we
1137         // may miss the chance of releasing certain pages that cross
1138         // BatchGroups.
1139         Context.markFreeBlocksInRegion(BG.Batches, DecompactPtr, GroupBase,
1140                                        RegionIndex, AllocatedGroupSize,
1141                                        /*MayContainLastBlockInRegion=*/true);
1142       }
1143 
1144       // In a rare case, the PageMap allocation may fail and we may not be
1145       // able to do the page release.
1146       if (UNLIKELY(!Context.hasBlockMarked()))
1147         break;
1148     }
1149 
1150     return Context;
1151   }
1152 
1153   SizeClassInfo SizeClassInfoArray[NumClasses] = {};
1154 
1155   HybridMutex ByteMapMutex;
1156   // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
1157   ByteMap PossibleRegions GUARDED_BY(ByteMapMutex) = {};
1158   atomic_s32 ReleaseToOsIntervalMs = {};
1159   // Unless several threads request regions simultaneously from different size
1160   // classes, the stash rarely contains more than 1 entry.
1161   static constexpr uptr MaxStashedRegions = 4;
1162   HybridMutex RegionsStashMutex;
1163   uptr NumberOfStashedRegions GUARDED_BY(RegionsStashMutex) = 0;
1164   uptr RegionsStash[MaxStashedRegions] GUARDED_BY(RegionsStashMutex) = {};
1165 };
1166 
1167 } // namespace scudo
1168 
1169 #endif // SCUDO_PRIMARY32_H_
1170