//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"
#include "vector.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.
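//
// Rough layout of a Secondary block (not to scale). The Combined (Chunk) and
// LargeBlock headers sit immediately before the pointer returned to the user,
// and some unused committed space may precede them:
//
//   +------------+----------+---------+-----------+------------+
//   | guard page | (unused) | headers | user data | guard page |
//   +------------+----------+---------+-----------+------------+
//                ^                                ^
//              CommitBase               CommitBase + CommitSize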

namespace LargeBlock {

struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(MemMapT &MemMap) { MemMap.unmap(); }

namespace {

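// A CachedBlock is a freed secondary allocation whose mapping is kept around
// for reuse by MapAllocatorCache. An entry with CommitBase == 0 is considered
// invalid (see isValid()/invalidate()).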
struct CachedBlock {
  static constexpr u16 CacheIndexMax = UINT16_MAX;
  static constexpr u16 EndOfListVal = CacheIndexMax;

  // We allow a certain amount of fragmentation, and part of the fragmented
  // bytes will be released by `releaseAndZeroPagesToOS()`. This increases the
  // cache hit rate and reduces the RSS overhead at the same time. See
  // `MapAllocatorCache::retrieve()` for more details.
  //
  // We arrived at this default value after noticing that mapping in larger
  // memory regions performs better than releasing memory and forcing a cache
  // hit. The data suggests that beyond 4 pages, the release execution time is
  // longer than the map execution time. As such, the ideal default is
  // platform dependent.
  static constexpr uptr MaxReleasedCachePages = 4U;

  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;
  u16 Next = 0;
  u16 Prev = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
} // namespace

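// A stub cache used when the secondary cache is disabled in the Config:
// canCache() always returns false, so blocks are mapped and unmapped directly
// without ever being stored.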
template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  CachedBlock retrieve(UNUSED uptr MaxAllowedFragmentedBytes, UNUSED uptr Size,
                       UNUSED uptr Alignment, UNUSED uptr HeadersSize,
                       UNUSED uptr &EntryHeaderPos) {
    return {};
  }
  void store(UNUSED Options Options, UNUSED uptr CommitBase,
             UNUSED uptr CommitSize, UNUSED uptr BlockBegin,
             UNUSED MemMapT MemMap) {
    // This should never be called since canCache always returns false.
    UNREACHABLE(
        "It is not valid to call store on MapAllocatorNoCache objects.");
  }

  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

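// The number of pages of leading fragmentation in a cached block that may be
// left committed (not released back to the OS) when the block is retrieved;
// see the release logic in MapAllocatorCache::retrieve() and the
// tagged/untagged split in mapSecondary().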
static const uptr MaxUnreleasedCachePages = 4U;

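// Commits the backing memory of a secondary block. When memory tagging is in
// use, only the portion of the mapping that may hold tagged data is remapped
// with MAP_MEMTAG; the remainder is mapped untagged.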
template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr PageSize = getPageSizeCached();
  if (SCUDO_TRUSTY) {
    /*
     * On Trusty we need AllocPos to be usable for shared memory, which cannot
     * cross multiple mappings. This means we need to split around AllocPos
     * and not over it. We can only do this if the address is page-aligned.
     */
    const uptr TaggedSize = AllocPos - CommitBase;
    if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
      DCHECK_GT(TaggedSize, 0);
      return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
                          MAP_MEMTAG | Flags) &&
             MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
                          Flags);
    } else {
      const uptr RemapFlags =
          (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
      return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
                          RemapFlags);
    }
  }

  const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize;
  if (useMemoryTagging<Config>(Options) &&
      CommitSize > MaxUnreleasedCacheBytes) {
    const uptr UntaggedPos =
        Max(AllocPos, CommitBase + MaxUnreleasedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
                        MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

// The default unmap callback is simply scudo::unmap. In testing, a different
// unmap callback is used to record information about unmaps in the cache.
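//
// MapAllocatorCache keeps an LRU cache of recently freed secondary blocks.
// Stored entries keep their mapping alive so that a later allocation of a
// similar size can reuse it instead of going back to the OS; entries may also
// be quarantined when memory tagging is enabled, and are released to the OS
// after a configurable interval.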
template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
class MapAllocatorCache {
public:
  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append(
        "Stats: MapAllocatorCache: EntriesCount: %zu, "
        "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
        LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
        atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

    for (CachedBlock &Entry : LRUEntries) {
      Str->append(" StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");
  // Ensure the cache entry array size fits in the LRU list Next and Prev
  // index fields.
  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
                "Cache entry array is too large to be indexed.");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(LRUEntries.size(), 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
    // The default value in the cache config takes priority.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

    LRUEntries.clear();
    LRUEntries.init(Entries, sizeof(Entries));

    AvailEntries.clear();
    AvailEntries.init(Entries, sizeof(Entries));
    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
      AvailEntries.push_back(&Entries[I]);
  }

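  // Stores a freed block in the cache. With memory tagging enabled, the block
  // may first pass through the quarantine; least recently used entries are
  // evicted (and unmapped outside of the lock) when the cache is full.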
  void store(const Options &Options, uptr CommitBase, uptr CommitSize,
             uptr BlockBegin, MemMapT MemMap) EXCLUDES(Mutex) {
    DCHECK(canCache(CommitSize));

    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    u64 Time;
    CachedBlock Entry;

    Entry.CommitBase = CommitBase;
    Entry.CommitSize = CommitSize;
    Entry.BlockBegin = BlockBegin;
    Entry.MemMap = MemMap;
    Entry.Time = UINT64_MAX;

    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new mapping
        // on top, so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    }

    // Usually only one entry will be evicted from the cache. Only in the rare
    // event that the cache shrinks in real-time, due to a decrease in the
    // configurable value MaxEntriesCount, will more than one cache entry be
    // evicted. The vector is used to save the MemMaps of evicted entries so
    // that the unmap calls can be performed outside the lock.
    Vector<MemMapT, 1U> EvictionMemMaps;

    do {
      ScopedLock L(Mutex);

      // Time must be computed under the lock to ensure that the LRU cache
      // remains sorted with respect to time in a multithreaded environment.
      Time = getMonotonicTimeFast();
      if (Entry.Time != 0)
        Entry.Time = Time;

      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry into
        // the quarantine or the cache because the permissions would be wrong,
        // so just unmap it.
        unmapCallBack(Entry.MemMap);
        break;
      }
      if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }

      // All excess entries are evicted from the cache. Note that when
      // `MaxEntriesCount` is zero, cache storing shouldn't happen and it's
      // guarded by the `DCHECK(canCache(CommitSize))` above. As a result, we
      // won't try to pop `LRUEntries` when it's empty.
      while (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount)) {
        // Save MemMaps of evicted entries to perform unmap outside of lock.
        CachedBlock *Entry = LRUEntries.back();
        EvictionMemMaps.push_back(Entry->MemMap);
        remove(Entry);
      }

      insert(Entry);

      if (OldestTime == 0)
        OldestTime = Entry.Time;
    } while (0);

    for (MemMapT &EvictMemMap : EvictionMemMaps)
      unmapCallBack(EvictMemMap);

    if (Interval >= 0) {
      // TODO: Add ReleaseToOS logic to LRU algorithm
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    }
  }

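  // Returns the cached block that best fits the requested Size and Alignment,
  // i.e. the one wasting the fewest fragmented bytes, provided the waste stays
  // within MaxAllowedFragmentedPages. Returns an invalid CachedBlock (see
  // CachedBlock::isValid()) if no suitable entry exists.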
  CachedBlock retrieve(uptr MaxAllowedFragmentedPages, uptr Size,
                       uptr Alignment, uptr HeadersSize, uptr &EntryHeaderPos)
      EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    CachedBlock Entry;
    EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (LRUEntries.size() == 0)
        return {};
      CachedBlock *RetrievedEntry = nullptr;
      uptr MinDiff = UINTPTR_MAX;

      // Since allocation sizes don't always match cached memory chunk sizes
      // we allow some memory to be unused (called fragmented bytes). The
      // amount of unused bytes is exactly EntryHeaderPos - CommitBase.
      //
      //        CommitBase               CommitBase + CommitSize
      //          V                              V
      //      +---+------------+-----------------+---+
      //      |   |            |                 |   |
      //      +---+------------+-----------------+---+
      //      ^                ^                     ^
      //    Guard         EntryHeaderPos          Guard-page-end
      //  page-begin
      //
      // [EntryHeaderPos, CommitBase + CommitSize) contains the user data as
      // well as the header metadata. If EntryHeaderPos - CommitBase exceeds
      // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
      // not considered valid for retrieval.
      for (CachedBlock &Entry : LRUEntries) {
        const uptr CommitBase = Entry.CommitBase;
        const uptr CommitSize = Entry.CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        const uptr MaxAllowedFragmentedBytes =
            MaxAllowedFragmentedPages * PageSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        // TODO: Remove the AllocPos > CommitBase + MaxAllowedFragmentedBytes
        // condition and replace it with Diff > MaxAllowedFragmentedBytes.
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + MaxAllowedFragmentedBytes) {
          continue;
        }

        const uptr Diff = roundDown(HeaderPos, PageSize) - CommitBase;

        // Keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize).
        if (Diff >= MinDiff)
          continue;

        MinDiff = Diff;
        RetrievedEntry = &Entry;
        EntryHeaderPos = HeaderPos;

        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr OptimalFitThresholdBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= OptimalFitThresholdBytes)
          break;
      }

      if (RetrievedEntry != nullptr) {
        Entry = *RetrievedEntry;
        remove(RetrievedEntry);
        SuccessfulRetrieves++;
      }
    }

    // The difference between the retrieved memory chunk and the requested
    // size is at most MaxAllowedFragmentedPages pages.
    //
    //     +- MaxAllowedFragmentedPages * PageSize -+
    //     +--------------------------+-------------+
    //     |                          |             |
    //     +--------------------------+-------------+
    //      \ Bytes to be released   /       ^
    //                                       |
    //                         (may or may not be committed)
    //
    // The maximum number of bytes released to the OS is capped by
    // MaxReleasedCachePages.
    //
    // TODO: Consider making MaxReleasedCachePages configurable since
    // the release-to-OS API can vary across systems.
    if (Entry.Time != 0) {
      const uptr FragmentedBytes =
          roundDown(EntryHeaderPos, PageSize) - Entry.CommitBase;
      const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize;
      if (FragmentedBytes > MaxUnreleasedCacheBytes) {
        const uptr MaxReleasedCacheBytes =
            CachedBlock::MaxReleasedCachePages * PageSize;
        uptr BytesToRelease =
            roundUp(Min<uptr>(MaxReleasedCacheBytes,
                              FragmentedBytes - MaxUnreleasedCacheBytes),
                    PageSize);
        Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, BytesToRelease);
      }
    }

    return Entry;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      if (Value < 0)
        return false;
      atomic_store_relaxed(
          &MaxEntriesCount,
          Min<u32>(static_cast<u32>(Value), Config::getEntriesArraySize()));
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        unmapCallBack(MemMap);
        Quarantine[I].invalidate();
      }
    }
    for (CachedBlock &Entry : LRUEntries)
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
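  // Moves a slot from the free list (AvailEntries) into the front of the LRU
  // list, i.e. the most recently used position.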
  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
    CachedBlock *AvailEntry = AvailEntries.front();
    AvailEntries.pop_front();

    *AvailEntry = Entry;
    LRUEntries.push_front(AvailEntry);
  }

  void remove(CachedBlock *Entry) REQUIRES(Mutex) {
    DCHECK(Entry->isValid());
    LRUEntries.remove(Entry);
    Entry->invalidate();
    AvailEntries.push_front(Entry);
  }

  void empty() {
    MemMapT MapInfo[Config::getEntriesArraySize()];
    uptr N = 0;
    {
      ScopedLock L(Mutex);

      for (CachedBlock &Entry : LRUEntries)
        MapInfo[N++] = Entry.MemMap;
      LRUEntries.clear();
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      unmapCallBack(MemMap);
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
      Quarantine GUARDED_BY(Mutex) = {};

  // Cached blocks stored in LRU order.
  DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
  // The unused Entries.
  SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex);
};

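// The Secondary allocator itself: services allocations that are too large for
// the Primary by mapping them directly from the OS, optionally going through
// the configured cache (Config::CacheT) first. In-use blocks are tracked in a
// doubly linked list so they can be iterated over and accounted for.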
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  void *tryAllocateFromCache(const Options &Options, uptr Size, uptr Alignment,
                             uptr *BlockEndPtr, FillContentsMode FillContents);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::template CacheT<typename Config::CacheConfig> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

template <typename Config>
void *
MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
                                           uptr Alignment, uptr *BlockEndPtr,
                                           FillContentsMode FillContents) {
  CachedBlock Entry;
  uptr EntryHeaderPos;
  uptr MaxAllowedFragmentedPages = MaxUnreleasedCachePages;

  if (LIKELY(!useMemoryTagging<Config>(Options))) {
    MaxAllowedFragmentedPages += CachedBlock::MaxReleasedCachePages;
  } else {
    // TODO: Enabling MaxReleasedCachePages may result in pages for an entry
    // being partially released, which erases the tag of those pages as well.
    // To support this feature for MTE, we need to tag those pages again.
    DCHECK_EQ(MaxAllowedFragmentedPages, MaxUnreleasedCachePages);
  }

  Entry = Cache.retrieve(MaxAllowedFragmentedPages, Size, Alignment,
                         getHeadersSize(), EntryHeaderPos);
  if (!Entry.isValid())
    return nullptr;

  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
  bool Zeroed = Entry.Time == 0;
  if (useMemoryTagging<Config>(Options)) {
    uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
    Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    if (Zeroed) {
      storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
                NewBlockBegin);
    } else if (Entry.BlockBegin < NewBlockBegin) {
      storeTags(Entry.BlockBegin, NewBlockBegin);
    } else {
      storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
    }
  }

  H->CommitBase = Entry.CommitBase;
  H->CommitSize = Entry.CommitSize;
  H->MemMap = Entry.MemMap;

  const uptr BlockEnd = H->CommitBase + H->CommitSize;
  if (BlockEndPtr)
    *BlockEndPtr = BlockEnd;
  uptr HInt = reinterpret_cast<uptr>(H);
  if (allocatorSupportsMemoryTagging<Config>())
    HInt = untagPointer(HInt);
  const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
  void *Ptr = reinterpret_cast<void *>(PtrInt);
  if (FillContents && !Zeroed)
    memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
           BlockEnd - PtrInt);
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += H->CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, H->CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return Ptr;
}

// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The hint
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms, requesting
// an allocation from the Secondary with a large alignment would end up wasting
// VA space (even though we are not committing the whole thing), hence the need
// to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - AlignmentHint
// (pending rounding and headers).
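//
// A worked example with hypothetical numbers (assuming a 4 KiB page size and,
// purely for illustration, a combined header size of 48 bytes): for Size =
// 100000 and Alignment = 32, RoundedSize = roundUp(100000 + 48, 4096) = 102400
// bytes (25 pages), and the reserved MapSize adds one guard page on each side
// for a total of 27 pages. Only the committed portion between the guard pages
// is actually backed by mapSecondary().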
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we
  // simply pass the required size (`Size` + `getHeadersSize()`) to do the
  // cache lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    void *Ptr = tryAllocateFromCache(Options, Size, Alignment, BlockEndPtr,
                                     FillContents);
    if (Ptr != nullptr)
      return Ptr;
  }

  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take ownership of the entire reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    unmap(MemMap);
    return nullptr;
  }
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }

  if (Cache.canCache(H->CommitSize)) {
    Cache.store(Options, H->CommitBase, H->CommitSize,
                reinterpret_cast<uptr>(H + 1), H->MemMap);
  } else {
    // Note that `H->MemMap` is stored on the pages it manages itself. Take
    // over ownership before unmap() so that no operation performed along with
    // unmap() touches inaccessible pages.
    MemMapT MemMap = H->MemMap;
    unmap(MemMap);
  }
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_