//===-- secondary.h ---------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_SECONDARY_H_
#define SCUDO_SECONDARY_H_

#include "chunk.h"
#include "common.h"
#include "list.h"
#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
#include "stats.h"
#include "string_utils.h"
#include "thread_annotations.h"
#include "vector.h"

namespace scudo {

// This allocator wraps the platform allocation primitives, and as such is on
// the slower side and should preferably be used for larger sized allocations.
// Blocks allocated will be preceded and followed by a guard page, and hold
// their own header that is not checksummed: the guard pages and the Combined
// header should be enough for our purpose.

namespace LargeBlock {

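// Header of a secondary allocation, stored in front of the user block. Prev
// and Next link the block into the allocator's in-use list, CommitBase and
// CommitSize describe the committed region backing the block, and MemMap is
// the platform mapping that owns it.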
struct alignas(Max<uptr>(archSupportsMemoryTagging()
                             ? archMemoryTagGranuleSize()
                             : 1,
                         1U << SCUDO_MIN_ALIGNMENT_LOG)) Header {
  LargeBlock::Header *Prev;
  LargeBlock::Header *Next;
  uptr CommitBase;
  uptr CommitSize;
  MemMapT MemMap;
};

static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
static_assert(!archSupportsMemoryTagging() ||
                  sizeof(Header) % archMemoryTagGranuleSize() == 0,
              "");

constexpr uptr getHeaderSize() { return sizeof(Header); }

template <typename Config> static uptr addHeaderTag(uptr Ptr) {
  if (allocatorSupportsMemoryTagging<Config>())
    return addFixedTag(Ptr, 1);
  return Ptr;
}

template <typename Config> static Header *getHeader(uptr Ptr) {
  return reinterpret_cast<Header *>(addHeaderTag<Config>(Ptr)) - 1;
}

template <typename Config> static Header *getHeader(const void *Ptr) {
  return getHeader<Config>(reinterpret_cast<uptr>(Ptr));
}

} // namespace LargeBlock

static inline void unmap(MemMapT &MemMap) { MemMap.unmap(); }

namespace {

struct CachedBlock {
  static constexpr u16 CacheIndexMax = UINT16_MAX;
  static constexpr u16 EndOfListVal = CacheIndexMax;

  // We allow a certain amount of fragmentation and part of the fragmented
  // bytes will be released by `releaseAndZeroPagesToOS()`. This increases the
  // cache hit rate and reduces the RSS overhead at the same time. See more
  // details in the `MapAllocatorCache::retrieve()` section.
  //
  // We arrived at this default value after noticing that mapping in larger
  // memory regions performs better than releasing memory and forcing a cache
  // hit. The data suggests that beyond 4 pages, the release execution time is
  // longer than the map execution time. As such, the default is platform
  // dependent.
  static constexpr uptr MaxReleasedCachePages = 4U;

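  // CommitBase and CommitSize describe the committed region of the cached
  // mapping, BlockBegin is the start of the previously allocated block, Time
  // is the insertion timestamp (0 once the pages have been released), and
  // Next/Prev are indices into the cache's entry array for the LRU list.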
  uptr CommitBase = 0;
  uptr CommitSize = 0;
  uptr BlockBegin = 0;
  MemMapT MemMap = {};
  u64 Time = 0;
  u16 Next = 0;
  u16 Prev = 0;

  bool isValid() { return CommitBase != 0; }

  void invalidate() { CommitBase = 0; }
};
} // namespace

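// Cache implementation used when secondary caching is disabled: canCache()
// always returns false, retrieve() returns an invalid entry, and store() must
// never be reached.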
template <typename Config> class MapAllocatorNoCache {
public:
  void init(UNUSED s32 ReleaseToOsInterval) {}
  CachedBlock retrieve(UNUSED uptr MaxAllowedFragmentedBytes, UNUSED uptr Size,
                       UNUSED uptr Alignment, UNUSED uptr HeadersSize,
                       UNUSED uptr &EntryHeaderPos) {
    return {};
  }
  void store(UNUSED Options Options, UNUSED uptr CommitBase,
             UNUSED uptr CommitSize, UNUSED uptr BlockBegin,
             UNUSED MemMapT MemMap) {
    // This should never be called since canCache always returns false.
    UNREACHABLE(
        "It is not valid to call store on MapAllocatorNoCache objects.");
  }

  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}
  void releaseToOS() {}
  void disableMemoryTagging() {}
  void unmapTestOnly() {}
  bool setOption(Option O, UNUSED sptr Value) {
    if (O == Option::ReleaseInterval || O == Option::MaxCacheEntriesCount ||
        O == Option::MaxCacheEntrySize)
      return false;
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void getStats(UNUSED ScopedString *Str) {
    Str->append("Secondary Cache Disabled\n");
  }
};

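// When retrieving a cached block, up to MaxUnreleasedCachePages of fragmented
// bytes at the front of the block are kept committed; anything beyond that is
// released back to the OS (capped at CachedBlock::MaxReleasedCachePages).
// mapSecondary() also uses this value when choosing where to split a mapping
// into tagged and untagged parts when memory tagging is enabled.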
static const uptr MaxUnreleasedCachePages = 4U;

template <typename Config>
bool mapSecondary(const Options &Options, uptr CommitBase, uptr CommitSize,
                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
  Flags |= MAP_RESIZABLE;
  Flags |= MAP_ALLOWNOMEM;

  const uptr PageSize = getPageSizeCached();
  if (SCUDO_TRUSTY) {
    /*
     * On Trusty we need AllocPos to be usable for shared memory, which cannot
     * cross multiple mappings. This means we need to split around AllocPos
     * and not over it. We can only do this if the address is page-aligned.
     */
    const uptr TaggedSize = AllocPos - CommitBase;
    if (useMemoryTagging<Config>(Options) && isAligned(TaggedSize, PageSize)) {
      DCHECK_GT(TaggedSize, 0);
      return MemMap.remap(CommitBase, TaggedSize, "scudo:secondary",
                          MAP_MEMTAG | Flags) &&
             MemMap.remap(AllocPos, CommitSize - TaggedSize, "scudo:secondary",
                          Flags);
    } else {
      const uptr RemapFlags =
          (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
      return MemMap.remap(CommitBase, CommitSize, "scudo:secondary",
                          RemapFlags);
    }
  }

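  // Outside of Trusty, when memory tagging is enabled and the commit region is
  // large enough, only the leading portion of the mapping (up to UntaggedPos)
  // is mapped with MAP_MEMTAG; the remainder is mapped untagged.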
  const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize;
  if (useMemoryTagging<Config>(Options) &&
      CommitSize > MaxUnreleasedCacheBytes) {
    const uptr UntaggedPos =
        Max(AllocPos, CommitBase + MaxUnreleasedCacheBytes);
    return MemMap.remap(CommitBase, UntaggedPos - CommitBase,
                        "scudo:secondary", MAP_MEMTAG | Flags) &&
           MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
                        "scudo:secondary", Flags);
  } else {
    const uptr RemapFlags =
        (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) | Flags;
    return MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
  }
}

// Template specialization to avoid producing a zero-length array.
template <typename T, size_t Size> class NonZeroLengthArray {
public:
  T &operator[](uptr Idx) { return values[Idx]; }

private:
  T values[Size];
};
template <typename T> class NonZeroLengthArray<T, 0> {
public:
  T &operator[](uptr UNUSED Idx) { UNREACHABLE("Unsupported!"); }
};

// The default unmap callback is simply scudo::unmap.
// In testing, a different unmap callback is used to
// record information about unmaps in the cache.
template <typename Config, void (*unmapCallBack)(MemMapT &) = unmap>
class MapAllocatorCache {
public:
  void getStats(ScopedString *Str) {
    ScopedLock L(Mutex);
    uptr Integral;
    uptr Fractional;
    computePercentage(SuccessfulRetrieves, CallsToRetrieve, &Integral,
                      &Fractional);
    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    Str->append(
        "Stats: MapAllocatorCache: EntriesCount: %zu, "
        "MaxEntriesCount: %u, MaxEntrySize: %zu, ReleaseToOsIntervalMs = %d\n",
        LRUEntries.size(), atomic_load_relaxed(&MaxEntriesCount),
        atomic_load_relaxed(&MaxEntrySize), Interval >= 0 ? Interval : -1);
    Str->append("Stats: CacheRetrievalStats: SuccessRate: %u/%u "
                "(%zu.%02zu%%)\n",
                SuccessfulRetrieves, CallsToRetrieve, Integral, Fractional);
    Str->append("Cache Entry Info (Most Recent -> Least Recent):\n");

    for (CachedBlock &Entry : LRUEntries) {
      Str->append("  StartBlockAddress: 0x%zx, EndBlockAddress: 0x%zx, "
                  "BlockSize: %zu %s\n",
                  Entry.CommitBase, Entry.CommitBase + Entry.CommitSize,
                  Entry.CommitSize, Entry.Time == 0 ? "[R]" : "");
    }
  }

  // Ensure the default maximum specified fits the array.
  static_assert(Config::getDefaultMaxEntriesCount() <=
                    Config::getEntriesArraySize(),
                "");
  // Ensure the cache entry array size fits in the LRU list Next and Prev
  // index fields.
  static_assert(Config::getEntriesArraySize() <= CachedBlock::CacheIndexMax,
                "Cache entry array is too large to be indexed.");

  void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(LRUEntries.size(), 0U);
    setOption(Option::MaxCacheEntriesCount,
              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
    setOption(Option::MaxCacheEntrySize,
              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
    // The default value in the cache config has the higher priority.
    if (Config::getDefaultReleaseToOsIntervalMs() != INT32_MIN)
      ReleaseToOsInterval = Config::getDefaultReleaseToOsIntervalMs();
    setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));

    LRUEntries.clear();
    LRUEntries.init(Entries, sizeof(Entries));

    AvailEntries.clear();
    AvailEntries.init(Entries, sizeof(Entries));
    for (u32 I = 0; I < Config::getEntriesArraySize(); I++)
      AvailEntries.push_back(&Entries[I]);
  }

  void store(const Options &Options, uptr CommitBase, uptr CommitSize,
             uptr BlockBegin, MemMapT MemMap) EXCLUDES(Mutex) {
    DCHECK(canCache(CommitSize));

    const s32 Interval = atomic_load_relaxed(&ReleaseToOsIntervalMs);
    u64 Time;
    CachedBlock Entry;

    Entry.CommitBase = CommitBase;
    Entry.CommitSize = CommitSize;
    Entry.BlockBegin = BlockBegin;
    Entry.MemMap = MemMap;
    Entry.Time = UINT64_MAX;

    if (useMemoryTagging<Config>(Options)) {
      if (Interval == 0 && !SCUDO_FUCHSIA) {
        // Release the memory and make it inaccessible at the same time by
        // creating a new MAP_NOACCESS mapping on top of the existing mapping.
        // Fuchsia does not support replacing mappings by creating a new
        // mapping on top so we just do the two syscalls there.
        Entry.Time = 0;
        mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
      } else {
        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
                                         MAP_NOACCESS);
      }
    }

    // Usually only one entry will be evicted from the cache.
    // Only in the rare event that the cache shrinks in real-time
    // due to a decrease in the configurable value MaxEntriesCount
    // will more than one cache entry be evicted.
    // The vector is used to save the MemMaps of evicted entries so
    // that the unmap call can be performed outside the lock.
    Vector<MemMapT, 1U> EvictionMemMaps;

    do {
      ScopedLock L(Mutex);

      // Time must be computed under the lock to ensure
      // that the LRU cache remains sorted with respect to
      // time in a multithreaded environment.
      Time = getMonotonicTimeFast();
      if (Entry.Time != 0)
        Entry.Time = Time;

      if (useMemoryTagging<Config>(Options) && QuarantinePos == -1U) {
        // If we get here then memory tagging was disabled in between when we
        // read Options and when we locked Mutex. We can't insert our entry
        // into the quarantine or the cache because the permissions would be
        // wrong so just unmap it.
        unmapCallBack(Entry.MemMap);
        break;
      }
      if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
        QuarantinePos =
            (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
        if (!Quarantine[QuarantinePos].isValid()) {
          Quarantine[QuarantinePos] = Entry;
          return;
        }
        CachedBlock PrevEntry = Quarantine[QuarantinePos];
        Quarantine[QuarantinePos] = Entry;
        if (OldestTime == 0)
          OldestTime = Entry.Time;
        Entry = PrevEntry;
      }

      // All excess entries are evicted from the cache. Note that when
      // `MaxEntriesCount` is zero, cache storing shouldn't happen and it's
      // guarded by the `DCHECK(canCache(CommitSize))` above. As a result, we
      // won't try to pop `LRUEntries` when it's empty.
      while (LRUEntries.size() >= atomic_load_relaxed(&MaxEntriesCount)) {
        // Save MemMaps of evicted entries to perform unmap outside of lock.
        CachedBlock *Entry = LRUEntries.back();
        EvictionMemMaps.push_back(Entry->MemMap);
        remove(Entry);
      }

      insert(Entry);

      if (OldestTime == 0)
        OldestTime = Entry.Time;
    } while (0);

    for (MemMapT &EvictMemMap : EvictionMemMaps)
      unmapCallBack(EvictMemMap);

    if (Interval >= 0) {
      // TODO: Add ReleaseToOS logic to LRU algorithm
      releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
    }
  }

  CachedBlock retrieve(uptr MaxAllowedFragmentedPages, uptr Size,
                       uptr Alignment, uptr HeadersSize, uptr &EntryHeaderPos)
      EXCLUDES(Mutex) {
    const uptr PageSize = getPageSizeCached();
    // 10% of the requested size proved to be the optimal choice for
    // retrieving cached blocks after testing several options.
    constexpr u32 FragmentedBytesDivisor = 10;
    CachedBlock Entry;
    EntryHeaderPos = 0;
    {
      ScopedLock L(Mutex);
      CallsToRetrieve++;
      if (LRUEntries.size() == 0)
        return {};
      CachedBlock *RetrievedEntry = nullptr;
      uptr MinDiff = UINTPTR_MAX;

      // Since allocation sizes don't always match cached memory chunk sizes
      // we allow some memory to be unused (called fragmented bytes). The
      // amount of unused bytes is exactly EntryHeaderPos - CommitBase.
      //
      //     CommitBase                     CommitBase + CommitSize
      //          V                                  V
      //      +---+------------+-----------------+---+
      //      |   |            |                 |   |
      //      +---+------------+-----------------+---+
      //      ^                ^                     ^
      //    Guard         EntryHeaderPos         Guard-page-end
      //  page-begin
      //
      // [EntryHeaderPos, CommitBase + CommitSize) contains the user data as
      // well as the header metadata. If EntryHeaderPos - CommitBase exceeds
      // MaxAllowedFragmentedPages * PageSize, the cached memory chunk is
      // not considered valid for retrieval.
      for (CachedBlock &Entry : LRUEntries) {
        const uptr CommitBase = Entry.CommitBase;
        const uptr CommitSize = Entry.CommitSize;
        const uptr AllocPos =
            roundDown(CommitBase + CommitSize - Size, Alignment);
        const uptr HeaderPos = AllocPos - HeadersSize;
        const uptr MaxAllowedFragmentedBytes =
            MaxAllowedFragmentedPages * PageSize;
        if (HeaderPos > CommitBase + CommitSize)
          continue;
        // TODO: Remove AllocPos > CommitBase + MaxAllowedFragmentedBytes
        // and replace with Diff > MaxAllowedFragmentedBytes
        if (HeaderPos < CommitBase ||
            AllocPos > CommitBase + MaxAllowedFragmentedBytes) {
          continue;
        }

        const uptr Diff = roundDown(HeaderPos, PageSize) - CommitBase;

        // Keep track of the smallest cached block
        // that is greater than (AllocSize + HeaderSize)
        if (Diff >= MinDiff)
          continue;

        MinDiff = Diff;
        RetrievedEntry = &Entry;
        EntryHeaderPos = HeaderPos;

        // Immediately use a cached block if its size is close enough to the
        // requested size.
        const uptr OptimalFitThresholdBytes =
            (CommitBase + CommitSize - HeaderPos) / FragmentedBytesDivisor;
        if (Diff <= OptimalFitThresholdBytes)
          break;
      }

      if (RetrievedEntry != nullptr) {
        Entry = *RetrievedEntry;
        remove(RetrievedEntry);
        SuccessfulRetrieves++;
      }
    }

    // The difference between the retrieved memory chunk and the request
    // size is at most MaxAllowedFragmentedPages.
    //
    //   +- MaxAllowedFragmentedPages * PageSize -+
    //   +--------------------------+-------------+
    //   |                          |             |
    //   +--------------------------+-------------+
    //    \  Bytes to be released  /              ^
    //                                            |
    //                              (may or may not be committed)
    //
    // The maximum number of bytes released to the OS is capped by
    // MaxReleasedCachePages.
    //
    // TODO: Consider making MaxReleasedCachePages configurable since
    // the release to OS API can vary across systems.
    if (Entry.Time != 0) {
      const uptr FragmentedBytes =
          roundDown(EntryHeaderPos, PageSize) - Entry.CommitBase;
      const uptr MaxUnreleasedCacheBytes = MaxUnreleasedCachePages * PageSize;
      if (FragmentedBytes > MaxUnreleasedCacheBytes) {
        const uptr MaxReleasedCacheBytes =
            CachedBlock::MaxReleasedCachePages * PageSize;
        uptr BytesToRelease =
            roundUp(Min<uptr>(MaxReleasedCacheBytes,
                              FragmentedBytes - MaxUnreleasedCacheBytes),
                    PageSize);
        Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, BytesToRelease);
      }
    }

    return Entry;
  }

  bool canCache(uptr Size) {
    return atomic_load_relaxed(&MaxEntriesCount) != 0U &&
           Size <= atomic_load_relaxed(&MaxEntrySize);
  }

  bool setOption(Option O, sptr Value) {
    if (O == Option::ReleaseInterval) {
      const s32 Interval = Max(
          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
          Config::getMinReleaseToOsIntervalMs());
      atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
      return true;
    }
    if (O == Option::MaxCacheEntriesCount) {
      if (Value < 0)
        return false;
      atomic_store_relaxed(
          &MaxEntriesCount,
          Min<u32>(static_cast<u32>(Value), Config::getEntriesArraySize()));
      return true;
    }
    if (O == Option::MaxCacheEntrySize) {
      atomic_store_relaxed(&MaxEntrySize, static_cast<uptr>(Value));
      return true;
    }
    // Not supported by the Secondary Cache, but not an error either.
    return true;
  }

  void releaseToOS() { releaseOlderThan(UINT64_MAX); }

  void disableMemoryTagging() EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
      if (Quarantine[I].isValid()) {
        MemMapT &MemMap = Quarantine[I].MemMap;
        unmapCallBack(MemMap);
        Quarantine[I].invalidate();
      }
    }
    for (CachedBlock &Entry : LRUEntries)
      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
    QuarantinePos = -1U;
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS { Mutex.lock(); }

  void enable() NO_THREAD_SAFETY_ANALYSIS { Mutex.unlock(); }

  void unmapTestOnly() { empty(); }

private:
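  // insert() takes an entry off the available list and puts it at the front
  // (most-recently-used end) of the LRU list; remove() invalidates an entry
  // and returns it to the available list.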
  void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
    CachedBlock *AvailEntry = AvailEntries.front();
    AvailEntries.pop_front();

    *AvailEntry = Entry;
    LRUEntries.push_front(AvailEntry);
  }

  void remove(CachedBlock *Entry) REQUIRES(Mutex) {
    DCHECK(Entry->isValid());
    LRUEntries.remove(Entry);
    Entry->invalidate();
    AvailEntries.push_front(Entry);
  }

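  // Unmaps every cached entry. The MemMaps are copied out under the lock so
  // that the actual unmap calls can happen without holding it.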
  void empty() {
    MemMapT MapInfo[Config::getEntriesArraySize()];
    uptr N = 0;
    {
      ScopedLock L(Mutex);

      for (CachedBlock &Entry : LRUEntries)
        MapInfo[N++] = Entry.MemMap;
      LRUEntries.clear();
    }
    for (uptr I = 0; I < N; I++) {
      MemMapT &MemMap = MapInfo[I];
      unmapCallBack(MemMap);
    }
  }

  void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
    if (!Entry.isValid() || !Entry.Time)
      return;
    if (Entry.Time > Time) {
      if (OldestTime == 0 || Entry.Time < OldestTime)
        OldestTime = Entry.Time;
      return;
    }
    Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
    Entry.Time = 0;
  }

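  // Releases the pages of every quarantined and cached entry whose timestamp
  // is older than Time, and records the oldest remaining timestamp.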
  void releaseOlderThan(u64 Time) EXCLUDES(Mutex) {
    ScopedLock L(Mutex);
    if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
      return;
    OldestTime = 0;
    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
      releaseIfOlderThan(Quarantine[I], Time);
    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
      releaseIfOlderThan(Entries[I], Time);
  }

  HybridMutex Mutex;
  u32 QuarantinePos GUARDED_BY(Mutex) = 0;
  atomic_u32 MaxEntriesCount = {};
  atomic_uptr MaxEntrySize = {};
  u64 OldestTime GUARDED_BY(Mutex) = 0;
  atomic_s32 ReleaseToOsIntervalMs = {};
  u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
  u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;

  CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
      Quarantine GUARDED_BY(Mutex) = {};

  // Cached blocks stored in LRU order.
  DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
  // The unused Entries.
  SinglyLinkedList<CachedBlock> AvailEntries GUARDED_BY(Mutex);
};

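// The Secondary allocator itself: large allocations are mapped directly,
// tracked in an in-use list for iteration and statistics, and routed through
// the configured cache (CacheT) before new memory is mapped.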
template <typename Config> class MapAllocator {
public:
  void init(GlobalStats *S,
            s32 ReleaseToOsInterval = -1) NO_THREAD_SAFETY_ANALYSIS {
    DCHECK_EQ(AllocatedBytes, 0U);
    DCHECK_EQ(FreedBytes, 0U);
    Cache.init(ReleaseToOsInterval);
    Stats.init();
    if (LIKELY(S))
      S->link(&Stats);
  }

  void *allocate(const Options &Options, uptr Size, uptr AlignmentHint = 0,
                 uptr *BlockEnd = nullptr,
                 FillContentsMode FillContents = NoFill);

  void deallocate(const Options &Options, void *Ptr);

  void *tryAllocateFromCache(const Options &Options, uptr Size, uptr Alignment,
                             uptr *BlockEndPtr, FillContentsMode FillContents);

  static uptr getBlockEnd(void *Ptr) {
    auto *B = LargeBlock::getHeader<Config>(Ptr);
    return B->CommitBase + B->CommitSize;
  }

  static uptr getBlockSize(void *Ptr) {
    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
  }

  static constexpr uptr getHeadersSize() {
    return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
  }

  void disable() NO_THREAD_SAFETY_ANALYSIS {
    Mutex.lock();
    Cache.disable();
  }

  void enable() NO_THREAD_SAFETY_ANALYSIS {
    Cache.enable();
    Mutex.unlock();
  }

  template <typename F> void iterateOverBlocks(F Callback) const {
    Mutex.assertHeld();

    for (const auto &H : InUseBlocks) {
      uptr Ptr = reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize();
      if (allocatorSupportsMemoryTagging<Config>())
        Ptr = untagPointer(Ptr);
      Callback(Ptr);
    }
  }

  bool canCache(uptr Size) { return Cache.canCache(Size); }

  bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }

  void releaseToOS() { Cache.releaseToOS(); }

  void disableMemoryTagging() { Cache.disableMemoryTagging(); }

  void unmapTestOnly() { Cache.unmapTestOnly(); }

  void getStats(ScopedString *Str);

private:
  typename Config::template CacheT<typename Config::CacheConfig> Cache;

  mutable HybridMutex Mutex;
  DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
  uptr AllocatedBytes GUARDED_BY(Mutex) = 0;
  uptr FreedBytes GUARDED_BY(Mutex) = 0;
  uptr FragmentedBytes GUARDED_BY(Mutex) = 0;
  uptr LargestSize GUARDED_BY(Mutex) = 0;
  u32 NumberOfAllocs GUARDED_BY(Mutex) = 0;
  u32 NumberOfFrees GUARDED_BY(Mutex) = 0;
  LocalStats Stats GUARDED_BY(Mutex);
};

template <typename Config>
void *
MapAllocator<Config>::tryAllocateFromCache(const Options &Options, uptr Size,
                                           uptr Alignment, uptr *BlockEndPtr,
                                           FillContentsMode FillContents) {
  CachedBlock Entry;
  uptr EntryHeaderPos;
  uptr MaxAllowedFragmentedPages = MaxUnreleasedCachePages;

  if (LIKELY(!useMemoryTagging<Config>(Options))) {
    MaxAllowedFragmentedPages += CachedBlock::MaxReleasedCachePages;
  } else {
    // TODO: Enabling MaxReleasedCachePages may result in an entry's pages
    // being partially released, which erases the tags of those pages as well.
    // To support this feature for MTE, we need to tag those pages again.
    DCHECK_EQ(MaxAllowedFragmentedPages, MaxUnreleasedCachePages);
  }

  Entry = Cache.retrieve(MaxAllowedFragmentedPages, Size, Alignment,
                         getHeadersSize(), EntryHeaderPos);
  if (!Entry.isValid())
    return nullptr;

  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(EntryHeaderPos));
  bool Zeroed = Entry.Time == 0;
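  // With memory tagging enabled, make the mapping accessible again and fix up
  // the tags between the cached block's previous begin and the new block
  // begin: retag from the commit base if the pages were released (their tags
  // were zeroed), otherwise set or clear tags only over the gap between the
  // old and new block begin.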
694*76559068SAndroid Build Coastguard Worker if (useMemoryTagging<Config>(Options)) {
695*76559068SAndroid Build Coastguard Worker uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
696*76559068SAndroid Build Coastguard Worker Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
697*76559068SAndroid Build Coastguard Worker if (Zeroed) {
698*76559068SAndroid Build Coastguard Worker storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
699*76559068SAndroid Build Coastguard Worker NewBlockBegin);
700*76559068SAndroid Build Coastguard Worker } else if (Entry.BlockBegin < NewBlockBegin) {
701*76559068SAndroid Build Coastguard Worker storeTags(Entry.BlockBegin, NewBlockBegin);
702*76559068SAndroid Build Coastguard Worker } else {
703*76559068SAndroid Build Coastguard Worker storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
704*76559068SAndroid Build Coastguard Worker }
705*76559068SAndroid Build Coastguard Worker }
706*76559068SAndroid Build Coastguard Worker
707*76559068SAndroid Build Coastguard Worker H->CommitBase = Entry.CommitBase;
708*76559068SAndroid Build Coastguard Worker H->CommitSize = Entry.CommitSize;
709*76559068SAndroid Build Coastguard Worker H->MemMap = Entry.MemMap;
710*76559068SAndroid Build Coastguard Worker
711*76559068SAndroid Build Coastguard Worker const uptr BlockEnd = H->CommitBase + H->CommitSize;
712*76559068SAndroid Build Coastguard Worker if (BlockEndPtr)
713*76559068SAndroid Build Coastguard Worker *BlockEndPtr = BlockEnd;
714*76559068SAndroid Build Coastguard Worker uptr HInt = reinterpret_cast<uptr>(H);
715*76559068SAndroid Build Coastguard Worker if (allocatorSupportsMemoryTagging<Config>())
716*76559068SAndroid Build Coastguard Worker HInt = untagPointer(HInt);
717*76559068SAndroid Build Coastguard Worker const uptr PtrInt = HInt + LargeBlock::getHeaderSize();
718*76559068SAndroid Build Coastguard Worker void *Ptr = reinterpret_cast<void *>(PtrInt);
719*76559068SAndroid Build Coastguard Worker if (FillContents && !Zeroed)
720*76559068SAndroid Build Coastguard Worker memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
721*76559068SAndroid Build Coastguard Worker BlockEnd - PtrInt);
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += H->CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - H->CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, H->CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return Ptr;
}
// As with the Primary, the size passed to this function includes any desired
// alignment, so that the frontend can align the user allocation. The Alignment
// parameter allows us to unmap spurious memory when dealing with larger
// (greater than a page) alignments on 32-bit platforms.
// Due to the sparsity of address space available on those platforms,
// requesting an allocation from the Secondary with a large alignment would end
// up wasting VA space (even though we are not committing the whole thing),
// hence the need to trim off some of the reserved space.
// For allocations requested with an alignment greater than or equal to a page,
// the committed memory will amount to something close to Size - Alignment
// (pending rounding and headers).
template <typename Config>
void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
                                     uptr Alignment, uptr *BlockEndPtr,
                                     FillContentsMode FillContents) {
  if (Options.get(OptionBit::AddLargeAllocationSlack))
    Size += 1UL << SCUDO_MIN_ALIGNMENT_LOG;
  Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
  const uptr PageSize = getPageSizeCached();

  // Note that cached blocks may already have an aligned address. Thus we
  // simply pass the required size (`Size` + `getHeadersSize()`) to do the
  // cache lookup.
  const uptr MinNeededSizeForCache = roundUp(Size + getHeadersSize(), PageSize);

  if (Alignment < PageSize && Cache.canCache(MinNeededSizeForCache)) {
    void *Ptr = tryAllocateFromCache(Options, Size, Alignment, BlockEndPtr,
                                     FillContents);
    if (Ptr != nullptr)
      return Ptr;
  }

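  // No reusable cached block: reserve a fresh mapping. Round up so that the
  // user block, the headers, and any alignment padding fit in whole pages;
  // for alignments above a page, over-reserve by Alignment - PageSize so an
  // aligned position is guaranteed to exist within the mapping.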
  uptr RoundedSize =
      roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
  if (Alignment > PageSize)
    RoundedSize += Alignment - PageSize;

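  // Reserve the rounded size plus one guard page on each side of the block.
  // MAP_ALLOWNOMEM lets the reservation fail gracefully instead of aborting.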
  ReservedMemoryT ReservedMemory;
  const uptr MapSize = RoundedSize + 2 * PageSize;
  if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                      MAP_ALLOWNOMEM))) {
    return nullptr;
  }

  // Take entire ownership of the reserved region.
  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                           ReservedMemory.getCapacity());
  uptr MapBase = MemMap.getBase();
  uptr CommitBase = MapBase + PageSize;
  uptr MapEnd = MapBase + MapSize;

  // In the unlikely event of alignments larger than a page, adjust the amount
  // of memory we want to commit, and trim the extra memory.
  if (UNLIKELY(Alignment >= PageSize)) {
    // For alignments greater than or equal to a page, the user pointer (e.g.,
    // the pointer that is returned by the C or C++ allocation APIs) ends up
    // on a page boundary, and our headers will live in the preceding page.
    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
    const uptr NewMapBase = CommitBase - PageSize;
    DCHECK_GE(NewMapBase, MapBase);
    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
    // are less constrained memory-wise, and that saves us two syscalls.
    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
      MemMap.unmap(MapBase, NewMapBase - MapBase);
      MapBase = NewMapBase;
    }
    const uptr NewMapEnd =
        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
    DCHECK_LE(NewMapEnd, MapEnd);
    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
      MapEnd = NewMapEnd;
    }
  }

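  // Commit everything between the two guard pages. The user block is placed
  // at the highest Alignment-aligned address that still fits, so the headers
  // end up immediately below it within the committed range.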
  const uptr CommitSize = MapEnd - PageSize - CommitBase;
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                            MemMap)) {
    unmap(MemMap);
    return nullptr;
  }
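  // The header sits right below the user block; when memory tagging is in
  // use, tag the memory from the committed base up to the start of the
  // block's usable region so that the header carries a matching tag.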
  const uptr HeaderPos = AllocPos - getHeadersSize();
  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
      LargeBlock::addHeaderTag<Config>(HeaderPos));
  if (useMemoryTagging<Config>(Options))
    storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
              reinterpret_cast<uptr>(H + 1));
  H->CommitBase = CommitBase;
  H->CommitSize = CommitSize;
  H->MemMap = MemMap;
  if (BlockEndPtr)
    *BlockEndPtr = CommitBase + CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.push_back(H);
    AllocatedBytes += CommitSize;
    FragmentedBytes += H->MemMap.getCapacity() - CommitSize;
    if (LargestSize < CommitSize)
      LargestSize = CommitSize;
    NumberOfAllocs++;
    Stats.add(StatAllocated, CommitSize);
    Stats.add(StatMapped, H->MemMap.getCapacity());
  }
  return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}

template <typename Config>
void MapAllocator<Config>::deallocate(const Options &Options, void *Ptr)
    EXCLUDES(Mutex) {
  LargeBlock::Header *H = LargeBlock::getHeader<Config>(Ptr);
  const uptr CommitSize = H->CommitSize;
  {
    ScopedLock L(Mutex);
    InUseBlocks.remove(H);
    FreedBytes += CommitSize;
    FragmentedBytes -= H->MemMap.getCapacity() - CommitSize;
    NumberOfFrees++;
    Stats.sub(StatAllocated, CommitSize);
    Stats.sub(StatMapped, H->MemMap.getCapacity());
  }

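  // Blocks small enough for the cache are stored for later reuse; anything
  // else is unmapped immediately.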
  if (Cache.canCache(H->CommitSize)) {
    Cache.store(Options, H->CommitBase, H->CommitSize,
                reinterpret_cast<uptr>(H + 1), H->MemMap);
  } else {
    // Note that `H->MemMap` is stored within the pages that it manages. Take
    // over ownership with a local copy before unmap() so that nothing done
    // along with the unmapping touches pages that have become inaccessible.
    MemMapT MemMap = H->MemMap;
    unmap(MemMap);
  }
}

template <typename Config>
void MapAllocator<Config>::getStats(ScopedString *Str) EXCLUDES(Mutex) {
  ScopedLock L(Mutex);
  Str->append("Stats: MapAllocator: allocated %u times (%zuK), freed %u times "
              "(%zuK), remains %u (%zuK) max %zuM, Fragmented %zuK\n",
              NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
              FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
              (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20,
              FragmentedBytes >> 10);
  Cache.getStats(Str);
}

} // namespace scudo

#endif // SCUDO_SECONDARY_H_