// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page allocator.
//
// The page allocator manages mapped pages (defined by pageSize, NOT
// physPageSize) for allocation and re-use. It is embedded into mheap.
//
// Pages are managed using a bitmap that is sharded into chunks.
// In the bitmap, 1 means in-use, and 0 means free. The bitmap spans the
// process's address space. Chunks are managed in a sparse-array-style structure
// similar to mheap.arenas, since the bitmap may be large on some systems.
//
// The bitmap is efficiently searched by using a radix tree in combination
// with fast bit-wise intrinsics. Allocation is performed using an address-ordered
// first-fit approach.
//
// Each entry in the radix tree is a summary that describes three properties of
// a particular region of the address space: the number of contiguous free pages
// at the start and end of the region it represents, and the maximum number of
// contiguous free pages found anywhere in that region.
//
// Each level of the radix tree is stored as one contiguous array, which represents
// a different granularity of subdivision of the process's address space. Thus, this
// radix tree is actually implicit in these large arrays, as opposed to having explicit
// dynamically-allocated pointer-based node structures. Naturally, these arrays may be
// quite large for systems with large address spaces, so in these cases they are mapped
// into memory as needed. Each leaf summary of the tree corresponds to a bitmap chunk.
//
// The root level (referred to as L0 and index 0 in pageAlloc.summary) has each
// summary represent the largest section of address space (16 GiB on 64-bit systems),
// with each subsequent level representing successively smaller subsections until we
// reach the finest granularity at the leaves, a chunk.
//
// More specifically, each summary in each level (except for leaf summaries)
// represents some number of entries in the following level. For example, each
// summary in the root level may represent a 16 GiB region of address space,
// and in the next level there could be 8 corresponding entries which represent 2
// GiB subsections of that 16 GiB region, each of which could correspond to 8
// entries in the next level which each represent 256 MiB regions, and so on.
//
// Thus, this design scales only to heaps of a bounded size, but it can always be
// extended to larger heaps by simply adding levels to the radix tree, which mostly
// costs additional virtual address space. The choice of managing large arrays also
// means that a large amount of virtual address space may be reserved by the runtime.
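//
// For a concrete sense of scale (a sketch assuming heapAddrBits = 48,
// pageShift = 13, and summaryLevels = 5, the values used by
// mpagealloc_64bit.go): a chunk is 512 pages * 8 KiB = 4 MiB, and with a
// fanout of 1<<3 = 8 at each level, one summary covers
//
//	L4 (leaf): 4 MiB  (one chunk)
//	L3:        32 MiB
//	L2:        256 MiB
//	L1:        2 GiB
//	L0 (root): 16 GiB
//
// of address space, which matches the 16 GiB root granularity noted above.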

package runtime

import (
	"internal/runtime/atomic"
	"unsafe"
)

const (
	// The size of a bitmap chunk, i.e. the number of bits (that is, pages) to consider
	// in the bitmap at once.
	pallocChunkPages    = 1 << logPallocChunkPages
	pallocChunkBytes    = pallocChunkPages * pageSize
	logPallocChunkPages = 9
	logPallocChunkBytes = logPallocChunkPages + pageShift

	// The number of radix bits for each level.
	//
	// The value of 3 is chosen such that the block of summaries we need to scan at
	// each level fits in 64 bytes (2^3 summaries * 8 bytes per summary), which is
	// close to the L1 cache line width on many systems. Also, a value of 3 fits 4 tree
	// levels perfectly into the 21-bit pallocBits summary field at the root level.
	//
	// The following equation shows how these constants relate:
	// summaryL0Bits + (summaryLevels-1)*summaryLevelBits + logPallocChunkBytes = heapAddrBits
	//
	// summaryLevels is an architecture-dependent value defined in mpagealloc_*.go.
	summaryLevelBits = 3
	summaryL0Bits    = heapAddrBits - logPallocChunkBytes - (summaryLevels-1)*summaryLevelBits

	// pallocChunksL2Bits is the number of bits of the chunk index number
	// covered by the second level of the chunks map.
	//
	// See (*pageAlloc).chunks for more details. Update the documentation
	// there should this change.
	pallocChunksL2Bits  = heapAddrBits - logPallocChunkBytes - pallocChunksL1Bits
	pallocChunksL1Shift = pallocChunksL2Bits
)
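
// To make the relationships above concrete, here is a worked instantiation for
// a typical 64-bit configuration (assuming heapAddrBits = 48, pageShift = 13,
// summaryLevels = 5, and pallocChunksL1Bits = 13, per mpagealloc_64bit.go):
//
//	logPallocChunkBytes = 9 + 13 = 22
//	summaryL0Bits       = 48 - 22 - (5-1)*3 = 14
//	pallocChunksL2Bits  = 48 - 22 - 13 = 13
//
// and indeed 14 + 4*3 + 22 = 48 = heapAddrBits, as the equation requires.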

// maxSearchAddr returns the maximum searchAddr value, which indicates
// that the heap has no free space.
//
// This function exists just to make it clear that this is the maximum address
// for the page allocator's search space. See maxOffAddr for details.
//
// It's a function (rather than a variable) because it needs to be
// usable before package runtime's dynamic initialization is complete.
// See #51913 for details.
func maxSearchAddr() offAddr { return maxOffAddr }

// Global chunk index.
//
// Represents an index into the leaf level of the radix tree.
// Similar to arenaIndex, except instead of arenas, it divides the address
// space into chunks.
type chunkIdx uint

// chunkIndex returns the global index of the palloc chunk containing the
// pointer p.
func chunkIndex(p uintptr) chunkIdx {
	return chunkIdx((p - arenaBaseOffset) / pallocChunkBytes)
}

// chunkBase returns the base address of the palloc chunk at index ci.
func chunkBase(ci chunkIdx) uintptr {
	return uintptr(ci)*pallocChunkBytes + arenaBaseOffset
}

// chunkPageIndex computes the index of the page that contains p,
// relative to the chunk which contains p.
func chunkPageIndex(p uintptr) uint {
	return uint(p % pallocChunkBytes / pageSize)
}
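
// The three helpers above decompose and reassemble addresses. As a sketch
// (hypothetical values; any address p managed by the page allocator works):
//
//	ci := chunkIndex(p)     // which chunk p falls in
//	pi := chunkPageIndex(p) // which page within that chunk
//	pageBase := chunkBase(ci) + uintptr(pi)*pageSize
//	// pageBase <= p && p < pageBase+pageSize
//
// This round-trip holds because arenaBaseOffset is aligned to pallocChunkBytes
// on all supported platforms, so the offset subtraction in chunkIndex and the
// plain modulus in chunkPageIndex agree.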

// l1 returns the index into the first level of (*pageAlloc).chunks.
func (i chunkIdx) l1() uint {
	if pallocChunksL1Bits == 0 {
		// Let the compiler optimize this away if there's no
		// L1 map.
		return 0
	} else {
		return uint(i) >> pallocChunksL1Shift
	}
}

// l2 returns the index into the second level of (*pageAlloc).chunks.
func (i chunkIdx) l2() uint {
	if pallocChunksL1Bits == 0 {
		return uint(i)
	} else {
		return uint(i) & (1<<pallocChunksL2Bits - 1)
	}
}
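
// l1 and l2 simply split the chunk index's bits. As a sketch, when an L1 map
// exists the index can be reassembled as:
//
//	ci == chunkIdx(ci.l1())<<pallocChunksL1Shift | chunkIdx(ci.l2())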

// offAddrToLevelIndex converts an address in the offset address space
// to the index into summary[level] containing addr.
func offAddrToLevelIndex(level int, addr offAddr) int {
	return int((addr.a - arenaBaseOffset) >> levelShift[level])
}

// levelIndexToOffAddr converts an index into summary[level] into
// the corresponding address in the offset address space.
func levelIndexToOffAddr(level, idx int) offAddr {
	return offAddr{(uintptr(idx) << levelShift[level]) + arenaBaseOffset}
}

// addrsToSummaryRange converts base and limit pointers into a range
// of entries for the given summary level.
//
// The returned range is inclusive on the lower bound and exclusive on
// the upper bound.
func addrsToSummaryRange(level int, base, limit uintptr) (lo int, hi int) {
	// This is slightly more nuanced than just a shift for the exclusive
	// upper-bound. Note that the exclusive upper bound may be within a
	// summary at this level, meaning if we just do the obvious computation
	// hi will end up being an inclusive upper bound. Unfortunately, just
	// adding 1 to that is too broad since we might be on the very edge
	// of a summary's max page count boundary for this level
	// (1 << levelLogPages[level]). So, make limit an inclusive upper bound
	// then shift, then add 1, so we get an exclusive upper bound at the end.
	lo = int((base - arenaBaseOffset) >> levelShift[level])
	hi = int(((limit-1)-arenaBaseOffset)>>levelShift[level]) + 1
	return
}
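
// A worked example of the limit handling above (hypothetical numbers):
// suppose levelShift[level] = 22 (4 MiB granularity) and [base, limit) is
// [0 MiB, 8 MiB) in the offset space. Then lo = 0 and
// hi = ((8 MiB - 1) >> 22) + 1 = 2, i.e. exactly summaries 0 and 1.
// Computing limit>>22 + 1 instead would give 3 here, over-counting whenever
// limit sits exactly on a summary boundary, while limit>>22 alone would give
// 1 for limit = 6 MiB, wrongly excluding summary 1.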

// blockAlignSummaryRange aligns indices into the given level to that
// level's block width (1 << levelBits[level]). It assumes lo is inclusive
// and hi is exclusive, and so aligns them down and up respectively.
func blockAlignSummaryRange(level int, lo, hi int) (int, int) {
	e := uintptr(1) << levelBits[level]
	return int(alignDown(uintptr(lo), e)), int(alignUp(uintptr(hi), e))
}

type pageAlloc struct {
	// Radix tree of summaries.
	//
	// Each slice's cap represents the whole memory reservation.
	// Each slice's len reflects the allocator's maximum known
	// mapped heap address for that level.
	//
	// The backing store of each summary level is reserved in init
	// and may or may not be committed in grow (small address spaces
	// may commit all the memory in init).
	//
	// The purpose of keeping len <= cap is to enforce bounds checks
	// on the top end of the slice so that instead of an unknown
	// runtime segmentation fault, we get a much friendlier out-of-bounds
	// error.
	//
	// To iterate over a summary level, use inUse to determine which ranges
	// are currently available. Otherwise one might try to access
	// memory which is only Reserved, which may result in a hard fault.
	//
	// We may still get segmentation faults for addresses < len since some
	// of that memory may not be committed yet.
	summary [summaryLevels][]pallocSum

	// chunks is a slice of bitmap chunks.
	//
	// The total size of chunks is quite large on most 64-bit platforms
	// (O(GiB) or more) if flattened, so rather than making one large mapping
	// (which has problems on some platforms, even when PROT_NONE) we use a
	// two-level sparse array approach similar to the arena index in mheap.
	//
	// To find the chunk containing a memory address `a`, do:
	//   chunkOf(chunkIndex(a))
	//
	// Below is a table describing the configuration for chunks for various
	// heapAddrBits supported by the runtime.
	//
	// heapAddrBits | L1 Bits | L2 Bits | L2 Entry Size
	// ------------------------------------------------
	// 32           | 0       | 10      | 128 KiB
	// 33 (iOS)     | 0       | 11      | 256 KiB
	// 48           | 13      | 13      | 1 MiB
	//
	// There's no reason to use the L1 part of chunks on 32-bit: the
	// address space is small, so the L2 is small. For platforms with a
	// 48-bit address space, we pick the L1 such that the L2 is 1 MiB
	// in size, which strikes a good balance between fine granularity and
	// keeping the impact on BSS low (note the L1 is stored directly
	// in pageAlloc).
	//
	// To iterate over the bitmap, use inUse to determine which ranges
	// are currently available. Otherwise one might iterate over unused
	// ranges.
	//
	// Protected by mheapLock.
	//
	// TODO(mknyszek): Consider changing the definition of the bitmap
	// such that 1 means free and 0 means in-use so that summaries and
	// the bitmaps align better on zero-values.
	chunks [1 << pallocChunksL1Bits]*[1 << pallocChunksL2Bits]pallocData

	// The address to start an allocation search with. It must never
	// point to any memory that is not contained in inUse, i.e.
	// inUse.contains(searchAddr.addr()) must always be true. The one
	// exception to this rule is that it may take on the value of
	// maxOffAddr to indicate that the heap is exhausted.
	//
	// We guarantee that all valid heap addresses below this value
	// are allocated and not worth searching.
	searchAddr offAddr

	// start and end represent the chunk indices
	// which pageAlloc knows about. It assumes
	// chunks in the range [start, end) are
	// currently ready to use.
	start, end chunkIdx

	// inUse is a slice of ranges of address space which are
	// known by the page allocator to be currently in-use (passed
	// to grow).
	//
	// We care much more about having a contiguous heap in these cases
	// and take additional measures to ensure that, so in nearly all
	// cases this should have just 1 element.
	//
	// All access is protected by the mheapLock.
	inUse addrRanges

	// scav stores the scavenger state.
	scav struct {
		// index is an efficient index of chunks that have pages available to
		// scavenge.
		index scavengeIndex

		// releasedBg is the amount of memory released in the background this
		// scavenge cycle.
		releasedBg atomic.Uintptr

		// releasedEager is the amount of memory released eagerly this scavenge
		// cycle.
		releasedEager atomic.Uintptr
	}

	// mheap_.lock. This level of indirection makes it possible
	// to test pageAlloc independently of the runtime allocator.
	mheapLock *mutex

	// sysStat is the runtime memstat to update when new system
	// memory is committed by the pageAlloc for allocation metadata.
	sysStat *sysMemStat

	// summaryMappedReady is the number of bytes mapped in the Ready state
	// in the summary structure. Used only for testing currently.
	//
	// Protected by mheapLock.
	summaryMappedReady uintptr

	// chunkHugePages indicates whether page bitmap chunks should be backed
	// by huge pages.
	chunkHugePages bool

	// Whether or not this struct is being used in tests.
	test bool
}
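
// To put concrete numbers on the chunks table above (a sketch assuming
// heapAddrBits = 48, so pallocChunksL1Bits = 13 and pallocChunksL2Bits = 13,
// and assuming unsafe.Sizeof(pallocData{}) = 128 bytes, i.e. two 512-bit
// bitmaps per chunk):
//
//	L1 array:      (1 << 13) pointers * 8 bytes     = 64 KiB, stored in pageAlloc
//	each L2 entry: (1 << 13) pallocData * 128 bytes = 1 MiB, mapped on demand
//
// which is where the 1 MiB L2 entry size in the table comes from.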

func (p *pageAlloc) init(mheapLock *mutex, sysStat *sysMemStat, test bool) {
	if levelLogPages[0] > logMaxPackedValue {
		// We can't represent 1<<levelLogPages[0] pages, the maximum number
		// of pages we need to represent at the root level, in a summary, which
		// is a big problem. Throw.
		print("runtime: root level max pages = ", 1<<levelLogPages[0], "\n")
		print("runtime: summary max pages = ", maxPackedValue, "\n")
		throw("root level max pages doesn't fit in summary")
	}
	p.sysStat = sysStat

	// Initialize p.inUse.
	p.inUse.init(sysStat)

	// System-dependent initialization.
	p.sysInit(test)

	// Start with the searchAddr in a state indicating there's no free memory.
	p.searchAddr = maxSearchAddr()

	// Set the mheapLock.
	p.mheapLock = mheapLock

	// Initialize the scavenge index.
	p.summaryMappedReady += p.scav.index.init(test, sysStat)

	// Set if we're in a test.
	p.test = test
}

// tryChunkOf returns the bitmap data for the given chunk.
//
// Returns nil if the chunk data has not been mapped.
func (p *pageAlloc) tryChunkOf(ci chunkIdx) *pallocData {
	l2 := p.chunks[ci.l1()]
	if l2 == nil {
		return nil
	}
	return &l2[ci.l2()]
}

// chunkOf returns the chunk at the given chunk index.
//
// The chunk index must be valid or this method may throw.
func (p *pageAlloc) chunkOf(ci chunkIdx) *pallocData {
	return &p.chunks[ci.l1()][ci.l2()]
}
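
// A short sketch contrasting the two accessors above: chunkOf is for chunk
// indices already known to be mapped (e.g. derived from p.inUse), while
// tryChunkOf tolerates arbitrary indices:
//
//	if data := p.tryChunkOf(ci); data != nil {
//		// ci's bitmap backing is mapped and safe to inspect.
//	}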

// grow sets up the metadata for the address range [base, base+size).
// It may allocate metadata, in which case *p.sysStat will be updated.
//
// p.mheapLock must be held.
func (p *pageAlloc) grow(base, size uintptr) {
	assertLockHeld(p.mheapLock)

	// Round up to chunks, since we can't deal with increments smaller
	// than chunks. Also, sysGrow expects aligned values.
	limit := alignUp(base+size, pallocChunkBytes)
	base = alignDown(base, pallocChunkBytes)

	// Grow the summary levels in a system-dependent manner.
	// We just update a bunch of additional metadata here.
	p.sysGrow(base, limit)

	// Grow the scavenge index.
	p.summaryMappedReady += p.scav.index.grow(base, limit, p.sysStat)

	// Update p.start and p.end.
	// If no growth happened yet, start == 0. This is generally
	// safe since the zero page is unmapped.
	firstGrowth := p.start == 0
	start, end := chunkIndex(base), chunkIndex(limit)
	if firstGrowth || start < p.start {
		p.start = start
	}
	if end > p.end {
		p.end = end
	}
	// Note that [base, limit) will never overlap with any existing
	// range inUse because grow only ever adds never-used memory
	// regions to the page allocator.
	p.inUse.add(makeAddrRange(base, limit))

	// A grow operation is a lot like a free operation, so if our
	// chunk ends up below p.searchAddr, update p.searchAddr to the
	// new address, just like in free.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}

	// Add entries into chunks, which is sparse, if needed. Then,
	// initialize the bitmap.
	//
	// Newly-grown memory is always considered scavenged.
	// Set all the bits in the scavenged bitmaps high.
	for c := chunkIndex(base); c < chunkIndex(limit); c++ {
		if p.chunks[c.l1()] == nil {
			// Create the necessary l2 entry.
			const l2Size = unsafe.Sizeof(*p.chunks[0])
			r := sysAlloc(l2Size, p.sysStat)
			if r == nil {
				throw("pageAlloc: out of memory")
			}
			if !p.test {
				// Make the chunk mapping eligible or ineligible
				// for huge pages, depending on what our current
				// state is.
				if p.chunkHugePages {
					sysHugePage(r, l2Size)
				} else {
					sysNoHugePage(r, l2Size)
				}
			}
			// Store the new chunk block but avoid a write barrier.
			// grow is used in call chains that disallow write barriers.
			*(*uintptr)(unsafe.Pointer(&p.chunks[c.l1()])) = uintptr(r)
		}
		p.chunkOf(c).scavenged.setRange(0, pallocChunkPages)
	}

	// Update summaries accordingly. The grow acts like a free, so
	// we need to ensure this newly-free memory is visible in the
	// summaries.
	p.update(base, size/pageSize, true, false)
}

// enableChunkHugePages enables huge pages for the chunk bitmap mappings (disabled by default).
//
// This function is idempotent.
//
// A note on latency: for sufficiently small heaps (<10s of GiB) this function will take constant
// time, but may take time proportional to the size of the mapped heap beyond that.
//
// The heap lock must not be held over this operation, since it will briefly acquire
// the heap lock.
//
// Must be called on the system stack because it acquires the heap lock.
//
//go:systemstack
func (p *pageAlloc) enableChunkHugePages() {
	// Grab the heap lock to turn on huge pages for new chunks and clone the current
	// heap address space ranges.
	//
	// After the lock is released, we can be sure that bitmaps for any new chunks may
	// be backed with huge pages, and we have the address space for the rest of the
	// chunks. At the end of this function, all chunk metadata should be backed by huge
	// pages.
	lock(&mheap_.lock)
	if p.chunkHugePages {
		unlock(&mheap_.lock)
		return
	}
	p.chunkHugePages = true
	var inUse addrRanges
	inUse.sysStat = p.sysStat
	p.inUse.cloneInto(&inUse)
	unlock(&mheap_.lock)

	// This might seem like a lot of work, but all these loops are for generality.
	//
	// For a 1 GiB contiguous heap, a 48-bit address space, 13 L1 bits, a palloc chunk size
	// of 4 MiB, and adherence to the default set of heap address hints, this will result in
	// exactly 1 call to sysHugePage.
	//
	// Iterate over the clone taken under the lock, not p.inUse, which may be
	// mutated concurrently now that the lock has been released. The upper bound
	// is inclusive: a range that fits within a single L1 block must still get
	// one sysHugePage call.
	for _, r := range inUse.ranges {
		for i := chunkIndex(r.base.addr()).l1(); i <= chunkIndex(r.limit.addr()-1).l1(); i++ {
			// N.B. We can assume that p.chunks[i] is non-nil and in a mapped part of p.chunks
			// because it's derived from inUse, which never shrinks.
			sysHugePage(unsafe.Pointer(p.chunks[i]), unsafe.Sizeof(*p.chunks[0]))
		}
	}
}

// update updates heap metadata. It must be called each time the bitmap
// is updated.
//
// If contig is true, update does some optimizations assuming that there was
// a contiguous allocation or free between addr and addr+npages. alloc indicates
// whether the operation performed was an allocation or a free.
//
// p.mheapLock must be held.
func (p *pageAlloc) update(base, npages uintptr, contig, alloc bool) {
	assertLockHeld(p.mheapLock)

	// base, limit, sc, and ec are inclusive.
	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)

	// Handle updating the lowest level first.
	if sc == ec {
		// Fast path: the allocation doesn't span more than one chunk,
		// so update this one and if the summary didn't change, return.
		x := p.summary[len(p.summary)-1][sc]
		y := p.chunkOf(sc).summarize()
		if x == y {
			return
		}
		p.summary[len(p.summary)-1][sc] = y
	} else if contig {
		// Slow contiguous path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		summary := p.summary[len(p.summary)-1]

		// Update the summary for chunk sc.
		summary[sc] = p.chunkOf(sc).summarize()

		// Update the summaries for chunks in between, which are
		// either totally allocated or freed.
		whole := p.summary[len(p.summary)-1][sc+1 : ec]
		if alloc {
			clear(whole)
		} else {
			for i := range whole {
				whole[i] = freeChunkSum
			}
		}

		// Update the summary for chunk ec.
		summary[ec] = p.chunkOf(ec).summarize()
	} else {
		// Slow general path: the allocation spans more than one chunk
		// and at least one summary is guaranteed to change.
		//
		// We can't assume a contiguous allocation happened, so walk over
		// every chunk in the range and manually recompute the summary.
		summary := p.summary[len(p.summary)-1]
		for c := sc; c <= ec; c++ {
			summary[c] = p.chunkOf(c).summarize()
		}
	}

	// Walk up the radix tree and update the summaries appropriately.
	changed := true
	for l := len(p.summary) - 2; l >= 0 && changed; l-- {
		// Update summaries at level l from summaries at level l+1.
		changed = false

		// "Constants" for the previous level which we
		// need to compute the summary from that level.
		logEntriesPerBlock := levelBits[l+1]
		logMaxPages := levelLogPages[l+1]

		// lo and hi describe all the parts of the level we need to look at.
		lo, hi := addrsToSummaryRange(l, base, limit+1)

		// Iterate over each block, updating the corresponding summary in the less-granular level.
		for i := lo; i < hi; i++ {
			children := p.summary[l+1][i<<logEntriesPerBlock : (i+1)<<logEntriesPerBlock]
			sum := mergeSummaries(children, logMaxPages)
			old := p.summary[l][i]
			if old != sum {
				changed = true
				p.summary[l][i] = sum
			}
		}
	}
}

// allocRange marks the range of memory [base, base+npages*pageSize) as
// allocated. It also updates the summaries to reflect the newly-updated
// bitmap.
//
// Returns the amount of scavenged memory in bytes present in the
// allocated range.
//
// p.mheapLock must be held.
func (p *pageAlloc) allocRange(base, npages uintptr) uintptr {
	assertLockHeld(p.mheapLock)

	limit := base + npages*pageSize - 1
	sc, ec := chunkIndex(base), chunkIndex(limit)
	si, ei := chunkPageIndex(base), chunkPageIndex(limit)

	scav := uint(0)
	if sc == ec {
		// The range doesn't cross any chunk boundaries.
		chunk := p.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, ei+1-si)
		chunk.allocRange(si, ei+1-si)
		p.scav.index.alloc(sc, ei+1-si)
	} else {
		// The range crosses at least one chunk boundary.
		chunk := p.chunkOf(sc)
		scav += chunk.scavenged.popcntRange(si, pallocChunkPages-si)
		chunk.allocRange(si, pallocChunkPages-si)
		p.scav.index.alloc(sc, pallocChunkPages-si)
		for c := sc + 1; c < ec; c++ {
			chunk := p.chunkOf(c)
			scav += chunk.scavenged.popcntRange(0, pallocChunkPages)
			chunk.allocAll()
			p.scav.index.alloc(c, pallocChunkPages)
		}
		chunk = p.chunkOf(ec)
		scav += chunk.scavenged.popcntRange(0, ei+1)
		chunk.allocRange(0, ei+1)
		p.scav.index.alloc(ec, ei+1)
	}
	p.update(base, npages, true, true)
	return uintptr(scav) * pageSize
}

// findMappedAddr returns the smallest mapped offAddr that is
// >= addr. That is, if addr refers to mapped memory, then it is
// returned. If addr is higher than any mapped region, then
// it returns maxOffAddr.
//
// p.mheapLock must be held.
func (p *pageAlloc) findMappedAddr(addr offAddr) offAddr {
	assertLockHeld(p.mheapLock)

	// If we're not in a test, validate first by checking mheap_.arenas.
	// This is a fast path which is only safe to use outside of testing.
	ai := arenaIndex(addr.addr())
	if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
		vAddr, ok := p.inUse.findAddrGreaterEqual(addr.addr())
		if ok {
			return offAddr{vAddr}
		} else {
			// The candidate search address is greater than any
			// known address, which means we definitely have no
			// free memory left.
			return maxOffAddr
		}
	}
	return addr
}

// find searches for the first (address-ordered) contiguous free region of
// npages in size and returns a base address for that region.
//
// It uses p.searchAddr to prune its search and assumes that no palloc chunks
// below chunkIndex(p.searchAddr) contain any free memory at all.
//
// find also computes and returns a candidate p.searchAddr, which may or
// may not prune more of the address space than p.searchAddr already does.
// This candidate is always a valid p.searchAddr.
//
// find represents the slow path and the full radix tree search.
//
// Returns a base address of 0 on failure, in which case the candidate
// searchAddr returned is invalid and must be ignored.
//
// p.mheapLock must be held.
func (p *pageAlloc) find(npages uintptr) (uintptr, offAddr) {
	assertLockHeld(p.mheapLock)

	// Search algorithm.
	//
	// This algorithm walks each level l of the radix tree from the root level
	// to the leaf level. It iterates over at most 1 << levelBits[l] entries
	// in a given level in the radix tree, and uses the summary information to
	// find either:
	//  1) That a given subtree contains a large enough contiguous region, at
	//     which point it continues iterating on the next level, or
	//  2) That there are enough contiguous boundary-crossing bits to satisfy
	//     the allocation, at which point it knows exactly where to start
	//     allocating from.
	//
	// i tracks the index into the current level l's structure for the
	// contiguous 1 << levelBits[l] entries we're actually interested in.
	//
	// NOTE: Technically this search could allocate a region which crosses
	// the arenaBaseOffset boundary, which when arenaBaseOffset != 0, is
	// a discontinuity. However, the only way this could happen is if the
	// page at the zero address is mapped, and this is impossible on
	// every system we support where arenaBaseOffset != 0. So, the
	// discontinuity is already encoded in the fact that the OS will never
	// map the zero page for us, and this function doesn't try to handle
	// this case in any way.

	// i is the beginning of the block of entries we're searching at the
	// current level.
	i := 0

	// firstFree is the region of address space within which we are certain
	// to find the first free page in the heap. base and bound are the inclusive
	// bounds of this window, and both are addresses in the linearized, contiguous
	// view of the address space (with arenaBaseOffset pre-added). At each level,
	// this window is narrowed as we find the memory region containing the
	// first free page of memory. To begin with, the range reflects the
	// full process address space.
	//
	// firstFree is updated by calling foundFree each time free space in the
	// heap is discovered.
	//
	// At the end of the search, base.addr() is the best new
	// searchAddr we could deduce in this search.
	firstFree := struct {
		base, bound offAddr
	}{
		base:  minOffAddr,
		bound: maxOffAddr,
	}
	// foundFree takes the given address range [addr, addr+size) and
	// updates firstFree if it is a narrower range. The input range must
	// either be fully contained within firstFree or not overlap with it
	// at all.
	//
	// This way, we'll record the first summary we find with any free
	// pages on the root level and narrow that down if we descend into
	// that summary. But as soon as we need to iterate beyond that summary
	// in a level to find a large enough range, we'll stop narrowing.
	foundFree := func(addr offAddr, size uintptr) {
		if firstFree.base.lessEqual(addr) && addr.add(size-1).lessEqual(firstFree.bound) {
			// This range fits within the current firstFree window, so narrow
			// down the firstFree window to the base and bound of this range.
			firstFree.base = addr
			firstFree.bound = addr.add(size - 1)
		} else if !(addr.add(size-1).lessThan(firstFree.base) || firstFree.bound.lessThan(addr)) {
			// This range only partially overlaps with the firstFree range,
			// so throw.
			print("runtime: addr = ", hex(addr.addr()), ", size = ", size, "\n")
			print("runtime: base = ", hex(firstFree.base.addr()), ", bound = ", hex(firstFree.bound.addr()), "\n")
			throw("range partially overlaps")
		}
	}

	// lastSum is the summary which we saw on the previous level that made us
	// move on to the next level. Used to print additional information in the
	// case of a catastrophic failure.
	// lastSumIdx is that summary's index in the previous level.
	lastSum := packPallocSum(0, 0, 0)
	lastSumIdx := -1

nextLevel:
	for l := 0; l < len(p.summary); l++ {
		// For the root level, entriesPerBlock is the whole level.
		entriesPerBlock := 1 << levelBits[l]
		logMaxPages := levelLogPages[l]

		// We've moved into a new level, so let's update i to our new
		// starting index. This is a no-op for level 0.
		i <<= levelBits[l]

		// Slice out the block of entries we care about.
		entries := p.summary[l][i : i+entriesPerBlock]

		// Determine j0, the first index we should start iterating from.
		// The searchAddr may help us eliminate iterations if we followed the
		// searchAddr on the previous level or we're on the root level, in which
		// case the searchAddr should be the same as i after levelShift.
		j0 := 0
		if searchIdx := offAddrToLevelIndex(l, p.searchAddr); searchIdx&^(entriesPerBlock-1) == i {
			j0 = searchIdx & (entriesPerBlock - 1)
		}

		// Run over the level entries looking for
		// a contiguous run of at least npages either
		// within an entry or across entries.
		//
		// base contains the page index (relative to
		// the first entry's first page) of the currently
		// considered run of consecutive pages.
		//
		// size contains the size of the currently considered
		// run of consecutive pages.
		var base, size uint
		for j := j0; j < len(entries); j++ {
			sum := entries[j]
			if sum == 0 {
				// A full entry means we broke any streak and
				// that we should skip it altogether.
				size = 0
				continue
			}

			// We've encountered a non-zero summary which means
			// free memory, so update firstFree.
			foundFree(levelIndexToOffAddr(l, i+j), (uintptr(1)<<logMaxPages)*pageSize)

			s := sum.start()
			if size+s >= uint(npages) {
				// If size == 0 we don't have a run yet,
				// which means base isn't valid. So, set
				// base to the first page in this block.
				if size == 0 {
					base = uint(j) << logMaxPages
				}
				// We hit npages; we're done!
				size += s
				break
			}
			if sum.max() >= uint(npages) {
				// The entry itself contains npages contiguous
				// free pages, so continue on the next level
				// to find that run.
				i += j
				lastSumIdx = i
				lastSum = sum
				continue nextLevel
			}
			if size == 0 || s < 1<<logMaxPages {
				// We either don't have a current run started, or this entry
				// isn't totally free (meaning we can't continue the current
				// one), so try to begin a new run by setting size and base
				// based on sum.end.
				size = sum.end()
				base = uint(j+1)<<logMaxPages - size
				continue
			}
			// The entry is completely free, so continue the run.
			size += 1 << logMaxPages
		}
		if size >= uint(npages) {
			// We found a sufficiently large run of free pages straddling
			// some boundary, so compute the address and return it.
			addr := levelIndexToOffAddr(l, i).add(uintptr(base) * pageSize).addr()
			return addr, p.findMappedAddr(firstFree.base)
		}
		if l == 0 {
			// We're at level zero, so that means we've exhausted our search.
			return 0, maxSearchAddr()
		}

		// We're not at level zero, and we exhausted the level we were looking in.
		// This means that either our calculations were wrong or the level above
		// lied to us. In either case, dump some useful state and throw.
		print("runtime: summary[", l-1, "][", lastSumIdx, "] = ", lastSum.start(), ", ", lastSum.max(), ", ", lastSum.end(), "\n")
		print("runtime: level = ", l, ", npages = ", npages, ", j0 = ", j0, "\n")
		print("runtime: p.searchAddr = ", hex(p.searchAddr.addr()), ", i = ", i, "\n")
		print("runtime: levelShift[level] = ", levelShift[l], ", levelBits[level] = ", levelBits[l], "\n")
		for j := 0; j < len(entries); j++ {
			sum := entries[j]
			print("runtime: summary[", l, "][", i+j, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		}
		throw("bad summary data")
	}

	// Since we've gotten to this point, that means we haven't found a
	// sufficiently-sized free region straddling some boundary (chunk or larger).
	// This means the last summary we inspected must have had a large enough "max"
	// value, so look inside the chunk to find a suitable run.
	//
	// After iterating over all levels, i must contain a chunk index which
	// is what the final level represents.
	ci := chunkIdx(i)
	j, searchIdx := p.chunkOf(ci).find(npages, 0)
	if j == ^uint(0) {
		// We couldn't find any space in this chunk despite the summaries telling
		// us it should be there. There's likely a bug, so dump some state and throw.
		sum := p.summary[len(p.summary)-1][i]
		print("runtime: summary[", len(p.summary)-1, "][", i, "] = (", sum.start(), ", ", sum.max(), ", ", sum.end(), ")\n")
		print("runtime: npages = ", npages, "\n")
		throw("bad summary data")
	}

	// Compute the address at which the free space starts.
	addr := chunkBase(ci) + uintptr(j)*pageSize

	// Since we actually searched the chunk, we may have
	// found an even narrower free window.
	searchAddr := chunkBase(ci) + uintptr(searchIdx)*pageSize
	foundFree(offAddr{searchAddr}, chunkBase(ci+1)-searchAddr)
	return addr, p.findMappedAddr(firstFree.base)
}

// alloc allocates npages worth of memory from the page heap, returning the base
// address for the allocation and the amount of scavenged memory in bytes
// contained in the region [base address, base address + npages*pageSize).
//
// Returns a 0 base address on failure, in which case other returned values
// should be ignored.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) alloc(npages uintptr) (addr uintptr, scav uintptr) {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return 0, 0
	}

	// If npages has a chance of fitting in the chunk where the searchAddr is,
	// search it directly.
	searchAddr := minOffAddr
	if pallocChunkPages-chunkPageIndex(p.searchAddr.addr()) >= uint(npages) {
		// npages is guaranteed to be no greater than pallocChunkPages here.
		i := chunkIndex(p.searchAddr.addr())
		if max := p.summary[len(p.summary)-1][i].max(); max >= uint(npages) {
			j, searchIdx := p.chunkOf(i).find(npages, chunkPageIndex(p.searchAddr.addr()))
			if j == ^uint(0) {
				print("runtime: max = ", max, ", npages = ", npages, "\n")
				print("runtime: searchIdx = ", chunkPageIndex(p.searchAddr.addr()), ", p.searchAddr = ", hex(p.searchAddr.addr()), "\n")
				throw("bad summary data")
			}
			addr = chunkBase(i) + uintptr(j)*pageSize
			searchAddr = offAddr{chunkBase(i) + uintptr(searchIdx)*pageSize}
			goto Found
		}
	}
	// We failed to use a searchAddr for one reason or another, so try
	// the slow path.
	addr, searchAddr = p.find(npages)
	if addr == 0 {
		if npages == 1 {
			// We failed to find a single free page, the smallest unit
			// of allocation. This means we know the heap is completely
			// exhausted. Otherwise, the heap still might have free
			// space in it, just not enough contiguous space to
			// accommodate npages.
			p.searchAddr = maxSearchAddr()
		}
		return 0, 0
	}
Found:
	// Go ahead and actually mark the bits now that we have an address.
	scav = p.allocRange(addr, npages)

	// If we found a higher searchAddr, we know that all the
	// heap memory before that searchAddr in an offset address space is
	// allocated, so bump p.searchAddr up to the new one.
	if p.searchAddr.lessThan(searchAddr) {
		p.searchAddr = searchAddr
	}
	return addr, scav
}
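
// A hedged usage sketch of alloc and the free below (real callers live in
// mheap.go; both calls must be made with p.mheapLock held, on the system
// stack):
//
//	base, scav := p.alloc(npages)
//	if base != 0 {
//		// [base, base+npages*pageSize) is now allocated; scav bytes of it
//		// were scavenged, which the caller must account for and re-commit.
//		p.free(base, npages)
//	}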

// free returns npages worth of memory starting at base back to the page heap.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) free(base, npages uintptr) {
	assertLockHeld(p.mheapLock)

	// If we're freeing pages below the p.searchAddr, update searchAddr.
	if b := (offAddr{base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	limit := base + npages*pageSize - 1
	if npages == 1 {
		// Fast path: we're clearing a single bit, and we know exactly
		// where it is, so mark it directly.
		i := chunkIndex(base)
		pi := chunkPageIndex(base)
		p.chunkOf(i).free1(pi)
		p.scav.index.free(i, pi, 1)
	} else {
		// Slow path: we're clearing more bits so we may need to iterate.
		sc, ec := chunkIndex(base), chunkIndex(limit)
		si, ei := chunkPageIndex(base), chunkPageIndex(limit)

		if sc == ec {
			// The range doesn't cross any chunk boundaries.
			p.chunkOf(sc).free(si, ei+1-si)
			p.scav.index.free(sc, si, ei+1-si)
		} else {
			// The range crosses at least one chunk boundary.
			p.chunkOf(sc).free(si, pallocChunkPages-si)
			p.scav.index.free(sc, si, pallocChunkPages-si)
			for c := sc + 1; c < ec; c++ {
				p.chunkOf(c).freeAll()
				p.scav.index.free(c, 0, pallocChunkPages)
			}
			p.chunkOf(ec).free(0, ei+1)
			p.scav.index.free(ec, 0, ei+1)
		}
	}
	p.update(base, npages, true, false)
}

const (
	pallocSumBytes = unsafe.Sizeof(pallocSum(0))

	// maxPackedValue is the maximum value that any of the three fields in
	// the pallocSum may take on.
	maxPackedValue    = 1 << logMaxPackedValue
	logMaxPackedValue = logPallocChunkPages + (summaryLevels-1)*summaryLevelBits

	freeChunkSum = pallocSum(uint64(pallocChunkPages) |
		uint64(pallocChunkPages<<logMaxPackedValue) |
		uint64(pallocChunkPages<<(2*logMaxPackedValue)))
)

// pallocSum is a packed summary type which packs three numbers (start, max,
// and end) into a single 8-byte value. Each of these values is a count
// summarizing a bitmap, and may take a maximum value of 2^21 - 1;
// alternatively, all three may be equal to 2^21 at once. The latter case is
// represented by just setting the 64th bit.
type pallocSum uint64

// packPallocSum takes a start, max, and end value and produces a pallocSum.
func packPallocSum(start, max, end uint) pallocSum {
	if max == maxPackedValue {
		return pallocSum(uint64(1 << 63))
	}
	return pallocSum((uint64(start) & (maxPackedValue - 1)) |
		((uint64(max) & (maxPackedValue - 1)) << logMaxPackedValue) |
		((uint64(end) & (maxPackedValue - 1)) << (2 * logMaxPackedValue)))
}

// start extracts the start value from a packed sum.
func (p pallocSum) start() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1))
}

// max extracts the max value from a packed sum.
func (p pallocSum) max() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1))
}

// end extracts the end value from a packed sum.
func (p pallocSum) end() uint {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue
	}
	return uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}

// unpack unpacks all three values from the summary.
func (p pallocSum) unpack() (uint, uint, uint) {
	if uint64(p)&uint64(1<<63) != 0 {
		return maxPackedValue, maxPackedValue, maxPackedValue
	}
	return uint(uint64(p) & (maxPackedValue - 1)),
		uint((uint64(p) >> logMaxPackedValue) & (maxPackedValue - 1)),
		uint((uint64(p) >> (2 * logMaxPackedValue)) & (maxPackedValue - 1))
}
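
// A worked example of the packing (a sketch assuming logMaxPackedValue = 21,
// the 64-bit value):
//
//	s := packPallocSum(1, 2, 3)
//	// s == 1 | 2<<21 | 3<<42, so s.start() == 1, s.max() == 2, s.end() == 3.
//
//	f := packPallocSum(maxPackedValue, maxPackedValue, maxPackedValue)
//	// f == 1<<63, and all of f.start(), f.max(), f.end() == maxPackedValue.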
1035
1036// mergeSummaries merges consecutive summaries which may each represent at
1037// most 1 << logMaxPagesPerSum pages each together into one.
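//
// For example (a sketch with logMaxPagesPerSum = 9, i.e. 512-page chunk
// summaries): merging (0, 64, 32) and (16, 128, 0) yields start = 0 (the
// first summary is not fully free), most = max(64, 32+16, 128) = 128, and
// end = 0 (the second summary is not fully free). Merging two fully-free
// summaries (512, 512, 512) instead yields (1024, 1024, 1024).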
func mergeSummaries(sums []pallocSum, logMaxPagesPerSum uint) pallocSum {
	// Merge the summaries in sums into one.
	//
	// We do this by keeping a running summary representing the merged
	// summaries of sums[:i] in start, most, and end.
	start, most, end := sums[0].unpack()
	for i := 1; i < len(sums); i++ {
		// Merge in sums[i].
		si, mi, ei := sums[i].unpack()

		// Merge in sums[i].start only if the running summary is
		// completely free, otherwise this summary's start
		// plays no role in the combined sum.
		if start == uint(i)<<logMaxPagesPerSum {
			start += si
		}

		// Recompute the max value of the running sum by looking
		// across the boundary between the running sum and sums[i]
		// and at the max sums[i], taking the greatest of those two
		// and the max of the running sum.
		most = max(most, end+si, mi)

		// Merge in end by checking if this new summary is totally
		// free. If it is, then we want to extend the running sum's
		// end by the new summary. If not, then we have some alloc'd
		// pages in there and we just want to take the end value in
		// sums[i].
		if ei == 1<<logMaxPagesPerSum {
			end += 1 << logMaxPagesPerSum
		} else {
			end = ei
		}
	}
	return packPallocSum(start, most, end)
}