// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/goos"
	"internal/runtime/atomic"
	"math"
	"math/rand"
	. "runtime"
	"testing"
	"time"
)

// makePallocData produces an initialized PallocData by setting
// the ranges described in alloc and scavenged.
func makePallocData(alloc, scavenged []BitRange) *PallocData {
	b := new(PallocData)
	for _, v := range alloc {
		if v.N == 0 {
			// Skip N==0. It's harmless and allocRange doesn't
			// handle this case.
			continue
		}
		b.AllocRange(v.I, v.N)
	}
	for _, v := range scavenged {
		if v.N == 0 {
			// See the previous loop.
			continue
		}
		b.ScavengedSetRange(v.I, v.N)
	}
	return b
}

func TestFillAligned(t *testing.T) {
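	// fillAlignedSlow is a slow but obviously correct reference: for each
	// m-aligned group of m bits in x, if any bit in the group is set, set
	// every bit of that group in the result.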
	fillAlignedSlow := func(x uint64, m uint) uint64 {
		if m == 1 {
			return x
		}
		out := uint64(0)
		for i := uint(0); i < 64; i += m {
			for j := uint(0); j < m; j++ {
				if x&(uint64(1)<<(i+j)) != 0 {
					out |= ((uint64(1) << m) - 1) << i
					break
				}
			}
		}
		return out
	}
	check := func(x uint64, m uint) {
		want := fillAlignedSlow(x, m)
		if got := FillAligned(x, m); got != want {
			t.Logf("got:  %064b", got)
			t.Logf("want: %064b", want)
			t.Errorf("bad fillAligned(%016x, %d)", x, m)
		}
	}
	for m := uint(1); m <= 64; m *= 2 {
		tests := []uint64{
			0x0000000000000000,
			0x00000000ffffffff,
			0xffffffff00000000,
			0x8000000000000001,
			0xf00000000000000f,
			0xf00000010050000f,
			0xffffffffffffffff,
			0x0000000000000001,
			0x0000000000000002,
			0x0000000000000008,
			uint64(1) << (m - 1),
			uint64(1) << m,
			// Try a few fixed arbitrary examples.
			0xb02b9effcf137016,
			0x3975a076a9fbff18,
			0x0f8c88ec3b81506e,
			0x60f14d80ef2fa0e6,
		}
		for _, test := range tests {
			check(test, m)
		}
		for i := 0; i < 1000; i++ {
			// Try pseudo-random numbers.
			check(rand.Uint64(), m)

			if m > 1 {
				// For m != 1, construct a slightly more interesting random
				// test: generate a bitmap in which each m-aligned group of
				// m bits is either all zeros or set randomly.
				val := uint64(0)
				for n := uint(0); n < 64; n += m {
					// For each group of m bits, flip a coin:
					// * Leave them as zero.
					// * Set them randomly.
					if rand.Uint64()%2 == 0 {
						val |= (rand.Uint64() & ((1 << m) - 1)) << n
					}
				}
				check(val, m)
			}
		}
	}
}

func TestPallocDataFindScavengeCandidate(t *testing.T) {
	type test struct {
		alloc, scavenged []BitRange
		min, max         uintptr
		want             BitRange
	}
	tests := map[string]test{
		"MixedMin1": {
			alloc:     []BitRange{{0, 40}, {42, PallocChunkPages - 42}},
			scavenged: []BitRange{{0, 41}, {42, PallocChunkPages - 42}},
			min:       1,
			max:       PallocChunkPages,
			want:      BitRange{41, 1},
		},
		"MultiMin1": {
			alloc:     []BitRange{{0, 63}, {65, 20}, {87, PallocChunkPages - 87}},
			scavenged: []BitRange{{86, 1}},
			min:       1,
			max:       PallocChunkPages,
			want:      BitRange{85, 1},
		},
	}
	// Try out different page minimums.
	for m := uintptr(1); m <= 64; m *= 2 {
		suffix := fmt.Sprintf("Min%d", m)
		tests["AllFree"+suffix] = test{
			min:  m,
			max:  PallocChunkPages,
			want: BitRange{0, PallocChunkPages},
		}
		tests["AllScavenged"+suffix] = test{
			scavenged: []BitRange{{0, PallocChunkPages}},
			min:       m,
			max:       PallocChunkPages,
			want:      BitRange{0, 0},
		}
		tests["NoneFree"+suffix] = test{
			alloc:     []BitRange{{0, PallocChunkPages}},
			scavenged: []BitRange{{PallocChunkPages / 2, PallocChunkPages / 2}},
			min:       m,
			max:       PallocChunkPages,
			want:      BitRange{0, 0},
		}
		tests["StartFree"+suffix] = test{
			alloc: []BitRange{{uint(m), PallocChunkPages - uint(m)}},
			min:   m,
			max:   PallocChunkPages,
			want:  BitRange{0, uint(m)},
		}
		tests["EndFree"+suffix] = test{
			alloc: []BitRange{{0, PallocChunkPages - uint(m)}},
			min:   m,
			max:   PallocChunkPages,
			want:  BitRange{PallocChunkPages - uint(m), uint(m)},
		}
		tests["Straddle64"+suffix] = test{
			alloc: []BitRange{{0, 64 - uint(m)}, {64 + uint(m), PallocChunkPages - (64 + uint(m))}},
			min:   m,
			max:   2 * m,
			want:  BitRange{64 - uint(m), 2 * uint(m)},
		}
		tests["BottomEdge64WithFull"+suffix] = test{
			alloc:     []BitRange{{64, 64}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
			scavenged: []BitRange{{1, 10}},
			min:       m,
			max:       3 * m,
			want:      BitRange{128, 3 * uint(m)},
		}
		tests["BottomEdge64WithPocket"+suffix] = test{
			alloc:     []BitRange{{64, 62}, {127, 1}, {128 + 3*uint(m), PallocChunkPages - (128 + 3*uint(m))}},
			scavenged: []BitRange{{1, 10}},
			min:       m,
			max:       3 * m,
			want:      BitRange{128, 3 * uint(m)},
		}
		tests["Max0"+suffix] = test{
			scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
			min:       m,
			max:       0,
			want:      BitRange{PallocChunkPages - uint(m), uint(m)},
		}
		if m <= 8 {
			tests["OneFree"+suffix] = test{
				alloc: []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
				min:   m,
				max:   PallocChunkPages,
				want:  BitRange{40, uint(m)},
			}
			tests["OneScavenged"+suffix] = test{
				alloc:     []BitRange{{0, 40}, {40 + uint(m), PallocChunkPages - (40 + uint(m))}},
				scavenged: []BitRange{{40, 1}},
				min:       m,
				max:       PallocChunkPages,
				want:      BitRange{0, 0},
			}
		}
		if m > 1 {
			tests["MaxUnaligned"+suffix] = test{
				scavenged: []BitRange{{0, PallocChunkPages - uint(m*2-1)}},
				min:       m,
				max:       m - 2,
				want:      BitRange{PallocChunkPages - uint(m), uint(m)},
			}
			tests["SkipSmall"+suffix] = test{
				alloc: []BitRange{{0, 64 - uint(m)}, {64, 5}, {70, 11}, {82, PallocChunkPages - 82}},
				min:   m,
				max:   m,
				want:  BitRange{64 - uint(m), uint(m)},
			}
			tests["SkipMisaligned"+suffix] = test{
				alloc: []BitRange{{0, 64 - uint(m)}, {64, 63}, {127 + uint(m), PallocChunkPages - (127 + uint(m))}},
				min:   m,
				max:   m,
				want:  BitRange{64 - uint(m), uint(m)},
			}
			tests["MaxLessThan"+suffix] = test{
				scavenged: []BitRange{{0, PallocChunkPages - uint(m)}},
				min:       m,
				max:       1,
				want:      BitRange{PallocChunkPages - uint(m), uint(m)},
			}
		}
	}
	if PhysHugePageSize > uintptr(PageSize) {
		// Check hugepage preserving behavior.
		bits := uint(PhysHugePageSize / uintptr(PageSize))
		if bits < PallocChunkPages {
			tests["PreserveHugePageBottom"] = test{
				alloc: []BitRange{{bits + 2, PallocChunkPages - (bits + 2)}},
				min:   1,
				max:   3, // Make it so that max would have us try to break the huge page.
				want:  BitRange{0, bits + 2},
			}
			if 3*bits < PallocChunkPages {
				// We need at least 3 huge pages in a chunk for this test to make sense.
				tests["PreserveHugePageMiddle"] = test{
					alloc: []BitRange{{0, bits - 10}, {2*bits + 10, PallocChunkPages - (2*bits + 10)}},
					min:   1,
					max:   12, // Make it so that max would have us try to break the huge page.
					want:  BitRange{bits, bits + 10},
				}
			}
			tests["PreserveHugePageTop"] = test{
				alloc: []BitRange{{0, PallocChunkPages - bits}},
				min:   1,
				max:   1, // Even one page would break a huge page in this case.
				want:  BitRange{PallocChunkPages - bits, bits},
			}
		} else if bits == PallocChunkPages {
			tests["PreserveHugePageAll"] = test{
				min:  1,
				max:  1, // Even one page would break a huge page in this case.
				want: BitRange{0, PallocChunkPages},
			}
		} else {
			// The huge page size is greater than pallocChunkPages, so it should
			// be effectively disabled. There's no way we can possibly scavenge
			// a huge page out of this bitmap chunk.
			tests["PreserveHugePageNone"] = test{
				min:  1,
				max:  1,
				want: BitRange{PallocChunkPages - 1, 1},
			}
		}
	}
	for name, v := range tests {
		v := v
		t.Run(name, func(t *testing.T) {
			b := makePallocData(v.alloc, v.scavenged)
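			// Start the search from the top of the chunk; candidates are
			// found from the highest addresses downward.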
			start, size := b.FindScavengeCandidate(PallocChunkPages-1, v.min, v.max)
			got := BitRange{start, size}
			if !(got.N == 0 && v.want.N == 0) && got != v.want {
				t.Fatalf("candidate mismatch: got %v, want %v", got, v.want)
			}
		})
	}
}

// Tests end-to-end scavenging on a pageAlloc.
func TestPageAllocScavenge(t *testing.T) {
	if GOOS == "openbsd" && testing.Short() {
		t.Skip("skipping because virtual memory is limited; see #36210")
	}
	type test struct {
		request, expect uintptr
	}
	minPages := PhysPageSize / PageSize
	if minPages < 1 {
		minPages = 1
	}
	type setup struct {
		beforeAlloc map[ChunkIdx][]BitRange
		beforeScav  map[ChunkIdx][]BitRange
		expect      []test
		afterScav   map[ChunkIdx][]BitRange
	}
	tests := map[string]setup{
		"AllFreeUnscavExhaust": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {},
			},
			expect: []test{
				{^uintptr(0), 3 * PallocChunkPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
		},
		"NoneFreeUnscavExhaust": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {},
				BaseChunkIdx + 2: {{0, PallocChunkPages}},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
			expect: []test{
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
				BaseChunkIdx + 2: {},
			},
		},
		"ScavHighestPageFirst": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{1, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(minPages)}},
			},
		},
		"ScavMultiple": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{minPages * PageSize, minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx: {{0, PallocChunkPages}},
			},
		},
		"ScavMultiple2": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {},
				BaseChunkIdx + 1: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
				BaseChunkIdx + 1: {{0, PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{2 * minPages * PageSize, 2 * minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
				{minPages * PageSize, minPages * PageSize},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:     {{0, PallocChunkPages}},
				BaseChunkIdx + 1: {{0, PallocChunkPages}},
			},
		},
		"ScavDiscontiguous": {
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {},
				BaseChunkIdx + 0xe: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {{uint(minPages), PallocChunkPages - uint(2*minPages)}},
				BaseChunkIdx + 0xe: {{uint(2 * minPages), PallocChunkPages - uint(2*minPages)}},
			},
			expect: []test{
				{2 * minPages * PageSize, 2 * minPages * PageSize},
				{^uintptr(0), 2 * minPages * PageSize},
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:       {{0, PallocChunkPages}},
				BaseChunkIdx + 0xe: {{0, PallocChunkPages}},
			},
		},
	}
	// Disable these tests on iOS since we have a small address space.
	// See #46860.
	if PageAlloc64Bit != 0 && goos.IsIos == 0 {
		tests["ScavAllVeryDiscontiguous"] = setup{
			beforeAlloc: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {},
				BaseChunkIdx + 0x1000: {},
			},
			beforeScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {},
				BaseChunkIdx + 0x1000: {},
			},
			expect: []test{
				{^uintptr(0), 2 * PallocChunkPages * PageSize},
				{^uintptr(0), 0},
			},
			afterScav: map[ChunkIdx][]BitRange{
				BaseChunkIdx:          {{0, PallocChunkPages}},
				BaseChunkIdx + 0x1000: {{0, PallocChunkPages}},
			},
		}
	}
	for name, v := range tests {
		v := v
		t.Run(name, func(t *testing.T) {
			b := NewPageAlloc(v.beforeAlloc, v.beforeScav)
			defer FreePageAlloc(b)

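			// Each step requests h.request bytes from the page allocator's
			// scavenger and expects exactly h.expect bytes to be released.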
			for iter, h := range v.expect {
				if got := b.Scavenge(h.request); got != h.expect {
					t.Fatalf("bad scavenge #%d: want %d, got %d", iter+1, h.expect, got)
				}
			}
			want := NewPageAlloc(v.beforeAlloc, v.afterScav)
			defer FreePageAlloc(want)

			checkPageAlloc(t, want, b)
		})
	}
}

func TestScavenger(t *testing.T) {
	// workedTime is a standard conversion of bytes of scavenge
	// work to time elapsed.
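	// It models scavenging at 10µs per 4 KiB page, rounding the byte
	// count up to a whole number of pages.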
	workedTime := func(bytes uintptr) int64 {
		return int64((bytes+4095)/4096) * int64(10*time.Microsecond)
	}

	// Set up a bunch of state that we're going to track and verify
	// throughout the test.
	totalWork := uint64(64<<20 - 3*PhysPageSize)
	var totalSlept, totalWorked atomic.Int64
	var availableWork atomic.Uint64
	var stopAt atomic.Uint64 // How much available work to stop at.

	// Set up the scavenger.
	var s Scavenger
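	// The fake sleep records how long the scavenger asked to sleep and
	// reports that the full duration elapsed.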
	s.Sleep = func(ns int64) int64 {
		totalSlept.Add(ns)
		return ns
	}
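	// The fake scavenge consumes up to the requested number of bytes of
	// available work and accounts the simulated time that work would take.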
	s.Scavenge = func(bytes uintptr) (uintptr, int64) {
		avail := availableWork.Load()
		if uint64(bytes) > avail {
			bytes = uintptr(avail)
		}
		t := workedTime(bytes)
		if bytes != 0 {
			availableWork.Add(-int64(bytes))
			totalWorked.Add(t)
		}
		return bytes, t
	}
	s.ShouldStop = func() bool {
		if availableWork.Load() <= stopAt.Load() {
			return true
		}
		return false
	}
	s.GoMaxProcs = func() int32 {
		return 1
	}

	// Define a helper for verifying that various properties hold.
	verifyScavengerState := func(t *testing.T, expWork uint64) {
		t.Helper()

		// Check to make sure it did the amount of work we expected.
		if workDone := uint64(s.Released()); workDone != expWork {
			t.Errorf("want %d bytes of work done, got %d", expWork, workDone)
		}
		// Check to make sure the scavenger is meeting its CPU target.
		idealFraction := float64(ScavengePercent) / 100.0
		cpuFraction := float64(totalWorked.Load()) / float64(totalWorked.Load()+totalSlept.Load())
		if cpuFraction < idealFraction-0.005 || cpuFraction > idealFraction+0.005 {
			t.Errorf("want %f CPU fraction, got %f", idealFraction, cpuFraction)
		}
	}

	// Start the scavenger.
	s.Start()

	// Set up some work and let the scavenger run to completion.
	availableWork.Store(totalWork)
	s.Wake()
	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
		t.Fatal("timed out waiting for scavenger to run to completion")
	}
	// Run a check.
	verifyScavengerState(t, totalWork)

	// Now let's do it again and see what happens when we have no work to do.
	// It should've gone right back to sleep.
	s.Wake()
	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
		t.Fatal("timed out waiting for scavenger to run to completion")
	}
	// Run another check.
	verifyScavengerState(t, totalWork)

	// One more time, this time doing the same amount of work as the first time.
	// Let's see if we can get the scavenger to continue.
	availableWork.Store(totalWork)
	s.Wake()
	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
		t.Fatal("timed out waiting for scavenger to run to completion")
	}
	// Run another check.
	verifyScavengerState(t, 2*totalWork)

	// This time, let's stop after a certain amount of work.
	//
	// Pick a stopping point such that when subtracted from totalWork
	// we get a multiple of a relatively large power of 2. verifyScavengerState
	// always makes an exact check, but the scavenger might go a little over,
	// which is OK. If this breaks often or gets annoying to maintain, modify
	// verifyScavengerState.
	availableWork.Store(totalWork)
	stoppingPoint := uint64(1<<20 - 3*PhysPageSize)
	stopAt.Store(stoppingPoint)
	s.Wake()
	if !s.BlockUntilParked(2e9 /* 2 seconds */) {
		t.Fatal("timed out waiting for scavenger to run to completion")
	}
	// Run another check.
	verifyScavengerState(t, 2*totalWork+(totalWork-stoppingPoint))

	// Clean up.
	s.Stop()
}

func TestScavengeIndex(t *testing.T) {
	// This test suite tests the scavengeIndex data structure.

	// markFunc is a function that makes the address range [base, limit)
	// available for scavenging in a test index.
	type markFunc func(base, limit uintptr)

	// findFunc is a function that searches for the next available page
	// to scavenge in the index. It asserts that the page is found in
	// chunk "ci" at page "offset."
	type findFunc func(ci ChunkIdx, offset uint)

	// The structure of the tests below is as follows:
	//
	// setup creates a fake scavengeIndex that can be mutated and queried by
	// the functions it returns. Those functions capture the testing.T that
	// setup is called with, so they're bound to the subtest they're created in.
	//
	// Tests are then organized into test cases which mark some pages as
	// scavenge-able then try to find them. Tests expect that the initial
	// state of the scavengeIndex has all of the chunks as dense in the last
	// generation and empty to the scavenger.
	//
	// There are a few additional tests that interleave mark and find operations,
	// so they're defined separately, but use the same infrastructure.
	setup := func(t *testing.T, force bool) (mark markFunc, find findFunc, nextGen func()) {
		t.Helper()

		// Pick some reasonable bounds. We don't need a huge range just to test.
		si := NewScavengeIndex(BaseChunkIdx, BaseChunkIdx+64)

		// Initialize all the chunks as dense and empty.
		//
		// Also, reset search addresses so that we can get page offsets.
		si.AllocRange(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+64, 0))
		si.NextGen()
		si.FreeRange(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+64, 0))
		for ci := BaseChunkIdx; ci < BaseChunkIdx+64; ci++ {
			si.SetEmpty(ci)
		}
		si.ResetSearchAddrs()

		// Create and return test functions.
		mark = func(base, limit uintptr) {
			t.Helper()

			si.AllocRange(base, limit)
			si.FreeRange(base, limit)
		}
		find = func(want ChunkIdx, wantOffset uint) {
			t.Helper()

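			// Find reports the highest-addressed chunk with pages still
			// available to scavenge, along with the offset of the highest
			// such page within it.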
			got, gotOffset := si.Find(force)
			if want != got {
				t.Errorf("find: wanted chunk index %d, got %d", want, got)
			}
			if wantOffset != gotOffset {
				t.Errorf("find: wanted page offset %d, got %d", wantOffset, gotOffset)
			}
			if t.Failed() {
				t.FailNow()
			}
			si.SetEmpty(got)
		}
		nextGen = func() {
			t.Helper()

			si.NextGen()
		}
		return
	}

	// Each of these test cases calls mark and then find once.
	type testCase struct {
		name string
		mark func(markFunc)
		find func(findFunc)
	}
	for _, test := range []testCase{
		{
			name: "Uninitialized",
			mark: func(_ markFunc) {},
			find: func(_ findFunc) {},
		},
		{
			name: "OnePage",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx, 3), PageBase(BaseChunkIdx, 4))
			},
			find: func(find findFunc) {
				find(BaseChunkIdx, 3)
			},
		},
		{
			name: "FirstPage",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx, 1))
			},
			find: func(find findFunc) {
				find(BaseChunkIdx, 0)
			},
		},
		{
			name: "SeveralPages",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx, 9), PageBase(BaseChunkIdx, 14))
			},
			find: func(find findFunc) {
				find(BaseChunkIdx, 13)
			},
		},
		{
			name: "WholeChunk",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0))
			},
			find: func(find findFunc) {
				find(BaseChunkIdx, PallocChunkPages-1)
			},
		},
		{
			name: "LastPage",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx, PallocChunkPages-1), PageBase(BaseChunkIdx+1, 0))
			},
			find: func(find findFunc) {
				find(BaseChunkIdx, PallocChunkPages-1)
			},
		},
		{
			name: "TwoChunks",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx, 128), PageBase(BaseChunkIdx+1, 128))
			},
			find: func(find findFunc) {
				find(BaseChunkIdx+1, 127)
				find(BaseChunkIdx, PallocChunkPages-1)
			},
		},
		{
			name: "TwoChunksOffset",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx+7, 128), PageBase(BaseChunkIdx+8, 129))
			},
			find: func(find findFunc) {
				find(BaseChunkIdx+8, 128)
				find(BaseChunkIdx+7, PallocChunkPages-1)
			},
		},
		{
			name: "SevenChunksOffset",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx+6, 11), PageBase(BaseChunkIdx+13, 15))
			},
			find: func(find findFunc) {
				find(BaseChunkIdx+13, 14)
				for i := BaseChunkIdx + 12; i >= BaseChunkIdx+6; i-- {
					find(i, PallocChunkPages-1)
				}
			},
		},
		{
			name: "ThirtyTwoChunks",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0))
			},
			find: func(find findFunc) {
				for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- {
					find(i, PallocChunkPages-1)
				}
			},
		},
		{
			name: "ThirtyTwoChunksOffset",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx+3, 0), PageBase(BaseChunkIdx+35, 0))
			},
			find: func(find findFunc) {
				for i := BaseChunkIdx + 34; i >= BaseChunkIdx+3; i-- {
					find(i, PallocChunkPages-1)
				}
			},
		},
		{
			name: "Mark",
			mark: func(mark markFunc) {
				for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ {
					mark(PageBase(i, 0), PageBase(i+1, 0))
				}
			},
			find: func(find findFunc) {
				for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- {
					find(i, PallocChunkPages-1)
				}
			},
		},
		{
			name: "MarkIdempotentOneChunk",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0))
				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+1, 0))
			},
			find: func(find findFunc) {
				find(BaseChunkIdx, PallocChunkPages-1)
			},
		},
		{
			name: "MarkIdempotentThirtyTwoChunks",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0))
				mark(PageBase(BaseChunkIdx, 0), PageBase(BaseChunkIdx+32, 0))
			},
			find: func(find findFunc) {
				for i := BaseChunkIdx + 31; i >= BaseChunkIdx; i-- {
					find(i, PallocChunkPages-1)
				}
			},
		},
		{
			name: "MarkIdempotentThirtyTwoChunksOffset",
			mark: func(mark markFunc) {
				mark(PageBase(BaseChunkIdx+4, 0), PageBase(BaseChunkIdx+31, 0))
				mark(PageBase(BaseChunkIdx+5, 0), PageBase(BaseChunkIdx+36, 0))
			},
			find: func(find findFunc) {
				for i := BaseChunkIdx + 35; i >= BaseChunkIdx+4; i-- {
					find(i, PallocChunkPages-1)
				}
			},
		},
	} {
		test := test
		t.Run("Bg/"+test.name, func(t *testing.T) {
			mark, find, nextGen := setup(t, false)
			test.mark(mark)
			find(0, 0)      // Make sure we find nothing at this point.
			nextGen()       // Move to the next generation.
			test.find(find) // Now we should be able to find things.
			find(0, 0)      // The test should always fully exhaust the index.
		})
		t.Run("Force/"+test.name, func(t *testing.T) {
			mark, find, _ := setup(t, true)
			test.mark(mark)
			test.find(find) // Finding should always work when forced.
			find(0, 0)      // The test should always fully exhaust the index.
		})
	}
	t.Run("Bg/MarkInterleaved", func(t *testing.T) {
		mark, find, nextGen := setup(t, false)
		for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ {
			mark(PageBase(i, 0), PageBase(i+1, 0))
			nextGen()
			find(i, PallocChunkPages-1)
		}
		find(0, 0)
	})
	t.Run("Force/MarkInterleaved", func(t *testing.T) {
		mark, find, _ := setup(t, true)
		for i := BaseChunkIdx; i < BaseChunkIdx+32; i++ {
			mark(PageBase(i, 0), PageBase(i+1, 0))
			find(i, PallocChunkPages-1)
		}
		find(0, 0)
	})
}

func TestScavChunkDataPack(t *testing.T) {
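	// Check that packing a scavChunkData and unpacking it again preserves
	// its fields for a couple of representative values.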
	if !CheckPackScavChunkData(1918237402, 512, 512, 0b11) {
		t.Error("failed pack/unpack check for scavChunkData 1")
	}
	if !CheckPackScavChunkData(^uint32(0), 12, 0, 0b00) {
		t.Error("failed pack/unpack check for scavChunkData 2")
	}
}

func FuzzPIController(f *testing.F) {
	isNormal := func(x float64) bool {
		return !math.IsInf(x, 0) && !math.IsNaN(x)
	}
	isPositive := func(x float64) bool {
		return isNormal(x) && x > 0
	}
	// Seed with constants from controllers in the runtime.
	// It's not critical that we keep these in sync, they're just
	// reasonable seed inputs.
	f.Add(0.3375, 3.2e6, 1e9, 0.001, 1000.0, 0.01)
	f.Add(0.9, 4.0, 1000.0, -1000.0, 1000.0, 0.84)
	f.Fuzz(func(t *testing.T, kp, ti, tt, min, max, setPoint float64) {
		// Ignore uninteresting invalid parameters. These parameters
		// are constant, so in practice surprising values will be documented
		// or will otherwise be immediately visible.
		//
		// We just want to make sure that given a non-Inf, non-NaN input,
		// we always get a non-Inf, non-NaN output.
		if !isPositive(kp) || !isPositive(ti) || !isPositive(tt) {
			return
		}
		if !isNormal(min) || !isNormal(max) || min > max {
			return
		}
		// Use a random source, but make it deterministic.
		rs := rand.New(rand.NewSource(800))
		randFloat64 := func() float64 {
			return math.Float64frombits(rs.Uint64())
		}
		p := NewPIController(kp, ti, tt, min, max)
		state := float64(0)
		for i := 0; i < 100; i++ {
			input := randFloat64()
			// Ignore the "ok" parameter. We're just trying to break it.
			// state is intentionally completely uncorrelated with the input.
			var ok bool
			state, ok = p.Next(input, setPoint, 1.0)
			if !isNormal(state) {
				t.Fatalf("got NaN or Inf result from controller: %f %v", state, ok)
			}
		}
	})
}