// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Simple not-in-heap bump-pointer traceRegion allocator.

package runtime

import (
	"internal/runtime/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// traceRegionAlloc is a thread-safe region allocator.
// It holds a linked list of traceRegionAllocBlock.
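//
// alloc lazily installs the first block, so no explicit setup is required
// before use. A sketch of the intended lifecycle (illustrative only):
//
//	var a traceRegionAlloc
//	p := a.alloc(n) // may be called from many goroutines concurrently
//	// ... write through p; the memory remains valid until drop ...
//	a.drop() // only once no alloc can be in flight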
type traceRegionAlloc struct {
	lock     mutex
	dropping atomic.Bool          // For checking invariants.
	current  atomic.UnsafePointer // *traceRegionAllocBlock
	full     *traceRegionAllocBlock
}
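
// alloc's fast path reads current without holding lock; lock serializes
// installing a new current block and guards the full list.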

// traceRegionAllocBlock is a block in traceRegionAlloc.
//
// traceRegionAllocBlock is allocated from non-GC'd memory, so it must not
// contain heap pointers. Writes to pointers to traceRegionAllocBlocks do
// not need write barriers.
type traceRegionAllocBlock struct {
	_ sys.NotInHeap
	traceRegionAllocBlockHeader
	data [traceRegionAllocBlockData]byte
}

type traceRegionAllocBlockHeader struct {
	next *traceRegionAllocBlock
	off  atomic.Uintptr // Number of bytes allocated from data.
}

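// traceRegionAllocBlockData is sized so that a whole traceRegionAllocBlock,
// header included, occupies exactly 64 KiB.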
const traceRegionAllocBlockData = 64<<10 - unsafe.Sizeof(traceRegionAllocBlockHeader{})

// alloc allocates an n-byte block. The block is always aligned to 8 bytes, regardless of platform.
func (a *traceRegionAlloc) alloc(n uintptr) *notInHeap {
	n = alignUp(n, 8)
	if n > traceRegionAllocBlockData {
		throw("traceRegion: alloc too large")
	}
	if a.dropping.Load() {
		throw("traceRegion: alloc with concurrent drop")
	}

	// Try to bump-pointer allocate into the current block.
	block := (*traceRegionAllocBlock)(a.current.Load())
	if block != nil {
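		// Atomically reserve the byte range [r-n, r). If the reservation
		// runs past the end of the block, the space is abandoned and we
		// fall through to the slow path to install a fresh block.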
		r := block.off.Add(n)
		if r <= uintptr(len(block.data)) {
			return (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
		}
	}

	// Try to install a new block.
	lock(&a.lock)

	// Check block again under the lock. Someone may
	// have gotten here first.
	block = (*traceRegionAllocBlock)(a.current.Load())
	if block != nil {
		r := block.off.Add(n)
		if r <= uintptr(len(block.data)) {
			unlock(&a.lock)
			return (*notInHeap)(unsafe.Pointer(&block.data[r-n]))
		}

		// Add the exhausted block to the full list. A plain pointer
		// write suffices: the block is not-in-heap, so no write
		// barrier is needed.
		block.next = a.full
		a.full = block
	}

	// Allocate a new block.
	block = (*traceRegionAllocBlock)(sysAlloc(unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys))
	if block == nil {
		throw("traceRegion: out of memory")
	}

	// Allocate space for our current request, so we always make
	// progress.
	block.off.Store(n)
	x := (*notInHeap)(unsafe.Pointer(&block.data[0]))

	// Publish the new block. The atomic store makes the block's
	// initialized header visible to the lock-free fast path.
	a.current.Store(unsafe.Pointer(block))
	unlock(&a.lock)
	return x
}

// drop frees all previously allocated memory and resets the allocator.
//
// drop is not safe to call concurrently with other calls to drop or with calls to alloc. The caller
// must ensure that it is not possible for anything else to be using the same structure.
func (a *traceRegionAlloc) drop() {
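	// Announce the drop so that a racing alloc (which the API forbids)
	// is caught by alloc's invariant check and throws.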
	a.dropping.Store(true)
	for a.full != nil {
		block := a.full
		a.full = block.next
		sysFree(unsafe.Pointer(block), unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
	}
	if current := a.current.Load(); current != nil {
		sysFree(current, unsafe.Sizeof(traceRegionAllocBlock{}), &memstats.other_sys)
		a.current.Store(nil)
	}
	a.dropping.Store(false)
}