// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

// slice is the runtime representation of a Go slice:
// a pointer to the backing array plus the slice's length and capacity.
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by runtime/internal/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}
// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		var overflow bool
		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.Size_ * uintptr(fromlen)
	} else {
		// fromlen is a known good length that is equal to or greater than tolen,
		// so tolen is a valid slice length too, since from and to have the
		// same element width.
		tomem = et.Size_ * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if !et.Pointers() {
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in from since we know the destination slice
			// contains only nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem, et)
		}
	}

	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}
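
// For illustration only (not part of the runtime): makeslicecopy is the runtime
// half of the compiler's make-then-copy optimization, which can fuse a pattern
// roughly like the following into a single allocation plus copy:
//
//	dst := make([]byte, len(src)) // src assumed to be a []byte or string
//	copy(dst, src)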

// makeslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname makeslice
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}
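
// For illustration only: the error-ordering note above means that, at run time
// (assuming a 64-bit build, where 1<<62 exceeds maxAlloc),
//
//	n := 1 << 62
//	_ = make([]byte, n)    // panics with "makeslice: len out of range"
//	_ = make([]byte, 0, n) // panics with "makeslice: cap out of range"
//
// that is, an oversized implicit cap is reported as a len error (golang.org/issue/4085).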

func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}

// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + num)
//	oldCap = original slice's capacity
//	   num = number of elements being added
//	    et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num.
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
//
// growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/bytedance/sonic
//   - github.com/chenzhuoyu/iasm
//   - github.com/cloudwego/dynamicgo
//   - github.com/ugorji/go/codec
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname growslice
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	oldLen := newLen - num
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if asanenabled {
		asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}

	if newLen < 0 {
		panic(errorString("growslice: len out of range"))
	}

	if et.Size_ == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
	}

	newcap := nextslicecap(newLen, oldCap)

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.Size.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	noscan := !et.Pointers()
	switch {
	case et.Size_ == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap), noscan)
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.Size_ == goarch.PtrSize:
		lenmem = uintptr(oldLen) * goarch.PtrSize
		newlenmem = uintptr(newLen) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.Size_):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
		} else {
			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.Size_
		newlenmem = uintptr(newLen) * et.Size_
		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		capmem = roundupsize(capmem, noscan)
		newcap = int(capmem / et.Size_)
		capmem = uintptr(newcap) * et.Size_
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	// type T [1<<27 + 1]int64
	//
	// var d T
	// var s []T
	//
	// func main() {
	//   s = append(s, d, d, d, d)
	//   print(len(s), "\n")
	// }
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}

	var p unsafe.Pointer
	if !et.Pointers() {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
		}
	}
	memmove(p, oldPtr, lenmem)

	return slice{p, newLen, newcap}
}
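
// For illustration only (a user-level sketch, not runtime code): growslice is
// reached when an append needs more capacity than the existing backing array
// provides, e.g.:
//
//	s := make([]int, 0, 2)
//	s = append(s, 1, 2) // fits: no growth, same backing array
//	s = append(s, 3)    // exceeds cap: compiler-generated code calls growslice
//
// The capacity chosen for the new backing array comes from nextslicecap below
// and is then rounded up to an allocation size class, so callers should not
// rely on an exact growth sequence.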

// nextslicecap computes the next appropriate slice capacity.
func nextslicecap(newLen, oldCap int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		return newLen
	}

	const threshold = 256
	if oldCap < threshold {
		return doublecap
	}
	for {
		// Transition from growing 2x for small slices
		// to growing 1.25x for large slices. This formula
		// gives a smooth-ish transition between the two.
		newcap += (newcap + 3*threshold) >> 2

		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
		// newLen is guaranteed to be larger than zero, so when newcap
		// overflows, `uint(newcap) > uint(newLen)`.
		// This allows checking for both conditions with the same comparison.
		if uint(newcap) >= uint(newLen) {
			break
		}
	}

	// Set newcap to the requested cap when
	// the newcap calculation overflowed.
	if newcap <= 0 {
		return newLen
	}
	return newcap
}
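
// For illustration only: a worked example of the growth formula above, starting
// from a few capacities at or above the 256-element threshold. Each step adds
// (newcap + 3*256)/4:
//
//	256  -> 256 + (256+768)/4   = 512  (2x)
//	512  -> 512 + (512+768)/4   = 832  (~1.63x)
//	1024 -> 1024 + (1024+768)/4 = 1472 (~1.44x)
//
// and the ratio approaches 1.25x as the capacity grows. The capacity ultimately
// observed via cap() can be larger still, because growslice rounds the
// allocation up to a size class.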

// reflect_growslice should be an internal detail,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/dynamicgo
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
	num -= old.cap - old.len // preserve memory of old[old.len:old.cap]
	new := growslice(old.array, old.cap+num, old.cap, num, et)
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	if !et.Pointers() {
		oldcapmem := uintptr(old.cap) * et.Size_
		newlenmem := uintptr(new.len) * et.Size_
		memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
	}
	new.len = old.len // preserve the old length
	return new
}
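
// For illustration only: the slices.Grow analogy above. At the language level,
// slices.Grow guarantees room for num more elements without changing the
// visible length:
//
//	s := make([]int, 3)
//	s = slices.Grow(s, 100) // len(s) == 3, cap(s) >= 103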

// isPowerOfTwo reports whether x is a power of two.
// (It also returns true for 0, but its only caller, growslice, never passes 0.)
func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}
	if asanenabled {
		asanread(fromPtr, size)
		asanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}
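
// For illustration only: slicecopy is one runtime path behind the copy builtin
// for pointer-free element types, including copying from a string; the compiler
// may instead inline a copy as a plain memmove, so not every copy call lands here.
//
//	dst := make([]byte, 4)
//	n := copy(dst, "hello") // n == 4, dst holds "hell"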

//go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero
func bytealg_MakeNoZero(len int) []byte {
	if uintptr(len) > maxAlloc {
		panicmakeslicelen()
	}
	cap := roundupsize(uintptr(len), true)
	return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len]
}