// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"internal/goarch"
	"internal/runtime/atomic"
	"unsafe"
)

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
	tracebackCrash = 1 << iota
	tracebackAll
	tracebackShift = iota
)

var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32
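
// For illustration, GOTRACEBACK=crash is cached as
//
//	2<<tracebackShift | tracebackAll | tracebackCrash // level 2 plus the "all" and "crash" bits
//
// (see setTraceback below).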

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc.) after printing the traceback.
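// For example, under GOTRACEBACK=crash this returns level=2, all=true,
// and crash=true (see setTraceback).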
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
	gp := getg()
	t := atomic.Load(&traceback_cache)
	crash = t&tracebackCrash != 0
	all = gp.m.throwing >= throwTypeUser || t&tracebackAll != 0
	if gp.m.traceback != 0 {
		level = int32(gp.m.traceback)
	} else if gp.m.throwing >= throwTypeRuntime {
		// Always include runtime frames in runtime throws unless
		// otherwise overridden by m.traceback.
		level = 2
	} else {
		level = int32(t >> tracebackShift)
	}
	return
}

var (
	argc int32
	argv **byte
)

// nosplit for use in linux startup sysargs.
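//
// argv_index returns argv[i], treating argv as a C-style array of
// pointers. For example, argv_index(argv, 0) is the program name and
// argv_index(argv, argc) is the terminating nil pointer.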
//
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
	return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*goarch.PtrSize))
}

func args(c int32, v **byte) {
	argc = c
	argv = v
	sysargs(c, v)
}

func goargs() {
	if GOOS == "windows" {
		return
	}
	argslice = make([]string, argc)
	for i := int32(0); i < argc; i++ {
		argslice[i] = gostringnocopy(argv_index(argv, i))
	}
}

func goenvs_unix() {
	// TODO(austin): ppc64 in dynamic linking mode doesn't
	// guarantee env[] will immediately follow argv. Might cause
	// problems.
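	//
	// On typical ELF systems the kernel arranges the startup stack as
	//
	//	argv[0] ... argv[argc-1] NULL env[0] ... env[n-1] NULL
	//
	// so the environment begins at index argc+1 of the argv array.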
	n := int32(0)
	for argv_index(argv, argc+1+n) != nil {
		n++
	}

	envs = make([]string, n)
	for i := int32(0); i < n; i++ {
		envs[i] = gostring(argv_index(argv, argc+1+i))
	}
}

func environ() []string {
	return envs
}

// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64

func testAtomic64() {
	test_z64 = 42
	test_x64 = 0
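	// test_z64 is 42 but this CAS expects test_x64 (0), so it must
	// fail and leave both variables unchanged.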
	if atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 0 {
		throw("cas64 failed")
	}
	test_x64 = 42
	if !atomic.Cas64(&test_z64, test_x64, 1) {
		throw("cas64 failed")
	}
	if test_x64 != 42 || test_z64 != 1 {
		throw("cas64 failed")
	}
	if atomic.Load64(&test_z64) != 1 {
		throw("load64 failed")
	}
	atomic.Store64(&test_z64, (1<<40)+1)
	if atomic.Load64(&test_z64) != (1<<40)+1 {
		throw("store64 failed")
	}
	if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Load64(&test_z64) != (2<<40)+2 {
		throw("xadd64 failed")
	}
	if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
		throw("xchg64 failed")
	}
	if atomic.Load64(&test_z64) != (3<<40)+3 {
		throw("xchg64 failed")
	}
}

func check() {
	var (
		a     int8
		b     uint8
		c     int16
		d     uint16
		e     int32
		f     uint32
		g     int64
		h     uint64
		i, i1 float32
		j, j1 float64
		k     unsafe.Pointer
		l     *uint16
		m     [4]byte
	)
	type x1t struct {
		x uint8
	}
	type y1t struct {
		x1 x1t
		y  uint8
	}
	var x1 x1t
	var y1 y1t

	if unsafe.Sizeof(a) != 1 {
		throw("bad a")
	}
	if unsafe.Sizeof(b) != 1 {
		throw("bad b")
	}
	if unsafe.Sizeof(c) != 2 {
		throw("bad c")
	}
	if unsafe.Sizeof(d) != 2 {
		throw("bad d")
	}
	if unsafe.Sizeof(e) != 4 {
		throw("bad e")
	}
	if unsafe.Sizeof(f) != 4 {
		throw("bad f")
	}
	if unsafe.Sizeof(g) != 8 {
		throw("bad g")
	}
	if unsafe.Sizeof(h) != 8 {
		throw("bad h")
	}
	if unsafe.Sizeof(i) != 4 {
		throw("bad i")
	}
	if unsafe.Sizeof(j) != 8 {
		throw("bad j")
	}
	if unsafe.Sizeof(k) != goarch.PtrSize {
		throw("bad k")
	}
	if unsafe.Sizeof(l) != goarch.PtrSize {
		throw("bad l")
	}
	if unsafe.Sizeof(x1) != 1 {
		throw("bad unsafe.Sizeof x1")
	}
	if unsafe.Offsetof(y1.y) != 1 {
		throw("bad offsetof y1.y")
	}
	if unsafe.Sizeof(y1) != 2 {
		throw("bad unsafe.Sizeof y1")
	}

	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
		throw("bad timediv")
	}

	var z uint32
	z = 1
	if !atomic.Cas(&z, 1, 2) {
		throw("cas1")
	}
	if z != 2 {
		throw("cas2")
	}

	z = 4
	if atomic.Cas(&z, 5, 6) {
		throw("cas3")
	}
	if z != 4 {
		throw("cas4")
	}

	z = 0xffffffff
	if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
		throw("cas5")
	}
	if z != 0xfffffffe {
		throw("cas6")
	}

	m = [4]byte{1, 1, 1, 1}
	atomic.Or8(&m[1], 0xf0)
	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
		throw("atomicor8")
	}

	m = [4]byte{0xff, 0xff, 0xff, 0xff}
	atomic.And8(&m[1], 0x1)
	if m[0] != 0xff || m[1] != 0x1 || m[2] != 0xff || m[3] != 0xff {
		throw("atomicand8")
	}

	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
	if j == j {
		throw("float64nan")
	}
	if !(j != j) {
		throw("float64nan1")
	}

	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
	if j == j1 {
		throw("float64nan2")
	}
	if !(j != j1) {
		throw("float64nan3")
	}

	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
	if i == i {
		throw("float32nan")
	}
	if !(i != i) {
		throw("float32nan1")
	}

	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
	if i == i1 {
		throw("float32nan2")
	}
	if !(i != i1) {
		throw("float32nan3")
	}

	testAtomic64()

	if fixedStack != round2(fixedStack) {
		throw("FixedStack is not power-of-2")
	}

	if !checkASM() {
		throw("assembly checks failed")
	}
}

type dbgVar struct {
	name   string
	value  *int32        // for variables that can only be set at startup
	atomic *atomic.Int32 // for variables that can be changed during execution
	def    int32         // default value (ideally zero)
}

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
var debug struct {
	cgocheck                 int32
	clobberfree              int32
	disablethp               int32
	dontfreezetheworld       int32
	efence                   int32
	gccheckmark              int32
	gcpacertrace             int32
	gcshrinkstackoff         int32
	gcstoptheworld           int32
	gctrace                  int32
	invalidptr               int32
	madvdontneed             int32 // for Linux; issue 28466
	runtimeContentionStacks  atomic.Int32
	scavtrace                int32
	scheddetail              int32
	schedtrace               int32
	tracebackancestors       int32
	asyncpreemptoff          int32
	harddecommit             int32
	adaptivestackstart       int32
	tracefpunwindoff         int32
	traceadvanceperiod       int32
	traceCheckStackOwnership int32
	profstackdepth           int32

	// debug.malloc is used as a combined debug check
	// in the malloc function and should be set
	// if any of the below debug options is != 0.
	malloc    bool
	inittrace int32
	sbrk      int32
	// traceallocfree controls whether execution traces contain
	// detailed trace data about memory allocation. This value
	// affects debug.malloc only if it is != 0 and the execution
	// tracer is enabled, in which case debug.malloc will be
	// set to "true" if it isn't already while tracing is enabled.
	// It will be set while the world is stopped, so it's safe.
	// The value of traceallocfree can be changed any time in response
	// to os.Setenv("GODEBUG").
	traceallocfree atomic.Int32

	panicnil atomic.Int32

	// asynctimerchan controls whether timer channels
	// behave asynchronously (as in Go 1.22 and earlier)
	// instead of their Go 1.23+ synchronous behavior.
	// The value can change at any time (in response to os.Setenv("GODEBUG"))
	// and affects all extant timer channels immediately.
	// Programs wouldn't normally change over an execution,
	// but allowing it is convenient for testing and for programs
	// that do an os.Setenv in main.init or main.main.
	asynctimerchan atomic.Int32
}

var dbgvars = []*dbgVar{
	{name: "adaptivestackstart", value: &debug.adaptivestackstart},
	{name: "asyncpreemptoff", value: &debug.asyncpreemptoff},
	{name: "asynctimerchan", atomic: &debug.asynctimerchan},
	{name: "cgocheck", value: &debug.cgocheck},
	{name: "clobberfree", value: &debug.clobberfree},
	{name: "disablethp", value: &debug.disablethp},
	{name: "dontfreezetheworld", value: &debug.dontfreezetheworld},
	{name: "efence", value: &debug.efence},
	{name: "gccheckmark", value: &debug.gccheckmark},
	{name: "gcpacertrace", value: &debug.gcpacertrace},
	{name: "gcshrinkstackoff", value: &debug.gcshrinkstackoff},
	{name: "gcstoptheworld", value: &debug.gcstoptheworld},
	{name: "gctrace", value: &debug.gctrace},
	{name: "harddecommit", value: &debug.harddecommit},
	{name: "inittrace", value: &debug.inittrace},
	{name: "invalidptr", value: &debug.invalidptr},
	{name: "madvdontneed", value: &debug.madvdontneed},
	{name: "panicnil", atomic: &debug.panicnil},
	{name: "profstackdepth", value: &debug.profstackdepth, def: 128},
	{name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks},
	{name: "sbrk", value: &debug.sbrk},
	{name: "scavtrace", value: &debug.scavtrace},
	{name: "scheddetail", value: &debug.scheddetail},
	{name: "schedtrace", value: &debug.schedtrace},
	{name: "traceadvanceperiod", value: &debug.traceadvanceperiod},
	{name: "traceallocfree", atomic: &debug.traceallocfree},
	{name: "tracecheckstackownership", value: &debug.traceCheckStackOwnership},
	{name: "tracebackancestors", value: &debug.tracebackancestors},
	{name: "tracefpunwindoff", value: &debug.tracefpunwindoff},
}
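
// For illustration, running a program with
//
//	GODEBUG=gctrace=1,schedtrace=1000
//
// sets debug.gctrace to 1 and debug.schedtrace to 1000 via the dbgvars
// table above (a scheduler summary line every 1000ms).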

func parsedebugvars() {
	// defaults
	debug.cgocheck = 1
	debug.invalidptr = 1
	debug.adaptivestackstart = 1 // set this to 0 to turn larger initial goroutine stacks off
	if GOOS == "linux" {
		// On Linux, MADV_FREE is faster than MADV_DONTNEED,
		// but doesn't affect many of the statistics that
		// MADV_DONTNEED does until the memory is actually
		// reclaimed. This generally leads to poor user
		// experience, like confusing stats in top and other
		// monitoring tools; and bad integration with
		// management systems that respond to memory usage.
		// Hence, default to MADV_DONTNEED.
		debug.madvdontneed = 1
	}
	debug.traceadvanceperiod = defaultTraceAdvancePeriod

	godebug := gogetenv("GODEBUG")

	p := new(string)
	*p = godebug
	godebugEnv.Store(p)

	// apply runtime defaults, if any
	for _, v := range dbgvars {
		if v.def != 0 {
			// Every var should have either v.value or v.atomic set.
			if v.value != nil {
				*v.value = v.def
			} else if v.atomic != nil {
				v.atomic.Store(v.def)
			}
		}
	}

	// apply compile-time GODEBUG settings
	parsegodebug(godebugDefault, nil)

	// apply environment settings
	parsegodebug(godebug, nil)

	debug.malloc = (debug.inittrace | debug.sbrk) != 0
	debug.profstackdepth = min(debug.profstackdepth, maxProfStackDepth)

	setTraceback(gogetenv("GOTRACEBACK"))
	traceback_env = traceback_cache
}

// reparsedebugvars reparses the runtime's debug variables
// because the environment variable has been changed to env.
func reparsedebugvars(env string) {
	seen := make(map[string]bool)
	// apply environment settings
	parsegodebug(env, seen)
	// apply compile-time GODEBUG settings for as-yet-unseen variables
	parsegodebug(godebugDefault, seen)
	// apply defaults for as-yet-unseen variables
	for _, v := range dbgvars {
		if v.atomic != nil && !seen[v.name] {
			v.atomic.Store(0)
		}
	}
}

// parsegodebug parses the godebug string, updating variables listed in dbgvars.
// If seen == nil, this is startup time and we process the string left to right
// overwriting older settings with newer ones.
// If seen != nil, $GODEBUG has changed and we are doing an
// incremental update. To avoid flapping in the case where a value is
// set multiple times (perhaps in the default and the environment,
// or perhaps twice in the environment), we process the string right-to-left
// and only change values not already seen. After doing this for both
// the environment and the default settings, the caller must also reset
// any now-unseen variables to their defaults (see reparsedebugvars).
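//
// For example, given GODEBUG=gctrace=1,gctrace=2, the startup pass ends
// with gctrace=2 (later settings win), and an incremental update reaches
// the same state by taking gctrace=2 first and skipping the already-seen key.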
func parsegodebug(godebug string, seen map[string]bool) {
	for p := godebug; p != ""; {
		var field string
		if seen == nil {
			// startup: process left to right, overwriting older settings with newer
			i := bytealg.IndexByteString(p, ',')
			if i < 0 {
				field, p = p, ""
			} else {
				field, p = p[:i], p[i+1:]
			}
		} else {
			// incremental update: process right to left, updating and skipping seen
			i := len(p) - 1
			for i >= 0 && p[i] != ',' {
				i--
			}
			if i < 0 {
				p, field = "", p
			} else {
				p, field = p[:i], p[i+1:]
			}
		}
		i := bytealg.IndexByteString(field, '=')
		if i < 0 {
			continue
		}
		key, value := field[:i], field[i+1:]
		if seen[key] {
			continue
		}
		if seen != nil {
			seen[key] = true
		}

		// Update MemProfileRate directly here since it
		// is int, not int32, and should only be updated
		// if specified in GODEBUG.
		if seen == nil && key == "memprofilerate" {
			if n, ok := atoi(value); ok {
				MemProfileRate = n
			}
		} else {
			for _, v := range dbgvars {
				if v.name == key {
					if n, ok := atoi32(value); ok {
						if seen == nil && v.value != nil {
							*v.value = n
						} else if v.atomic != nil {
							v.atomic.Store(n)
						}
					}
				}
			}
		}
	}

	if debug.cgocheck > 1 {
		throw("cgocheck > 1 mode is no longer supported at runtime. Use GOEXPERIMENT=cgocheck2 at build time instead.")
	}
}

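// setTraceback updates the cached traceback settings from a
// GOTRACEBACK-style level string. Through the linkname below it also
// backs the public runtime/debug.SetTraceback; for example,
// debug.SetTraceback("all") makes future tracebacks include all
// goroutines' stacks.
//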
//go:linkname setTraceback runtime/debug.SetTraceback
func setTraceback(level string) {
	var t uint32
	switch level {
	case "none":
		t = 0
	case "single", "":
		t = 1 << tracebackShift
	case "all":
		t = 1<<tracebackShift | tracebackAll
	case "system":
		t = 2<<tracebackShift | tracebackAll
	case "crash":
		t = 2<<tracebackShift | tracebackAll | tracebackCrash
	case "wer":
		if GOOS == "windows" {
			t = 2<<tracebackShift | tracebackAll | tracebackCrash
			enableWER()
			break
		}
		fallthrough
	default:
		t = tracebackAll
		if n, ok := atoi(level); ok && n == int(uint32(n)) {
			t |= uint32(n) << tracebackShift
		}
	}
	// When C owns the process, simply exiting on fatal errors
	// and panics is surprising. Be louder and abort instead.
	if islibrary || isarchive {
		t |= tracebackCrash
	}

	t |= traceback_env

	atomic.Store(&traceback_cache, t)
}

// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
// This keeps us within no-split stack limits on 32-bit processors.
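//
// For example, timediv(12345*1000000000+54321, 1000000000, &rem) returns
// 12345 with rem set to 54321 (exercised by the sanity check above).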
//
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			// Before this for loop, res was 0, thus all these
			// power of 2 increments are now just bitsets.
			res |= 1 << uint(bit)
		}
	}
	if v >= int64(div) {
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
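//
// A typical pairing, sketched for illustration:
//
//	mp := acquirem() // increment m.locks; disables preemption
//	// ... work that must not be preempted or migrated ...
//	releasem(mp)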

//go:nosplit
func acquirem() *m {
	gp := getg()
	gp.m.locks++
	return gp.m
}

//go:nosplit
func releasem(mp *m) {
	gp := getg()
	mp.locks--
	if mp.locks == 0 && gp.preempt {
		// restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// reflect_typelinks is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/goccy/json
//   - github.com/modern-go/reflect2
//   - github.com/vmware/govmomi
//   - github.com/pinpoint-apm/pinpoint-go-agent
//   - github.com/timandy/routine
//   - github.com/v2pro/plz
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_typelinks reflect.typelinks
func reflect_typelinks() ([]unsafe.Pointer, [][]int32) {
	modules := activeModules()
	sections := []unsafe.Pointer{unsafe.Pointer(modules[0].types)}
	ret := [][]int32{modules[0].typelinks}
	for _, md := range modules[1:] {
		sections = append(sections, unsafe.Pointer(md.types))
		ret = append(ret, md.typelinks)
	}
	return sections, ret
}

// reflect_resolveNameOff resolves a name offset from a base pointer.
//
// reflect_resolveNameOff is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/agiledragon/gomonkey/v2
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveNameOff reflect.resolveNameOff
func reflect_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}

// reflect_resolveTypeOff resolves an *rtype offset from a base type.
//
// reflect_resolveTypeOff is meant for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - gitee.com/quant1x/gox
//   - github.com/modern-go/reflect2
//   - github.com/v2pro/plz
//   - github.com/timandy/routine
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTypeOff reflect.resolveTypeOff
func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_resolveTextOff resolves a function pointer offset from a base type.
//
// reflect_resolveTextOff is for package reflect,
// but widely used packages access it using linkname.
// Notable members of the hall of shame include:
//   - github.com/cloudwego/frugal
//   - github.com/agiledragon/gomonkey/v2
//
// Do not remove or change the type signature.
// See go.dev/issue/67401.
//
//go:linkname reflect_resolveTextOff reflect.resolveTextOff
func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return toRType((*_type)(rtype)).textOff(textOff(off))
}

// reflectlite_resolveNameOff resolves a name offset from a base pointer.
//
//go:linkname reflectlite_resolveNameOff internal/reflectlite.resolveNameOff
func reflectlite_resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(resolveNameOff(ptrInModule, nameOff(off)).Bytes)
}

// reflectlite_resolveTypeOff resolves an *rtype offset from a base type.
//
//go:linkname reflectlite_resolveTypeOff internal/reflectlite.resolveTypeOff
func reflectlite_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
	return unsafe.Pointer(toRType((*_type)(rtype)).typeOff(typeOff(off)))
}

// reflect_addReflectOff adds a pointer to the reflection offset lookup map.
//
//go:linkname reflect_addReflectOff reflect.addReflectOff
func reflect_addReflectOff(ptr unsafe.Pointer) int32 {
	reflectOffsLock()
	if reflectOffs.m == nil {
		reflectOffs.m = make(map[int32]unsafe.Pointer)
		reflectOffs.minv = make(map[unsafe.Pointer]int32)
		reflectOffs.next = -1
	}
	id, found := reflectOffs.minv[ptr]
	if !found {
		id = reflectOffs.next
		reflectOffs.next-- // use negative offsets as IDs to aid debugging
		reflectOffs.m[id] = ptr
		reflectOffs.minv[ptr] = id
	}
	reflectOffsUnlock()
	return id
}