1// Copyright 2017 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package main
6
7import (
8	"os"
9	"runtime"
10	"sync"
11	"time"
12)
13
// mainTID is the OS thread ID of the main thread, captured in the
// LockOSThreadMain init hook below. It remains 0 on platforms where
// gettid is unsupported (gettid only works on Linux), and checks
// against it are skipped in that case.
var mainTID int
15
// init wires each test entry point into the test-program harness.
// NOTE(review): registerInit hooks presumably run during the harness's
// init phase — i.e. on the main thread, before main — which is why they
// are used here to capture main-thread state or to lock the main thread;
// confirm against the harness's register/registerInit definitions.
func init() {
	registerInit("LockOSThreadMain", func() {
		// init is guaranteed to run on the main thread.
		mainTID = gettid()
	})
	register("LockOSThreadMain", LockOSThreadMain)

	registerInit("LockOSThreadAlt", func() {
		// Lock the OS thread now so main runs on the main thread.
		runtime.LockOSThread()
	})
	register("LockOSThreadAlt", LockOSThreadAlt)

	registerInit("LockOSThreadAvoidsStatePropagation", func() {
		// Lock the OS thread now so main runs on the main thread.
		runtime.LockOSThread()
	})
	register("LockOSThreadAvoidsStatePropagation", LockOSThreadAvoidsStatePropagation)
	// No init hook needed: this test does not care which thread main is on.
	register("LockOSThreadTemplateThreadRace", LockOSThreadTemplateThreadRace)
}
36
// LockOSThreadMain checks that when a goroutine locked to the main
// thread exits, other goroutines do not subsequently run on the main
// thread: the locked thread must be retired, not recycled.
func LockOSThreadMain() {
	// gettid only works on Linux, so on other platforms this just
	// checks that the runtime doesn't do anything terrible.

	// This requires GOMAXPROCS=1 from the beginning to reliably
	// start a goroutine on the main thread.
	if runtime.GOMAXPROCS(-1) != 1 {
		println("requires GOMAXPROCS=1")
		os.Exit(1)
	}

	ready := make(chan bool, 1)
	go func() {
		// Because GOMAXPROCS=1, this *should* be on the main
		// thread. Stay there.
		runtime.LockOSThread()
		// mainTID == 0 means gettid is unsupported on this
		// platform; skip the identity check there.
		if mainTID != 0 && gettid() != mainTID {
			println("failed to start goroutine on main thread")
			os.Exit(1)
		}
		// Exit with the thread locked, which should exit the
		// main thread.
		ready <- true
	}()
	<-ready
	// Give the locked goroutine a moment to exit and take the
	// main thread down with it before we probe our own thread ID.
	time.Sleep(1 * time.Millisecond)
	// Check that this goroutine is still running on a different
	// thread.
	if mainTID != 0 && gettid() == mainTID {
		println("goroutine migrated to locked thread")
		os.Exit(1)
	}
	println("OK")
}
71
72func LockOSThreadAlt() {
73	// This is running locked to the main OS thread.
74
75	var subTID int
76	ready := make(chan bool, 1)
77	go func() {
78		// This goroutine must be running on a new thread.
79		runtime.LockOSThread()
80		subTID = gettid()
81		ready <- true
82		// Exit with the thread locked.
83	}()
84	<-ready
85	runtime.UnlockOSThread()
86	for i := 0; i < 100; i++ {
87		time.Sleep(1 * time.Millisecond)
88		// Check that this goroutine is running on a different thread.
89		if subTID != 0 && gettid() == subTID {
90			println("locked thread reused")
91			os.Exit(1)
92		}
93		exists, supported, err := tidExists(subTID)
94		if err != nil {
95			println("error:", err.Error())
96			return
97		}
98		if !supported || !exists {
99			goto ok
100		}
101	}
102	println("sub thread", subTID, "still running")
103	return
104ok:
105	println("OK")
106}
107
// LockOSThreadAvoidsStatePropagation checks that per-thread OS state
// established on a locked thread (here, an unshared FS view with a
// different CWD) does not outlive that thread: the tainted thread must
// die when its goroutine exits, and new threads must not be cloned
// from it.
func LockOSThreadAvoidsStatePropagation() {
	// This test is similar to LockOSThreadAlt in that it will detect if a thread
	// which should have died is still running. However, rather than do this with
	// thread IDs, it does this by unsharing state on that thread. This way, it
	// also detects whether new threads were cloned from the dead thread, and not
	// from a clean thread. Cloning from a locked thread is undesirable since
	// cloned threads will inherit potentially unwanted OS state.
	//
	// unshareFs, getcwd, and chdir("/tmp") are only guaranteed to work on
	// Linux, so on other platforms this just checks that the runtime doesn't
	// do anything terrible.
	//
	// This is running locked to the main OS thread.

	// GOMAXPROCS=1 makes this fail much more reliably if a tainted thread is
	// cloned from.
	if runtime.GOMAXPROCS(-1) != 1 {
		println("requires GOMAXPROCS=1")
		os.Exit(1)
	}

	// Establish a known process-wide CWD to compare against later.
	if err := chdir("/"); err != nil {
		println("failed to chdir:", err.Error())
		os.Exit(1)
	}
	// On systems other than Linux, cwd == "".
	cwd, err := getcwd()
	if err != nil {
		println("failed to get cwd:", err.Error())
		os.Exit(1)
	}
	if cwd != "" && cwd != "/" {
		println("unexpected cwd", cwd, " wanted /")
		os.Exit(1)
	}

	ready := make(chan bool, 1)
	go func() {
		// This goroutine must be running on a new thread.
		runtime.LockOSThread()

		// Unshare details about the FS, like the CWD, with
		// the rest of the process on this thread.
		// On systems other than Linux, this is a no-op.
		if err := unshareFs(); err != nil {
			if err == errNotPermitted {
				// Not a test failure: the environment simply
				// forbids unshare, so pass vacuously.
				println("unshare not permitted")
				os.Exit(0)
			}
			println("failed to unshare fs:", err.Error())
			os.Exit(1)
		}
		// Chdir to somewhere else on this thread.
		// On systems other than Linux, this is a no-op.
		if err := chdir(os.TempDir()); err != nil {
			println("failed to chdir:", err.Error())
			os.Exit(1)
		}

		// The state on this thread is now considered "tainted", but it
		// should no longer be observable in any other context.

		ready <- true
		// Exit with the thread locked.
	}()
	<-ready

	// Spawn yet another goroutine and lock it. Since GOMAXPROCS=1, if
	// for some reason state from the (hopefully dead) locked thread above
	// propagated into a newly created thread (via clone), or that thread
	// is actually being re-used, then we should get scheduled on such a
	// thread with high likelihood.
	done := make(chan bool)
	go func() {
		runtime.LockOSThread()

		// Get the CWD and check if this is the same as the main thread's
		// CWD. Every thread should share the same CWD.
		// On systems other than Linux, wd == "".
		wd, err := getcwd()
		if err != nil {
			println("failed to get cwd:", err.Error())
			os.Exit(1)
		}
		if wd != cwd {
			println("bad state from old thread propagated after it should have died")
			os.Exit(1)
		}
		// Hold the locked thread until main has been signaled.
		<-done

		runtime.UnlockOSThread()
	}()
	// Unbuffered send: completes only after the checker goroutine has
	// passed its CWD check and reached the receive.
	done <- true
	runtime.UnlockOSThread()
	println("OK")
}
204
// LockOSThreadTemplateThreadRace tries to provoke the deadlock from
// golang.org/issue/38931 by racing a stop-the-world against two
// simultaneous LockOSThread calls. Success is simply termination:
// if the race is hit, the program hangs instead of printing OK.
func LockOSThreadTemplateThreadRace() {
	// This test attempts to reproduce the race described in
	// golang.org/issue/38931. To do so, we must have a stop-the-world
	// (achieved via ReadMemStats) racing with two LockOSThread calls.
	//
	// While this test attempts to line up the timing, it is only expected
	// to fail (and thus hang) around 2% of the time if the race is
	// present.

	// Ensure enough Ps to actually run everything in parallel. Though on
	// <4 core machines, we are still at the whim of the kernel scheduler.
	runtime.GOMAXPROCS(4)

	go func() {
		// Stop the world; race with LockOSThread below.
		// This goroutine loops forever and is intentionally leaked;
		// the process exits once the test completes.
		var m runtime.MemStats
		for {
			runtime.ReadMemStats(&m)
		}
	}()

	// Try to synchronize both LockOSThreads.
	start := time.Now().Add(10 * time.Millisecond)

	var wg sync.WaitGroup
	wg.Add(2)

	for i := 0; i < 2; i++ {
		go func() {
			// Busy-wait until the agreed start time so both
			// goroutines reach LockOSThread close together.
			for time.Now().Before(start) {
			}

			// Add work to the local runq to trigger early startm
			// in handoffp.
			go func() {}()

			runtime.LockOSThread()
			runtime.Gosched() // add a preemption point.
			wg.Done()
		}()
	}

	wg.Wait()
	// If both LockOSThreads completed then we did not hit the race.
	println("OK")
}
251