1// Copyright 2022 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5//go:build ignore
6
7// mklockrank records the static rank graph of the locks in the
8// runtime and generates the rank checking structures in lockrank.go.
9package main
10
11import (
12	"bytes"
13	"flag"
14	"fmt"
15	"go/format"
16	"internal/dag"
17	"io"
18	"log"
19	"os"
20	"strings"
21)
22
// ranks describes the lock rank graph. See "go doc internal/dag" for
// the syntax.
//
// "a < b" means a must be acquired before b if both are held
// (or, if b is held, a cannot be acquired).
//
// "NONE < a" means no locks may be held when a is acquired.
//
// If a lock is not given a rank, then it is assumed to be a leaf
// lock, which means no other lock can be acquired while it is held.
// Therefore, leaf locks do not need to be given an explicit rank.
//
// Ranks in all caps are pseudo-nodes that help define order, but do
// not actually define a rank.
//
// The graph cannot express self-edges; ranks that permit multiple
// locks of the same rank to be held at once are listed separately in
// cyclicRanks below.
//
// TODO: It's often hard to correlate rank names to locks. Change
// these to be more consistent with the locks they label.
const ranks = `
# Sysmon
NONE
< sysmon
< scavenge, forcegc;

# Defer
NONE < defer;

# GC
NONE <
  sweepWaiters,
  assistQueue,
  sweep;

# Test only
NONE < testR, testW;

NONE < timerSend;

# Scheduler, timers, netpoll
NONE < allocmW, execW, cpuprof, pollCache, pollDesc, wakeableSleep;
scavenge, sweep, testR, wakeableSleep, timerSend < hchan;
assistQueue,
  cpuprof,
  forcegc,
  hchan,
  pollDesc, # pollDesc can interact with timers, which can lock sched.
  scavenge,
  sweep,
  sweepWaiters,
  testR,
  wakeableSleep
# Above SCHED are things that can call into the scheduler.
< SCHED
# Below SCHED is the scheduler implementation.
< allocmR,
  execR;
allocmR, execR, hchan < sched;
sched < allg, allp;

# Channels
NONE < notifyList;
hchan, notifyList < sudog;

hchan, pollDesc, wakeableSleep < timers;
timers, timerSend < timer < netpollInit;

# Semaphores
NONE < root;

# Itabs
NONE
< itab
< reflectOffs;

# User arena state
NONE < userArenaState;

# Tracing without a P uses a global trace buffer.
scavenge
# Above TRACEGLOBAL can emit a trace event without a P.
< TRACEGLOBAL
# Below TRACEGLOBAL manages the global tracing buffer.
# Note that traceBuf eventually chains to MALLOC, but we never get that far
# in the situation where there's no P.
< traceBuf;
# Starting/stopping tracing traces strings.
traceBuf < traceStrings;

# Malloc
allg,
  allocmR,
  allp, # procresize
  execR, # May grow stack
  execW, # May allocate after BeforeFork
  hchan,
  notifyList,
  reflectOffs,
  timer,
  traceStrings,
  userArenaState
# Above MALLOC are things that can allocate memory.
< MALLOC
# Below MALLOC is the malloc implementation.
< fin,
  spanSetSpine,
  mspanSpecial,
  traceTypeTab,
  MPROF;

# We can acquire gcBitsArenas for pinner bits, and
# it's guarded by mspanSpecial.
MALLOC, mspanSpecial < gcBitsArenas;

# Memory profiling
MPROF < profInsert, profBlock, profMemActive;
profMemActive < profMemFuture;

# Stack allocation and copying
gcBitsArenas,
  netpollInit,
  profBlock,
  profInsert,
  profMemFuture,
  spanSetSpine,
  fin,
  root
# Anything that can grow the stack can acquire STACKGROW.
# (Most higher layers imply STACKGROW, like MALLOC.)
< STACKGROW
# Below STACKGROW is the stack allocator/copying implementation.
< gscan;
gscan < stackpool;
gscan < stackLarge;
# Generally, hchan must be acquired before gscan. But in one case,
# where we suspend a G and then shrink its stack, syncadjustsudogs
# can acquire hchan locks while holding gscan. To allow this case,
# we use hchanLeaf instead of hchan.
gscan < hchanLeaf;

# Write barrier
defer,
  gscan,
  mspanSpecial,
  pollCache,
  sudog,
  timer
# Anything that can have write barriers can acquire WB.
# Above WB, we can have write barriers.
< WB
# Below WB is the write barrier implementation.
< wbufSpans;

# Span allocator
stackLarge,
  stackpool,
  wbufSpans
# Above mheap is anything that can call the span allocator.
< mheap;
# Below mheap is the span allocator implementation.
#
# Specials: we're allowed to allocate a special while holding
# an mspanSpecial lock, and they're part of the malloc implementation.
# Pinner bits might be freed by the span allocator.
mheap, mspanSpecial < mheapSpecial;
mheap, mheapSpecial < globalAlloc;

# Execution tracer events (with a P)
hchan,
  mheap,
  root,
  sched,
  traceStrings,
  notifyList,
  fin
# Above TRACE is anything that can create a trace event
< TRACE
< trace
< traceStackTab;

# panic is handled specially. It is implicitly below all other locks.
NONE < panic;
# deadlock is not acquired while holding panic, but it also needs to be
# below all other locks.
panic < deadlock;
# raceFini is only held while exiting.
panic < raceFini;

# RWMutex internal read lock

allocmR,
  allocmW
< allocmRInternal;

execR,
  execW
< execRInternal;

testR,
  testW
< testRInternal;
`
223
// cyclicRanks lists lock ranks that allow multiple locks of the same
// rank to be acquired simultaneously. The runtime enforces ordering
// within these ranks using a separate mechanism.
var cyclicRanks = map[string]bool{
	// The point of the deadlock lock is to deadlock.
	"deadlock": true,
	// select acquires multiple hchans, in hchan.sortkey() order.
	"hchan": true,
	// syncadjustsudogs() acquires multiple hchanLeafs, in
	// hchan.sortkey() order.
	"hchanLeaf": true,
	// destroy() locks multiple timers simultaneously.
	"timers": true,
}
239
240func main() {
241	flagO := flag.String("o", "", "write to `file` instead of stdout")
242	flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
243	flag.Parse()
244	if flag.NArg() != 0 {
245		fmt.Fprintf(os.Stderr, "too many arguments")
246		os.Exit(2)
247	}
248
249	g, err := dag.Parse(ranks)
250	if err != nil {
251		log.Fatal(err)
252	}
253
254	var out []byte
255	if *flagDot {
256		var b bytes.Buffer
257		g.TransitiveReduction()
258		// Add cyclic edges for visualization.
259		for k := range cyclicRanks {
260			g.AddEdge(k, k)
261		}
262		// Reverse the graph. It's much easier to read this as
263		// a "<" partial order than a ">" partial order. This
264		// ways, locks are acquired from the top going down
265		// and time moves forward over the edges instead of
266		// backward.
267		g.Transpose()
268		generateDot(&b, g)
269		out = b.Bytes()
270	} else {
271		var b bytes.Buffer
272		generateGo(&b, g)
273		out, err = format.Source(b.Bytes())
274		if err != nil {
275			log.Fatal(err)
276		}
277	}
278
279	if *flagO != "" {
280		err = os.WriteFile(*flagO, out, 0666)
281	} else {
282		_, err = os.Stdout.Write(out)
283	}
284	if err != nil {
285		log.Fatal(err)
286	}
287}
288
// generateGo writes to w the Go source for runtime's lockrank.go:
// the lockRank constants, the lockNames string table, lockRank's
// String method, and the lockPartialOrder table, all derived from
// the rank graph g.
func generateGo(w io.Writer, g *dag.Graph) {
	fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.

package runtime

type lockRank int

`)

	// Create numeric ranks.
	// Reverse the topological order in place so the emitted constants
	// are in rank order: lower values must be acquired before higher
	// values (see the doc comment written below).
	topo := g.Topo()
	for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
		topo[i], topo[j] = topo[j], topo[i]
	}
	fmt.Fprintf(w, `
// Constants representing the ranks of all non-leaf runtime locks, in rank order.
// Locks with lower rank must be taken before locks with higher rank,
// in addition to satisfying the partial order in lockPartialOrder.
// A few ranks allow self-cycles, which are specified in lockPartialOrder.
const (
	lockRankUnknown lockRank = iota

`)
	for _, rank := range topo {
		if isPseudo(rank) {
			// Pseudo-ranks get no constant; emit them as comments so
			// their position in the ordering stays visible.
			fmt.Fprintf(w, "\t// %s\n", rank)
		} else {
			fmt.Fprintf(w, "\t%s\n", cname(rank))
		}
	}
	fmt.Fprintf(w, `)

// lockRankLeafRank is the rank of lock that does not have a declared rank,
// and hence is a leaf lock.
const lockRankLeafRank lockRank = 1000
`)

	// Create string table.
	fmt.Fprintf(w, `
// lockNames gives the names associated with each of the above ranks.
var lockNames = []string{
`)
	for _, rank := range topo {
		if !isPseudo(rank) {
			fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
		}
	}
	fmt.Fprintf(w, `}

func (rank lockRank) String() string {
	if rank == 0 {
		return "UNKNOWN"
	}
	if rank == lockRankLeafRank {
		return "LEAF"
	}
	if rank < 0 || int(rank) >= len(lockNames) {
		return "BAD RANK"
	}
	return lockNames[rank]
}
`)

	// Create partial order structure.
	fmt.Fprintf(w, `
// lockPartialOrder is the transitive closure of the lock rank graph.
// An entry for rank X lists all of the ranks that can already be held
// when rank X is acquired.
//
// Lock ranks that allow self-cycles list themselves.
var lockPartialOrder [][]lockRank = [][]lockRank{
`)
	for _, rank := range topo {
		// Pseudo-ranks have no numeric rank, so they get no entry.
		if isPseudo(rank) {
			continue
		}
		// Collect the non-pseudo ranks that may already be held when
		// rank is acquired.
		list := []string{}
		for _, before := range g.Edges(rank) {
			if !isPseudo(before) {
				list = append(list, cname(before))
			}
		}
		// Self-cyclic ranks additionally list themselves.
		if cyclicRanks[rank] {
			list = append(list, cname(rank))
		}

		fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
	}
	fmt.Fprintf(w, "}\n")
}
379
// cname returns the Go const name for the given lock rank label:
// "lockRank" followed by the label with its first letter upper-cased.
func cname(label string) string {
	head := strings.ToUpper(label[:1])
	return "lockRank" + head + label[1:]
}
384
// isPseudo reports whether label names a pseudo-rank, which by
// convention is written entirely in upper case (i.e. upper-casing it
// leaves it unchanged).
func isPseudo(label string) bool {
	upper := strings.ToUpper(label)
	return upper == label
}
388
389// generateDot emits a Graphviz dot representation of g to w.
390func generateDot(w io.Writer, g *dag.Graph) {
391	fmt.Fprintf(w, "digraph g {\n")
392
393	// Define all nodes.
394	for _, node := range g.Nodes {
395		fmt.Fprintf(w, "%q;\n", node)
396	}
397
398	// Create edges.
399	for _, node := range g.Nodes {
400		for _, to := range g.Edges(node) {
401			fmt.Fprintf(w, "%q -> %q;\n", node, to)
402		}
403	}
404
405	fmt.Fprintf(w, "}\n")
406}
407