1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package runtime_test
6
7import (
8	"internal/testenv"
9	"math"
10	"runtime"
11	"sync"
12	"sync/atomic"
13	"testing"
14	"time"
15)
16
// TestChan exercises the core channel operations — blocking and
// non-blocking send/receive, close semantics, FIFO ordering, and
// len/cap — across a range of capacities, including the unbuffered case
// (chanCap == 0).
func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			// The reads of recv1/recv2 below are race-free: the writes
			// can only happen after the sends at the end of this scope,
			// which happen-after these reads in the main goroutine.
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			// Unblock the two receiver goroutines.
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			// Unblock the sender goroutine.
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			// Buffered values are still delivered in FIFO order after close.
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			// Once drained, a closed channel yields the zero value...
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			// ...and the two-value form reports ok == false.
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			// Merge the four receivers' histograms; every value 0..L-1
			// must have been seen exactly P times.
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}
202
// TestNonblockRecvRace checks that a non-blocking receive racing with
// close never sees the channel as "not ready": the buffered value (or
// the close itself) must keep the select's receive case live.
func TestNonblockRecvRace(t *testing.T) {
	iters := 10000
	if testing.Short() {
		iters = 100
	}
	for trial := 0; trial < iters; trial++ {
		ch := make(chan int, 1)
		ch <- 1
		go func() {
			// Either the buffered 1 or the close must make this
			// receive case ready; default must never be taken.
			select {
			case <-ch:
			default:
				t.Error("chan is not ready")
			}
		}()
		close(ch)
		<-ch
		if t.Failed() {
			return
		}
	}
}
225
// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
//
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		// Make c1 ready before the selecting goroutine exists.
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				// Neither channel ready: this is the failure mode.
				done <- false
				return
			}
			done <- true
		}()
		// Make c2 ready, then try to make c1 no longer ready by
		// draining it (best effort; the goroutine may have taken it).
		c2 <- 1
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
270
// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		// Make c1 ready before the selecting goroutine exists.
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				// Neither channel ready: this is the failure mode.
				done <- false
				return
			}
			done <- true
		}()
		// Closing c2 makes it permanently ready for receiving.
		close(c2)
		// Best-effort attempt to make c1 no longer ready.
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
302
// TestSelfSelect runs two goroutines that both send to and receive from
// the same channel inside a select.
func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p // pin the loop variable for the closure (pre-Go 1.22 capture semantics)
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					// Alternate the textual order of the two cases so
					// both orderings are exercised.
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							// On an unbuffered channel a goroutine must
							// never receive its own send.
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}
341
// TestSelectStress hammers four channels (two unbuffered, two buffered)
// with dedicated senders and receivers plus one all-channel select
// sender and one all-channel select receiver, checking that the whole
// construct runs to completion without deadlocking.
func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k // pin the loop variable for the closures (pre-Go 1.22 capture semantics)
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		// Select-sender: once a channel has accepted N values, set its
		// slot to nil so that case can never fire again.
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		// Select-receiver: mirror image of the select-sender above.
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}
436
// TestSelectFairness checks that a select with two always-ready
// channels (c1 and c2, pre-filled with trials+1 values each) chooses
// between them roughly uniformly. c3 and c4 are never ready and only
// add extra cases to the select.
func TestSelectFairness(t *testing.T) {
	const trials = 10000
	if runtime.GOOS == "linux" && runtime.GOARCH == "ppc64le" {
		testenv.SkipFlaky(t, 22047)
	}
	c1 := make(chan byte, trials+1)
	c2 := make(chan byte, trials+1)
	for i := 0; i < trials+1; i++ {
		c1 <- 1
		c2 <- 2
	}
	c3 := make(chan byte)
	c4 := make(chan byte)
	out := make(chan byte)
	done := make(chan byte)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			var b byte
			select {
			case b = <-c3:
			case b = <-c4:
			case b = <-c1:
			case b = <-c2:
			}
			// Forward the chosen value, or quit once the main
			// goroutine has finished counting.
			select {
			case out <- b:
			case <-done:
				return
			}
		}
	}()
	cnt1, cnt2 := 0, 0
	for i := 0; i < trials; i++ {
		switch b := <-out; b {
		case 1:
			cnt1++
		case 2:
			cnt2++
		default:
			t.Fatalf("unexpected value %d on channel", b)
		}
	}
	// If the select in the goroutine is fair,
	// cnt1 and cnt2 should be about the same value.
	// See if we're more than 10 sigma away from the expected value.
	// 10 sigma is a lot, but we're ok with some systematic bias as
	// long as it isn't too severe.
	const mean = trials * 0.5
	const variance = trials * 0.5 * (1 - 0.5)
	stddev := math.Sqrt(variance)
	if math.Abs(float64(cnt1-mean)) > 10*stddev {
		t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2)
	}
	close(done)
	wg.Wait()
}
496
// TestChanSendInterface sends a concrete pointer on a channel of
// interface type via a blocking send, a non-blocking send, and a
// multi-case select send, exercising the interface-value path of the
// channel implementation.
func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	ch := make(chan any, 1)
	val := &mt{}
	// Fill the single buffer slot.
	ch <- val
	// Non-blocking send on a full channel must fall through to default.
	select {
	case ch <- val:
	default:
	}
	// Same, with two competing send cases.
	select {
	case ch <- val:
	case ch <- &mt{}:
	default:
	}
}
512
// TestPseudoRandomSend checks that when both cases of a select are
// ready (two sends on the same channel), the runtime chooses between
// them pseudo-randomly rather than always favoring one case.
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock() // released by the receiver goroutine when it is done
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			// Both cases send on the same channel, so whenever a send
			// can proceed, both are ready simultaneously.
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		// A deterministic select would produce all zeros or all ones;
		// require at least 10% of each.
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}
545
// TestMultiConsumer checks a fan-out/fan-in pipeline: a feeder sends
// niter values to nwork worker goroutines over q, the workers forward
// them to r, and the main goroutine verifies that the count and sum of
// everything drained from r match what was fed in.
func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	// Small values cycled through the pipeline.
	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	// Reading expect here is race-free: the range above ends only after
	// close(r), which happens-after the feeder's final write to expect.
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}
596
// TestShrinkStackDuringBlockedSend checks that channel operations keep
// working when a goroutine blocked on a channel send has its stack
// shrunk by the garbage collector. stackGrowthRecursive is a helper
// defined elsewhere in this package that temporarily grows the stack.
func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack1.go:stackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}
628
// TestNoShrinkStackWhileParking tries to provoke a stack shrink while a
// goroutine is in the middle of parking on a channel; see the long
// comment below and issue 40641 for the underlying race.
func TestNoShrinkStackWhileParking(t *testing.T) {
	if runtime.GOOS == "netbsd" && runtime.GOARCH == "arm64" {
		testenv.SkipFlaky(t, 49382)
	}
	if runtime.GOOS == "openbsd" {
		testenv.SkipFlaky(t, 51482)
	}

	// The goal of this test is to trigger a "racy sudog adjustment"
	// throw. Basically, there's a window between when a goroutine
	// becomes available for preemption for stack scanning (and thus,
	// stack shrinking) but before the goroutine has fully parked on a
	// channel. See issue 40641 for more details on the problem.
	//
	// The way we try to induce this failure is to set up two
	// goroutines: a sender and a receiver that communicate across
	// a channel. We try to set up a situation where the sender
	// grows its stack temporarily then *fully* blocks on a channel
	// often. Meanwhile a GC is triggered so that we try to get a
	// mark worker to shrink the sender's stack and race with the
	// sender parking.
	//
	// Unfortunately the race window here is so small that we
	// either need a ridiculous number of iterations, or we add
	// "usleep(1000)" to park_m, just before the unlockf call.
	const n = 10
	send := func(c chan<- int, done chan struct{}) {
		for i := 0; i < n; i++ {
			c <- i
			// Use lots of stack briefly so that
			// the GC is going to want to shrink us
			// when it scans us. Make sure not to
			// do any function calls otherwise
			// in order to avoid us shrinking ourselves
			// when we're preempted.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}
	recv := func(c <-chan int, done chan struct{}) {
		for i := 0; i < n; i++ {
			// Sleep here so that the sender always
			// fully blocks.
			time.Sleep(10 * time.Microsecond)
			<-c
		}
		done <- struct{}{}
	}
	for i := 0; i < n*20; i++ {
		c := make(chan int)
		done := make(chan struct{})
		go recv(c, done)
		go send(c, done)
		// Wait a little bit before triggering
		// the GC to make sure the sender and
		// receiver have gotten into their groove.
		time.Sleep(50 * time.Microsecond)
		runtime.GC()
		// One completion signal from each of send and recv.
		<-done
		<-done
	}
}
691
// TestSelectDuplicateChannel regression-tests the case where a select
// lists the same channel in more than one case, so one goroutine is
// queued on a single channel's wait queue multiple times. The sleeps
// below enforce the queueing order the scenario depends on.
func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c.  This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B.  This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}
720
// TestSelectStackAdjust checks that a pointer stored in a select
// receive slot that points into a goroutine's own stack is adjusted
// correctly when the stack is shrunk by GC while the goroutine is
// blocked in the select.
func TestSelectStackAdjust(t *testing.T) {
	// Test that channel receive slots that contain local stack
	// pointers are adjusted correctly by stack shrinking.
	c := make(chan *int)
	d := make(chan *int)
	ready1 := make(chan bool)
	ready2 := make(chan bool)

	f := func(ready chan bool, dup bool) {
		// Temporarily grow the stack to 10K.
		stackGrowthRecursive((10 << 10) / (128 * 8))

		// We're ready to trigger GC and stack shrink.
		ready <- true

		val := 42
		var cx *int
		cx = &val

		// When dup is false, c2 and d2 stay nil, so their select
		// cases below can never fire.
		var c2 chan *int
		var d2 chan *int
		if dup {
			c2 = c
			d2 = d
		}

		// Receive from d. cx won't be affected.
		select {
		case cx = <-c:
		case <-c2:
		case <-d:
		case <-d2:
		}

		// Check that pointer in cx was adjusted correctly.
		if cx != &val {
			t.Error("cx no longer points to val")
		} else if val != 42 {
			t.Error("val changed")
		} else {
			*cx = 43
			if val != 43 {
				t.Error("changing *cx failed to change val")
			}
		}
		ready <- true
	}

	go f(ready1, false)
	go f(ready2, true)

	// Let the goroutines get into the select.
	<-ready1
	<-ready2
	time.Sleep(10 * time.Millisecond)

	// Force concurrent GC to shrink the stacks.
	runtime.GC()

	// Wake selects.
	close(d)
	<-ready1
	<-ready2
}
785
786type struct0 struct{}
787
788func BenchmarkMakeChan(b *testing.B) {
789	b.Run("Byte", func(b *testing.B) {
790		var x chan byte
791		for i := 0; i < b.N; i++ {
792			x = make(chan byte, 8)
793		}
794		close(x)
795	})
796	b.Run("Int", func(b *testing.B) {
797		var x chan int
798		for i := 0; i < b.N; i++ {
799			x = make(chan int, 8)
800		}
801		close(x)
802	})
803	b.Run("Ptr", func(b *testing.B) {
804		var x chan *byte
805		for i := 0; i < b.N; i++ {
806			x = make(chan *byte, 8)
807		}
808		close(x)
809	})
810	b.Run("Struct", func(b *testing.B) {
811		b.Run("0", func(b *testing.B) {
812			var x chan struct0
813			for i := 0; i < b.N; i++ {
814				x = make(chan struct0, 8)
815			}
816			close(x)
817		})
818		b.Run("32", func(b *testing.B) {
819			var x chan struct32
820			for i := 0; i < b.N; i++ {
821				x = make(chan struct32, 8)
822			}
823			close(x)
824		})
825		b.Run("40", func(b *testing.B) {
826			var x chan struct40
827			for i := 0; i < b.N; i++ {
828				x = make(chan struct40, 8)
829			}
830			close(x)
831		})
832	})
833}
834
// BenchmarkChanNonblocking measures a failing non-blocking receive
// (select with default) on an unbuffered channel that never has data.
func BenchmarkChanNonblocking(b *testing.B) {
	ch := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			// Nothing is ever sent on ch, so default always runs.
			select {
			case <-ch:
			default:
			}
		}
	})
}
846
// BenchmarkSelectUncontended measures a two-case select where each
// parallel worker owns a private pair of buffered channels, so there is
// no cross-goroutine contention: a single token bounces between the
// pair on every iteration.
func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		ping := make(chan int, 1)
		pong := make(chan int, 1)
		ping <- 0 // seed the token
		for pb.Next() {
			select {
			case <-ping:
				pong <- 0
			case <-pong:
				ping <- 0
			}
		}
	})
}
862
// BenchmarkSelectSyncContended measures selects over three shared
// unbuffered channels under contention. Each parallel worker starts a
// helper goroutine that selects among sends; closing done tears the
// helpers down after the benchmark body returns.
func BenchmarkSelectSyncContended(b *testing.B) {
	ch1 := make(chan int)
	ch2 := make(chan int)
	ch3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		// Helper sender paired with this worker.
		go func() {
			for {
				select {
				case ch1 <- 0:
				case ch2 <- 0:
				case ch3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-ch1:
			case <-ch2:
			case <-ch3:
			}
		}
	})
	close(done)
}
890
// BenchmarkSelectAsyncContended measures a two-case select over a
// shared pair of buffered channels: all workers contend on the same
// channels, with one token in flight per worker.
func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	ch1 := make(chan int, procs)
	ch2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		ch1 <- 0 // each worker contributes one token
		for pb.Next() {
			select {
			case <-ch1:
				ch2 <- 0
			case <-ch2:
				ch1 <- 0
			}
		}
	})
}
907
// BenchmarkSelectNonblock measures the four flavors of failing
// non-blocking select: receive and send on unbuffered channels with no
// counterpart, receive on an empty buffered channel, and send on a
// buffered channel that stays full after the first iteration.
func BenchmarkSelectNonblock(b *testing.B) {
	syncRecv := make(chan int)
	syncSend := make(chan int)
	asyncRecv := make(chan int, 1)
	asyncSend := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-syncRecv:
			default:
			}
			select {
			case syncSend <- 0:
			default:
			}
			select {
			case <-asyncRecv:
			default:
			}
			select {
			case asyncSend <- 0:
			default:
			}
		}
	})
}
934
// BenchmarkChanUncontended measures buffered channel operations with no
// contention: each worker owns a private channel, fills its buffer,
// then drains it.
func BenchmarkChanUncontended(b *testing.B) {
	const batch = 100
	b.RunParallel(func(pb *testing.PB) {
		ch := make(chan int, batch)
		for pb.Next() {
			for i := 0; i < batch; i++ {
				ch <- 0
			}
			for i := 0; i < batch; i++ {
				<-ch
			}
		}
	})
}
949
// BenchmarkChanContended is like BenchmarkChanUncontended, but every
// worker shares one channel sized so that all workers' batches fit,
// creating contention on the channel lock.
func BenchmarkChanContended(b *testing.B) {
	const batch = 100
	shared := make(chan int, batch*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < batch; i++ {
				shared <- 0
			}
			for i := 0; i < batch; i++ {
				<-shared
			}
		}
	})
}
964
// benchmarkChanSync measures synchronous ping-pong on an unbuffered
// channel between two goroutines, running work iterations of localWork
// around each channel operation. The shared countdown N hands out batch
// numbers; a goroutine's role in a batch (receive-first vs send-first)
// follows the parity of the number it grabbed, so the two goroutines'
// operations pair up (roughly — the exact interleaving depends on
// scheduling).
func benchmarkChanSync(b *testing.B, work int) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs) // completion signals, one per goroutine
	myc := make(chan int)       // the unbuffered ping-pong channel
	for p := 0; p < procs; p++ {
		go func() {
			for {
				// Grab the next batch; quit when the budget is spent.
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						localWork(work)
						myc <- 0
						localWork(work)
					} else {
						myc <- 0
						localWork(work)
						<-myc
						localWork(work)
					}
				}
			}
			c <- true
		}()
	}
	// Wait for both goroutines to finish.
	for p := 0; p < procs; p++ {
		<-c
	}
}
999
// BenchmarkChanSync measures unbuffered ping-pong with no local work.
func BenchmarkChanSync(b *testing.B) {
	benchmarkChanSync(b, 0)
}
1003
// BenchmarkChanSyncWork measures unbuffered ping-pong with 1000
// iterations of local work around each channel operation.
func BenchmarkChanSyncWork(b *testing.B) {
	benchmarkChanSync(b, 1000)
}
1007
// benchmarkChanProdCons measures producer/consumer throughput over a
// channel of capacity chanSize, performing localWork iterations of busy
// work per message on each side. It runs GOMAXPROCS producer/consumer
// pairs sharing one channel; each producer finishes by sending a single
// 0 sentinel and each consumer exits on the first 0 it receives, so
// producer and consumer counts balance.
//
// NOTE: the localWork parameter shadows the localWork helper function.
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs) // completion signals, one per goroutine
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer.
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0 // sentinel: terminates exactly one consumer
			c <- foo == 42
		}()
		go func() {
			// Consumer.
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				// Model some local work.
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
1049
// BenchmarkChanProdCons0 measures an unbuffered channel with no per-message work.
func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}
1053
// BenchmarkChanProdCons10 measures a capacity-10 channel with no per-message work.
func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}
1057
// BenchmarkChanProdCons100 measures a capacity-100 channel with no per-message work.
func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}
1061
// BenchmarkChanProdConsWork0 measures an unbuffered channel with 100 iterations of work per message.
func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}
1065
// BenchmarkChanProdConsWork10 measures a capacity-10 channel with 100 iterations of work per message.
func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}
1069
// BenchmarkChanProdConsWork100 measures a capacity-100 channel with 100 iterations of work per message.
func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}
1073
// BenchmarkSelectProdCons is the select-based variant of
// benchmarkChanProdCons: producers and consumers move values through a
// buffered channel, but every channel operation goes through a
// three-case select whose extra cases (an hour-long timer and a channel
// that is never closed) are intended never to fire. Termination uses
// the same 0-sentinel protocol: each producer sends one 0 and each
// consumer exits on the first 0 it receives.
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs) // completion signals, one per goroutine
	myc := make(chan int, 128)
	myclose := make(chan bool) // never closed or sent on
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0 // sentinel: terminates exactly one consumer
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
1133
// BenchmarkReceiveDataFromClosedChan measures draining buffered values
// from a channel that has already been closed. The fill and close are
// excluded from the timing via ResetTimer.
func BenchmarkReceiveDataFromClosedChan(b *testing.B) {
	ch := make(chan struct{}, b.N)
	for i := 0; i < b.N; i++ {
		ch <- struct{}{}
	}
	close(ch)

	b.ResetTimer()
	for range ch {
	}
}
1146
// BenchmarkChanCreation measures allocating a fresh buffered channel
// plus one send/receive round trip on it.
func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			ch := make(chan int, 1)
			ch <- 0
			<-ch
		}
	})
}
1156
// BenchmarkChanSem measures using a buffered channel of empty structs
// as a counting semaphore: acquire by sending, release by receiving.
func BenchmarkChanSem(b *testing.B) {
	type token struct{}
	sem := make(chan token, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			sem <- token{}
			<-sem
		}
	})
}
1167
// BenchmarkChanPopular measures select wake-up cost with many (1000)
// goroutines all selecting on one shared "popular" channel plus a
// private one each; the main loop wakes every goroutine through its
// private channel on each iteration.
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	popular := make(chan bool)
	var private []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		ch := make(chan bool)
		private = append(private, ch)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-popular:
				case <-ch:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, ch := range private {
			ch <- true
		}
	}
	wg.Wait()
}
1194
// BenchmarkChanClosed measures a non-blocking receive from a closed
// channel; a closed channel is always ready, so the default case must
// never run.
func BenchmarkChanClosed(b *testing.B) {
	ch := make(chan struct{})
	close(ch)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-ch:
			default:
				b.Error("Unreachable")
			}
		}
	})
}
1208
var (
	// alwaysFalse is never set to true; it exists so localWork can
	// reference its result without the compiler eliminating the work.
	alwaysFalse = false
	// workSink absorbs localWork's result behind the alwaysFalse guard.
	workSink    = 0
)
1213
1214func localWork(w int) {
1215	foo := 0
1216	for i := 0; i < w; i++ {
1217		foo /= (foo + 1)
1218	}
1219	if alwaysFalse {
1220		workSink += foo
1221	}
1222}
1223