// Source-listing navigation artifact (not Go code): Home | History | Annotate | Download | only in runtime
      1 // Copyright 2009 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 package runtime_test
      6 
      7 import (
      8 	"runtime"
      9 	"sync"
     10 	"sync/atomic"
     11 	"testing"
     12 	"time"
     13 )
     14 
// TestChan exercises the basic channel semantics — blocking and
// non-blocking sends/receives, close, FIFO ordering, and len/cap —
// across a range of channel capacities (0..N-1), so both unbuffered
// and buffered code paths in the runtime are covered.
func TestChan(t *testing.T) {
	// Run with 4 procs for real parallelism; restore the old value on exit.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			// Give the receivers a chance to (incorrectly) complete.
			// No race on recv1/recv2: they are only written after the
			// sends below, which happen after these reads.
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			// Unblock the two pending receivers.
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			// Unblock the pending sender.
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			// Buffered values drain in FIFO order even after close.
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			// Once drained, receives yield the zero value...
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			// ...and the two-value form reports ok == false.
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			// Merge per-receiver histograms: every value 0..L-1 must be
			// seen exactly P times in total.
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}
    200 
// TestNonblockRecvRace checks that a non-blocking receive racing with a
// close never observes the channel as "not ready": at every point the
// channel either still holds the buffered value or is closed, so the
// select must take the receive case, not default.
func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				// Reaching default means the runtime briefly saw the
				// channel as empty-and-open, which must never happen.
				t.Error("chan is not ready")
			}
		}()
		close(c)
		<-c
		if t.Failed() {
			return
		}
	}
}
    223 
// This test checks that select acts on the state of the channels at one
// moment in the execution, not over a smeared time window.
// In the test, one goroutine does:
//	create c1, c2
//	make c1 ready for receiving
//	create second goroutine
//	make c2 ready for receiving
//	make c1 no longer ready for receiving (if possible)
// The second goroutine does a non-blocking select receiving from c1 and c2.
// From the time the second goroutine is created, at least one of c1 and c2
// is always ready for receiving, so the select in the second goroutine must
// always receive from one or the other. It must never execute the default case.
func TestNonblockSelectRace(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	// Buffered so the inner goroutine never blocks reporting its result.
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int, 1)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				// Neither channel was ready — the bug this test exists for.
				done <- false
				return
			}
			done <- true
		}()
		c2 <- 1
		// Try to drain c1 so the inner select may only have c2 available.
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
    266 
// Same as TestNonblockSelectRace, but close(c2) replaces c2 <- 1, so the
// "ready" state of c2 comes from closing an unbuffered channel instead of
// a buffered send.
func TestNonblockSelectRace2(t *testing.T) {
	n := 100000
	if testing.Short() {
		n = 1000
	}
	done := make(chan bool, 1)
	for i := 0; i < n; i++ {
		c1 := make(chan int, 1)
		c2 := make(chan int)
		c1 <- 1
		go func() {
			select {
			case <-c1:
			case <-c2:
			default:
				// Neither channel ready: select smeared its snapshot.
				done <- false
				return
			}
			done <- true
		}()
		close(c2)
		// Try to drain c1 so only the closed c2 may remain ready.
		select {
		case <-c1:
		default:
		}
		if !<-done {
			t.Fatal("no chan is ready")
		}
	}
}
    298 
// TestSelfSelect runs selects that both send to and receive from the
// same channel, for an unbuffered and a buffered channel.
func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p // capture per-iteration value (pre-Go 1.22 loop semantics)
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					// Alternate the case order so both select layouts
					// are exercised.
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							// On an unbuffered channel, receiving our own
							// send would mean select matched a case with
							// itself — a runtime bug.
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
								return
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}
    337 
// TestSelectStress hammers two unbuffered and two buffered channels with
// concurrent sends, receives, and multi-channel selects, checking only
// that the whole construct terminates (i.e. does not deadlock).
func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k // capture per-iteration value (pre-Go 1.22 loop semantics)
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		// Select-sender: sends N values to each channel, nil-ing out a
		// case once its quota is reached (a nil channel case never fires).
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		// Select-receiver: mirror image of the select-sender above.
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}
    432 
    433 func TestChanSendInterface(t *testing.T) {
    434 	type mt struct{}
    435 	m := &mt{}
    436 	c := make(chan interface{}, 1)
    437 	c <- m
    438 	select {
    439 	case c <- m:
    440 	default:
    441 	}
    442 	select {
    443 	case c <- m:
    444 	case c <- &mt{}:
    445 	default:
    446 	}
    447 }
    448 
// TestPseudoRandomSend checks that when two send cases in a select are
// simultaneously ready, the runtime picks between them pseudorandomly
// rather than always favoring one case.
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		// The locked mutex doubles as a completion signal: the receiver
		// unlocks it when all n values have been recorded.
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			// Both cases are always ready; selection should be random.
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		// Count zeros (n0) and ones (n1) among the received values.
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		// Require at least ~10% of each outcome to call it random.
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}
    481 
// TestMultiConsumer runs a feeder, many workers ranging over a shared
// work channel, and a single consumer, verifying that every value sent
// is received exactly once (count and sum both match).
func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	// Values cycled through the pipeline; also used to trigger Gosched
	// in the workers to perturb ordering.
	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	// expect is safely read below: the read happens after range r ends,
	// which happens after close(r), which happens after the last write.
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}
    532 
// TestShrinkStackDuringBlockedSend verifies that a goroutine blocked in
// a channel send survives having its stack shrunk by GC and that the
// in-flight values are not corrupted.
func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack1.go:stackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}
    564 
// TestSelectDuplicateChannel is a regression test for corruption of a
// channel's receiver queue when one select lists the same channel in
// multiple cases. The sleeps fix the order in which goroutines A and B
// enqueue on c, which is essential to reproducing the original bug.
func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c.  This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B.  This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}
    593 
// selectSink keeps the large allocations made in TestSelectStackAdjust
// reachable (so the compiler cannot eliminate them) until enough GCs
// have been forced; it is then cleared to release the memory.
var selectSink interface{}
    595 
// TestSelectStackAdjust verifies that pointers held in channel receive
// slots of a blocked select are correctly adjusted when the goroutine's
// stack is shrunk by a concurrent GC.
func TestSelectStackAdjust(t *testing.T) {
	// Test that channel receive slots that contain local stack
	// pointers are adjusted correctly by stack shrinking.
	c := make(chan *int)
	d := make(chan *int)
	ready1 := make(chan bool)
	ready2 := make(chan bool)

	f := func(ready chan bool, dup bool) {
		// Temporarily grow the stack to 10K.
		stackGrowthRecursive((10 << 10) / (128 * 8))

		// We're ready to trigger GC and stack shrink.
		ready <- true

		val := 42
		var cx *int
		cx = &val

		// When dup is true, list the same channels twice in the select
		// to also exercise duplicate-channel queueing during shrink.
		var c2 chan *int
		var d2 chan *int
		if dup {
			c2 = c
			d2 = d
		}

		// Receive from d. cx won't be affected.
		select {
		case cx = <-c:
		case <-c2:
		case <-d:
		case <-d2:
		}

		// Check that pointer in cx was adjusted correctly.
		if cx != &val {
			t.Error("cx no longer points to val")
		} else if val != 42 {
			t.Error("val changed")
		} else {
			*cx = 43
			if val != 43 {
				t.Error("changing *cx failed to change val")
			}
		}
		ready <- true
	}

	go f(ready1, false)
	go f(ready2, true)

	// Let the goroutines get into the select.
	<-ready1
	<-ready2
	time.Sleep(10 * time.Millisecond)

	// Force concurrent GC a few times by allocating 1 MiB chunks into
	// selectSink; bail out once at least two GC cycles have completed.
	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	for i := 0; i < 100; i++ {
		selectSink = new([1 << 20]byte)
		runtime.ReadMemStats(&after)
		if after.NumGC-before.NumGC >= 2 {
			goto done
		}
	}
	t.Fatal("failed to trigger concurrent GC")
done:
	selectSink = nil

	// Wake selects.
	close(d)
	<-ready1
	<-ready2
}
    671 
// BenchmarkChanNonblocking measures the cost of a failing non-blocking
// receive (select with default) on an always-empty channel.
func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}
    683 
// BenchmarkSelectUncontended measures two-case select throughput when
// each worker owns its own pair of channels (no cross-goroutine
// contention); a single token ping-pongs between the two channels.
func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
    699 
// BenchmarkSelectSyncContended measures select over shared unbuffered
// channels under contention: each worker spawns a sender goroutine and
// receives from the three shared channels. close(done) stops the
// senders when the benchmark ends.
func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}
    727 
// BenchmarkSelectAsyncContended measures two-case select throughput on
// a shared pair of buffered channels; each worker adds one token, so
// tokens circulate between the channels under contention.
func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}
    744 
// BenchmarkSelectNonblock measures the four flavors of a failing
// non-blocking select: receive/send on an unbuffered channel and
// receive-from-empty/send-to-full on a buffered one.
func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}
    771 
// BenchmarkChanUncontended measures buffered channel send/recv when each
// worker owns a private channel: fill it with C values, then drain it.
func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
    786 
// BenchmarkChanContended is the contended variant of
// BenchmarkChanUncontended: all workers share one channel, sized so
// that every worker can hold C values in flight without deadlocking.
func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}
    801 
// benchmarkChanSync measures synchronous (unbuffered) channel ping-pong
// between two goroutines, with `work` units of CPU busy-work between
// operations to model application load. The goroutines alternate
// send/recv roles based on the parity of the shared counter N.
func benchmarkChanSync(b *testing.B, work int) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				// Claim a batch of CallsPerSched operations.
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					// Opposite parities pair one receiver with one
					// sender so the unbuffered ops rendezvous.
					if i%2 == 0 {
						<-myc
						localWork(work)
						myc <- 0
						localWork(work)
					} else {
						myc <- 0
						localWork(work)
						<-myc
						localWork(work)
					}
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}
    836 
// BenchmarkChanSync measures bare synchronous channel ping-pong.
func BenchmarkChanSync(b *testing.B) {
	benchmarkChanSync(b, 0)
}

// BenchmarkChanSyncWork is BenchmarkChanSync with 1000 units of local
// CPU work per operation.
func BenchmarkChanSyncWork(b *testing.B) {
	benchmarkChanSync(b, 1000)
}
    844 
// benchmarkChanProdCons measures a producer/consumer pair per proc over
// a shared channel of capacity chanSize, with localWork units of CPU
// busy-work per item. A sentinel 0 tells each consumer to stop; real
// items are sent as 1. The `foo == 42` results are always false and
// exist only to keep the busy-work from being optimized away.
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer.
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0 // sentinel: stop one consumer
			c <- foo == 42
		}()
		go func() {
			// Consumer.
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	// Wait for one producer and one consumer completion per proc.
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
    886 
// The BenchmarkChanProdCons* variants measure the producer/consumer
// pattern at channel capacities 0, 10, and 100, without (ProdConsN)
// and with (ProdConsWorkN) 100 units of per-item local CPU work.

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}
    910 
// BenchmarkSelectProdCons is the producer/consumer benchmark with each
// channel operation wrapped in a select that also has never-firing
// timer and close cases, measuring the overhead select adds on top of
// plain channel ops. As in benchmarkChanProdCons, 0 is the stop
// sentinel and `foo` is dead-code-elimination ballast.
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}
    970 
// BenchmarkChanCreation measures the cost of allocating a fresh
// buffered channel and doing one send/recv round trip on it.
func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}
    980 
// BenchmarkChanSem measures using a buffered channel of empty structs
// as a counting semaphore (acquire = send, release = receive).
func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}
    991 
// BenchmarkChanPopular measures select performance when one "popular"
// channel (c) appears in the select of n goroutines at once; each
// iteration wakes every goroutine through its private channel d.
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	var wg sync.WaitGroup
	wg.Add(n)
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
			wg.Done()
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
	wg.Wait()
}
   1018 
   1019 var (
   1020 	alwaysFalse = false
   1021 	workSink    = 0
   1022 )
   1023 
   1024 func localWork(w int) {
   1025 	foo := 0
   1026 	for i := 0; i < w; i++ {
   1027 		foo /= (foo + 1)
   1028 	}
   1029 	if alwaysFalse {
   1030 		workSink += foo
   1031 	}
   1032 }
   1033