Home | History | Annotate | Download | only in sync
      1 // Copyright 2009 The Go Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style
      3 // license that can be found in the LICENSE file.
      4 
      5 // GOMAXPROCS=10 go test
      6 
      7 package sync_test
      8 
      9 import (
     10 	"fmt"
     11 	"runtime"
     12 	. "sync"
     13 	"sync/atomic"
     14 	"testing"
     15 )
     16 
     17 func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
     18 	m.RLock()
     19 	clocked <- true
     20 	<-cunlock
     21 	m.RUnlock()
     22 	cdone <- true
     23 }
     24 
     25 func doTestParallelReaders(numReaders, gomaxprocs int) {
     26 	runtime.GOMAXPROCS(gomaxprocs)
     27 	var m RWMutex
     28 	clocked := make(chan bool)
     29 	cunlock := make(chan bool)
     30 	cdone := make(chan bool)
     31 	for i := 0; i < numReaders; i++ {
     32 		go parallelReader(&m, clocked, cunlock, cdone)
     33 	}
     34 	// Wait for all parallel RLock()s to succeed.
     35 	for i := 0; i < numReaders; i++ {
     36 		<-clocked
     37 	}
     38 	for i := 0; i < numReaders; i++ {
     39 		cunlock <- true
     40 	}
     41 	// Wait for the goroutines to finish.
     42 	for i := 0; i < numReaders; i++ {
     43 		<-cdone
     44 	}
     45 }
     46 
     47 func TestParallelReaders(t *testing.T) {
     48 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
     49 	doTestParallelReaders(1, 4)
     50 	doTestParallelReaders(3, 4)
     51 	doTestParallelReaders(4, 2)
     52 }
     53 
     54 func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
     55 	for i := 0; i < num_iterations; i++ {
     56 		rwm.RLock()
     57 		n := atomic.AddInt32(activity, 1)
     58 		if n < 1 || n >= 10000 {
     59 			panic(fmt.Sprintf("wlock(%d)\n", n))
     60 		}
     61 		for i := 0; i < 100; i++ {
     62 		}
     63 		atomic.AddInt32(activity, -1)
     64 		rwm.RUnlock()
     65 	}
     66 	cdone <- true
     67 }
     68 
     69 func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
     70 	for i := 0; i < num_iterations; i++ {
     71 		rwm.Lock()
     72 		n := atomic.AddInt32(activity, 10000)
     73 		if n != 10000 {
     74 			panic(fmt.Sprintf("wlock(%d)\n", n))
     75 		}
     76 		for i := 0; i < 100; i++ {
     77 		}
     78 		atomic.AddInt32(activity, -10000)
     79 		rwm.Unlock()
     80 	}
     81 	cdone <- true
     82 }
     83 
     84 func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
     85 	runtime.GOMAXPROCS(gomaxprocs)
     86 	// Number of active readers + 10000 * number of active writers.
     87 	var activity int32
     88 	var rwm RWMutex
     89 	cdone := make(chan bool)
     90 	go writer(&rwm, num_iterations, &activity, cdone)
     91 	var i int
     92 	for i = 0; i < numReaders/2; i++ {
     93 		go reader(&rwm, num_iterations, &activity, cdone)
     94 	}
     95 	go writer(&rwm, num_iterations, &activity, cdone)
     96 	for ; i < numReaders; i++ {
     97 		go reader(&rwm, num_iterations, &activity, cdone)
     98 	}
     99 	// Wait for the 2 writers and all readers to finish.
    100 	for i := 0; i < 2+numReaders; i++ {
    101 		<-cdone
    102 	}
    103 }
    104 
    105 func TestRWMutex(t *testing.T) {
    106 	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
    107 	n := 1000
    108 	if testing.Short() {
    109 		n = 5
    110 	}
    111 	HammerRWMutex(1, 1, n)
    112 	HammerRWMutex(1, 3, n)
    113 	HammerRWMutex(1, 10, n)
    114 	HammerRWMutex(4, 1, n)
    115 	HammerRWMutex(4, 3, n)
    116 	HammerRWMutex(4, 10, n)
    117 	HammerRWMutex(10, 1, n)
    118 	HammerRWMutex(10, 3, n)
    119 	HammerRWMutex(10, 10, n)
    120 	HammerRWMutex(10, 5, n)
    121 }
    122 
    123 func TestRLocker(t *testing.T) {
    124 	var wl RWMutex
    125 	var rl Locker
    126 	wlocked := make(chan bool, 1)
    127 	rlocked := make(chan bool, 1)
    128 	rl = wl.RLocker()
    129 	n := 10
    130 	go func() {
    131 		for i := 0; i < n; i++ {
    132 			rl.Lock()
    133 			rl.Lock()
    134 			rlocked <- true
    135 			wl.Lock()
    136 			wlocked <- true
    137 		}
    138 	}()
    139 	for i := 0; i < n; i++ {
    140 		<-rlocked
    141 		rl.Unlock()
    142 		select {
    143 		case <-wlocked:
    144 			t.Fatal("RLocker() didn't read-lock it")
    145 		default:
    146 		}
    147 		rl.Unlock()
    148 		<-wlocked
    149 		select {
    150 		case <-rlocked:
    151 			t.Fatal("RLocker() didn't respect the write lock")
    152 		default:
    153 		}
    154 		wl.Unlock()
    155 	}
    156 }
    157 
    158 func BenchmarkRWMutexUncontended(b *testing.B) {
    159 	type PaddedRWMutex struct {
    160 		RWMutex
    161 		pad [32]uint32
    162 	}
    163 	b.RunParallel(func(pb *testing.PB) {
    164 		var rwm PaddedRWMutex
    165 		for pb.Next() {
    166 			rwm.RLock()
    167 			rwm.RLock()
    168 			rwm.RUnlock()
    169 			rwm.RUnlock()
    170 			rwm.Lock()
    171 			rwm.Unlock()
    172 		}
    173 	})
    174 }
    175 
    176 func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
    177 	var rwm RWMutex
    178 	b.RunParallel(func(pb *testing.PB) {
    179 		foo := 0
    180 		for pb.Next() {
    181 			foo++
    182 			if foo%writeRatio == 0 {
    183 				rwm.Lock()
    184 				rwm.Unlock()
    185 			} else {
    186 				rwm.RLock()
    187 				for i := 0; i != localWork; i += 1 {
    188 					foo *= 2
    189 					foo /= 2
    190 				}
    191 				rwm.RUnlock()
    192 			}
    193 		}
    194 		_ = foo
    195 	})
    196 }
    197 
// BenchmarkRWMutexWrite100: no local work, one write lock per 100 operations.
func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}
    201 
// BenchmarkRWMutexWrite10: no local work, one write lock per 10 operations.
func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}
    205 
// BenchmarkRWMutexWorkWrite100: 100 spins of local work inside each read
// section, one write lock per 100 operations.
func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}
    209 
// BenchmarkRWMutexWorkWrite10: 100 spins of local work inside each read
// section, one write lock per 10 operations.
func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}
    213