// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

package sync_test

import (
	"fmt"
	"runtime"
	. "sync"
	"sync/atomic"
	"testing"
)

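// parallelReader read-locks m, reports on clocked that it holds the read
// lock, waits for the signal on cunlock, then releases the lock and reports
// completion on cdone.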
func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
	m.RLock()
	clocked <- true
	<-cunlock
	m.RUnlock()
	cdone <- true
}

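// doTestParallelReaders checks that numReaders goroutines can all hold the
// read lock at the same time: the readers are only released once every one
// of them has acquired the lock.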
func doTestParallelReaders(numReaders, gomaxprocs int) {
	runtime.GOMAXPROCS(gomaxprocs)
	var m RWMutex
	clocked := make(chan bool)
	cunlock := make(chan bool)
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(&m, clocked, cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	for i := 0; i < numReaders; i++ {
		cunlock <- true
	}
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

func TestParallelReaders(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
	doTestParallelReaders(1, 4)
	doTestParallelReaders(3, 4)
	doTestParallelReaders(4, 2)
}

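// reader repeatedly read-locks rwm and uses the shared activity counter to
// check the invariant: while a read lock is held there is at least one
// active reader and no active writer (a writer adds 10000).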
func reader(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		rwm.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("rlock(%d)\n", n))
		}
		// Spin briefly so the lock is held for a little while.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

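// writer repeatedly write-locks rwm and checks, via the activity counter,
// that it is the only goroutine holding the lock: no readers and no other
// writer are active.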
func writer(rwm *RWMutex, numIterations int, activity *int32, cdone chan bool) {
	for i := 0; i < numIterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		// Spin briefly so the lock is held for a little while.
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.Unlock()
	}
	cdone <- true
}

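// HammerRWMutex runs two writers and numReaders readers concurrently, all
// hammering the same RWMutex for numIterations iterations each.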
func HammerRWMutex(gomaxprocs, numReaders, numIterations int) {
	runtime.GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	var rwm RWMutex
	cdone := make(chan bool)
	go writer(&rwm, numIterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(&rwm, numIterations, &activity, cdone)
	}
	go writer(&rwm, numIterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(&rwm, numIterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

func TestRWMutex(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerRWMutex(1, 1, n)
	HammerRWMutex(1, 3, n)
	HammerRWMutex(1, 10, n)
	HammerRWMutex(4, 1, n)
	HammerRWMutex(4, 3, n)
	HammerRWMutex(4, 10, n)
	HammerRWMutex(10, 1, n)
	HammerRWMutex(10, 3, n)
	HammerRWMutex(10, 10, n)
	HammerRWMutex(10, 5, n)
}

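// TestRLocker checks that the Locker returned by RLocker takes the read lock
// on the underlying RWMutex: two Lock calls on it can be held at once, a
// writer stays blocked until both are released, and the RLocker in turn
// blocks while the write lock is held.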
func TestRLocker(t *testing.T) {
	var wl RWMutex
	var rl Locker
	wlocked := make(chan bool, 1)
	rlocked := make(chan bool, 1)
	rl = wl.RLocker()
	n := 10
	go func() {
		for i := 0; i < n; i++ {
			rl.Lock()
			rl.Lock()
			rlocked <- true
			wl.Lock()
			wlocked <- true
		}
	}()
	for i := 0; i < n; i++ {
		<-rlocked
		rl.Unlock()
		select {
		case <-wlocked:
			t.Fatal("RLocker() didn't read-lock it")
		default:
		}
		rl.Unlock()
		<-wlocked
		select {
		case <-rlocked:
			t.Fatal("RLocker() didn't respect the write lock")
		default:
		}
		wl.Unlock()
	}
}

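// The following tests check that unlocking an RWMutex that is not locked in
// the corresponding mode panics: Unlock on an unlocked or only read-locked
// mutex, and RUnlock on an unlocked or only write-locked mutex.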
func TestUnlockPanic(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Fatalf("unlock of unlocked RWMutex did not panic")
		}
	}()
	var mu RWMutex
	mu.Unlock()
}

func TestUnlockPanic2(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Fatalf("unlock of unlocked RWMutex did not panic")
		}
	}()
	var mu RWMutex
	mu.RLock()
	mu.Unlock()
}

func TestRUnlockPanic(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Fatalf("read unlock of unlocked RWMutex did not panic")
		}
	}()
	var mu RWMutex
	mu.RUnlock()
}

func TestRUnlockPanic2(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Fatalf("read unlock of unlocked RWMutex did not panic")
		}
	}()
	var mu RWMutex
	mu.Lock()
	mu.RUnlock()
}

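// BenchmarkRWMutexUncontended measures lock/unlock cost when each goroutine
// works on its own RWMutex; the padding keeps the per-goroutine mutexes on
// separate cache lines to avoid false sharing.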
func BenchmarkRWMutexUncontended(b *testing.B) {
	type PaddedRWMutex struct {
		RWMutex
		pad [32]uint32
	}
	b.RunParallel(func(pb *testing.PB) {
		var rwm PaddedRWMutex
		for pb.Next() {
			rwm.RLock()
			rwm.RLock()
			rwm.RUnlock()
			rwm.RUnlock()
			rwm.Lock()
			rwm.Unlock()
		}
	})
}

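// benchmarkRWMutex measures a shared RWMutex under contention: roughly one
// in writeRatio operations takes the write lock, the rest take the read lock
// and do localWork iterations of busy work while holding it.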
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
	var rwm RWMutex
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if foo%writeRatio == 0 {
				rwm.Lock()
				rwm.Unlock()
			} else {
				rwm.RLock()
				for i := 0; i != localWork; i++ {
					foo *= 2
					foo /= 2
				}
				rwm.RUnlock()
			}
		}
		_ = foo
	})
}

func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}

func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}

func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}

func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}