// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

package sync_test

import (
	"fmt"
	"internal/testenv"
	"os"
	"os/exec"
	"runtime"
	"strings"
	. "sync"
	"testing"
	"time"
)

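// HammerSemaphore repeatedly acquires and releases the semaphore,
// then signals completion on cdone.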
func HammerSemaphore(s *uint32, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		Runtime_Semacquire(s)
		Runtime_Semrelease(s, false)
	}
	cdone <- true
}

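// TestSemaphore hammers a semaphore with initial value 1 from 10
// goroutines and waits for all of them to finish.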
func TestSemaphore(t *testing.T) {
	s := new(uint32)
	*s = 1
	c := make(chan bool)
	for i := 0; i < 10; i++ {
		go HammerSemaphore(s, 1000, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}

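// BenchmarkUncontendedSemaphore runs the hammer loop in a single
// goroutine, so the semaphore is never contended.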
func BenchmarkUncontendedSemaphore(b *testing.B) {
	s := new(uint32)
	*s = 1
	HammerSemaphore(s, b.N, make(chan bool, 2))
}

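// BenchmarkContendedSemaphore splits the iterations across two
// goroutines contending on the same semaphore with GOMAXPROCS=2.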
func BenchmarkContendedSemaphore(b *testing.B) {
	b.StopTimer()
	s := new(uint32)
	*s = 1
	c := make(chan bool)
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	b.StartTimer()

	go HammerSemaphore(s, b.N/2, c)
	go HammerSemaphore(s, b.N/2, c)
	<-c
	<-c
}

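// HammerMutex repeatedly locks and unlocks m, then signals
// completion on cdone.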
func HammerMutex(m *Mutex, loops int, cdone chan bool) {
	for i := 0; i < loops; i++ {
		m.Lock()
		m.Unlock()
	}
	cdone <- true
}

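// TestMutex hammers a mutex from 10 goroutines, with mutex profiling
// enabled so that the profiled code paths are exercised as well.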
func TestMutex(t *testing.T) {
	if n := runtime.SetMutexProfileFraction(1); n != 0 {
		t.Logf("got mutexrate %d expected 0", n)
	}
	defer runtime.SetMutexProfileFraction(0)
	m := new(Mutex)
	c := make(chan bool)
	for i := 0; i < 10; i++ {
		go HammerMutex(m, 1000, c)
	}
	for i := 0; i < 10; i++ {
		<-c
	}
}

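// misuseTests enumerates Mutex and RWMutex misuse patterns (such as
// unlocking an unlocked mutex), each of which must abort the program
// with a fatal "unlocked" error.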
var misuseTests = []struct {
	name string
	f    func()
}{
	{
		"Mutex.Unlock",
		func() {
			var mu Mutex
			mu.Unlock()
		},
	},
	{
		"Mutex.Unlock2",
		func() {
			var mu Mutex
			mu.Lock()
			mu.Unlock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock",
		func() {
			var mu RWMutex
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock2",
		func() {
			var mu RWMutex
			mu.RLock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.Unlock3",
		func() {
			var mu RWMutex
			mu.Lock()
			mu.Unlock()
			mu.Unlock()
		},
	},
	{
		"RWMutex.RUnlock",
		func() {
			var mu RWMutex
			mu.RUnlock()
		},
	},
	{
		"RWMutex.RUnlock2",
		func() {
			var mu RWMutex
			mu.Lock()
			mu.RUnlock()
		},
	},
	{
		"RWMutex.RUnlock3",
		func() {
			var mu RWMutex
			mu.RLock()
			mu.RUnlock()
			mu.RUnlock()
		},
	},
}

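// In the misuse-test child mode (see TestMutexMisuse), run the requested
// misuse case. The deferred recover swallows ordinary panics, so only an
// unrecoverable fatal error can keep the child from printing
// "test completed" and exiting successfully.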
func init() {
	if len(os.Args) == 3 && os.Args[1] == "TESTMISUSE" {
		for _, test := range misuseTests {
			if test.name == os.Args[2] {
				func() {
					defer func() { recover() }()
					test.f()
				}()
				fmt.Printf("test completed\n")
				os.Exit(0)
			}
		}
		fmt.Printf("unknown test\n")
		os.Exit(0)
	}
}

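// TestMutexMisuse re-executes the test binary in TESTMISUSE mode for each
// misuse case and checks that the child process dies with a message about
// an unlocked lock.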
func TestMutexMisuse(t *testing.T) {
	testenv.MustHaveExec(t)
	for _, test := range misuseTests {
		out, err := exec.Command(os.Args[0], "TESTMISUSE", test.name).CombinedOutput()
		if err == nil || !strings.Contains(string(out), "unlocked") {
			t.Errorf("%s: did not find failure with message about unlocked lock: %s\n%s\n", test.name, err, out)
		}
	}
}

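// TestMutexFairness checks that an occasional locker is not starved by a
// goroutine that holds the mutex almost continuously.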
func TestMutexFairness(t *testing.T) {
	var mu Mutex
	stop := make(chan bool)
	defer close(stop)
	go func() {
		for {
			mu.Lock()
			time.Sleep(100 * time.Microsecond)
			mu.Unlock()
			select {
			case <-stop:
				return
			default:
			}
		}
	}()
	done := make(chan bool)
	go func() {
		for i := 0; i < 10; i++ {
			time.Sleep(100 * time.Microsecond)
			mu.Lock()
			mu.Unlock()
		}
		done <- true
	}()
	select {
	case <-done:
	case <-time.After(10 * time.Second):
		t.Fatalf("can't acquire Mutex in 10 seconds")
	}
}

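// BenchmarkMutexUncontended gives each goroutine its own mutex, padded so
// that distinct mutexes do not share a cache line; Lock/Unlock therefore
// never contend.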
func BenchmarkMutexUncontended(b *testing.B) {
	type PaddedMutex struct {
		Mutex
		pad [128]uint8
	}
	b.RunParallel(func(pb *testing.PB) {
		var mu PaddedMutex
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
		}
	})
}

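// benchmarkMutex measures Lock/Unlock throughput on a shared mutex. With
// slack, goroutines are oversubscribed 10x relative to GOMAXPROCS; with
// work, each iteration does some local computation outside the critical
// section.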
func benchmarkMutex(b *testing.B, slack, work bool) {
	var mu Mutex
	if slack {
		b.SetParallelism(10)
	}
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
			if work {
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
		}
		_ = foo
	})
}

func BenchmarkMutex(b *testing.B) {
	benchmarkMutex(b, false, false)
}

func BenchmarkMutexSlack(b *testing.B) {
	benchmarkMutex(b, true, false)
}

func BenchmarkMutexWork(b *testing.B) {
	benchmarkMutex(b, false, true)
}

func BenchmarkMutexWorkSlack(b *testing.B) {
	benchmarkMutex(b, true, true)
}

func BenchmarkMutexNoSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should
	// not be profitable, and confirms that the spinning does no harm there.
	// To achieve this we create an excess of goroutines, most of which do
	// local work. These goroutines yield during the local work, so that
	// switching from a blocked goroutine to other goroutines is profitable.
	// In practice, this benchmark still triggers some spinning in the mutex.
	var m Mutex
	var acc0, acc1 uint64
	b.SetParallelism(4)
	b.RunParallel(func(pb *testing.PB) {
		c := make(chan bool)
		var data [4 << 10]uint64
		for i := 0; pb.Next(); i++ {
			if i%4 == 0 {
				m.Lock()
				acc0 -= 100
				acc1 += 100
				m.Unlock()
			} else {
				for i := 0; i < len(data); i += 4 {
					data[i]++
				}
				// An elaborate way to say runtime.Gosched() that does not
				// put the goroutine onto the global run queue.
				go func() {
					c <- true
				}()
				<-c
			}
		}
	})
}

func BenchmarkMutexSpin(b *testing.B) {
	// This benchmark models a situation where spinning in the mutex should
	// be profitable. To achieve this we create one goroutine per proc.
	// These goroutines access a considerable amount of local data, so that
	// unnecessary rescheduling is penalized by cache misses.
	var m Mutex
	var acc0, acc1 uint64
	b.RunParallel(func(pb *testing.PB) {
		var data [16 << 10]uint64
		for i := 0; pb.Next(); i++ {
			m.Lock()
			acc0 -= 100
			acc1 += 100
			m.Unlock()
			for i := 0; i < len(data); i += 4 {
				data[i]++
			}
		}
	})
}