// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly freebsd linux

package runtime

import "unsafe"

// This implementation depends on OS-specific implementations of
//
//	runtime·futexsleep(uint32 *addr, uint32 val, int64 ns)
//		Atomically,
//			if(*addr == val) sleep
//		Might be woken up spuriously; that's allowed.
//		Don't sleep longer than ns; ns < 0 means forever.
//
//	runtime·futexwakeup(uint32 *addr, uint32 cnt)
//		If any procs are sleeping on addr, wake up at most cnt.

const (
	mutex_unlocked = 0
	mutex_locked   = 1
	mutex_sleeping = 2

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

// Possible lock states are mutex_unlocked, mutex_locked and mutex_sleeping.
// mutex_sleeping means that there is presumably at least one sleeping thread.
// Note that there can be spinning threads during all states - they do not
// affect mutex's state.

// We use the uintptr mutex.key and note.key as a uint32.
func key32(p *uintptr) *uint32 {
	return (*uint32)(unsafe.Pointer(p))
}

func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	v := xchg(key32(&l.key), mutex_locked)
	if v == mutex_unlocked {
		return
	}

	// wait is either MUTEX_LOCKED or MUTEX_SLEEPING
	// depending on whether there is a thread sleeping
	// on this mutex. If we ever change l->key from
	// MUTEX_SLEEPING to some other value, we must be
	// careful to change it back to MUTEX_SLEEPING before
	// returning, to ensure that the sleeping thread gets
	// its wakeup call.
	wait := v

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
	for {
		// Try for lock, spinning.
		for i := 0; i < spin; i++ {
			for l.key == mutex_unlocked {
				if cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			procyield(active_spin_cnt)
		}

		// Try for lock, rescheduling.
		for i := 0; i < passive_spin; i++ {
			for l.key == mutex_unlocked {
				if cas(key32(&l.key), mutex_unlocked, wait) {
					return
				}
			}
			osyield()
		}

		// Sleep.
		v = xchg(key32(&l.key), mutex_sleeping)
		if v == mutex_unlocked {
			return
		}
		wait = mutex_sleeping
		futexsleep(key32(&l.key), mutex_sleeping, -1)
	}
}

func unlock(l *mutex) {
	v := xchg(key32(&l.key), mutex_unlocked)
	if v == mutex_unlocked {
		throw("unlock of unlocked lock")
	}
	if v == mutex_sleeping {
		futexwakeup(key32(&l.key), 1)
	}

	gp := getg()
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
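//
// A note is a one-shot event built on the same futex word as a mutex:
// noteclear resets the key to 0, notewakeup sets it to 1 and wakes at
// most one sleeper, and the notesleep variants block until the key is
// nonzero. Waking a note that is already woken is a fatal error (see
// the double-wakeup check in notewakeup below), so a note must be
// cleared again before it can be reused.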
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	old := xchg(key32(&n.key), 1)
	if old != 0 {
		print("notewakeup - double wakeup (", old, ")\n")
		throw("notewakeup - double wakeup")
	}
	futexwakeup(key32(&n.key), 1)
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	for atomicload(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, -1)
		gp.m.blocked = false
	}
}

// May run with m.p==nil if called from notetsleep, so write barriers
// are not allowed.
//
//go:nosplit
//go:nowritebarrier
func notetsleep_internal(n *note, ns int64) bool {
	gp := getg()

	if ns < 0 {
		for atomicload(key32(&n.key)) == 0 {
			gp.m.blocked = true
			futexsleep(key32(&n.key), 0, -1)
			gp.m.blocked = false
		}
		return true
	}

	if atomicload(key32(&n.key)) != 0 {
		return true
	}

	deadline := nanotime() + ns
	for {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, ns)
		gp.m.blocked = false
		if atomicload(key32(&n.key)) != 0 {
			break
		}
		now := nanotime()
		if now >= deadline {
			break
		}
		ns = deadline - now
	}
	return atomicload(key32(&n.key)) != 0
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}

	return notetsleep_internal(n, ns)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}

	entersyscallblock(0)
	ok := notetsleep_internal(n, ns)
	exitsyscall(0)
	return ok
}
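
// Usage sketch (illustrative only, not part of this file): callers
// elsewhere in the runtime pair these primitives roughly as follows.
// The M performing the wakeup is hypothetical here; in practice it is
// another M or a timer, not a goroutine spawned on the spot.
//
//	var m mutex
//	lock(&m)   // spins, then futex-sleeps if contended
//	// ... critical section ...
//	unlock(&m) // wakes one sleeper if the state was mutex_sleeping
//
//	var done note
//	noteclear(&done)       // arm the one-shot event
//	// ... hand &done to the M that will call notewakeup(&done) ...
//	notetsleepg(&done, -1) // block this user g; ns < 0 means forever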