// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import "unsafe"

// Semaphore-based mutex and one-shot notification support, used on the
// platforms listed in the build tag above (those without futexes).
//
// This implementation depends on OS-specific implementations of
//
//	uintptr runtimesemacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtimesemasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtimesemawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
const (
	// locked is the low bit of mutex.key. The remaining bits hold a
	// pointer to the head of the list of waiting M's (see lock below),
	// so M pointers must be at least 2-byte aligned for this to work.
	locked uintptr = 1

	active_spin     = 4  // spin attempts (with procyield) on multiprocessors
	active_spin_cnt = 30 // iterations passed to procyield per spin attempt
	passive_spin    = 1  // additional attempts that osyield instead of spinning
)

// lock acquires the mutex l, spinning briefly and then sleeping on this
// M's semaphore until the lock can be taken.
func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtimelock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if casuintptr(&l.key, 0, locked) {
		return
	}
	// Lazily create this M's semaphore on first contended lock.
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomicloaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if casuintptr(&l.key, v, v|locked) {
				return
			}
			// CAS lost a race; restart the spin sequence.
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				// Push this M onto the wait list headed at l.key,
				// preserving the locked bit.
				gp.m.nextwaitm = v &^ locked
				if casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomicloaduintptr(&l.key)
				if v&locked == 0 {
					// Lock was released while we were trying to
					// queue; go back and try to grab it.
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

//go:nowritebarrier
// We might not be holding a p in this code.
// unlock releases the mutex l, waking one queued M if any are waiting.
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomicloaduintptr(&l.key)
		if v == locked {
			// No waiters: just clear the locked bit.
			if casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (*m)((unsafe.Pointer)(v &^ locked))
			// Replace the list head with the next waiter; note this
			// also clears the locked bit, handing the lock to mp's
			// side of the race.
			if casuintptr(&l.key, v, mp.nextwaitm) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtimeunlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.

// noteclear resets n so it can be used for a fresh wakeup/sleep pair.
func noteclear(n *note) {
	n.key = 0
}

// notewakeup signals n, waking the M sleeping on it (if any).
// n.key holds 0 (no sleeper yet), locked (already woken), or the
// pointer to the sleeping M.
func notewakeup(n *note) {
	var v uintptr
	// Atomically swap in locked, capturing the previous state in v.
	for {
		v = atomicloaduintptr(&n.key)
		if casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

// notesleep blocks until notewakeup(n) is called. Must run on g0,
// since it sleeps the whole M on its semaphore.
func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	// Register this M as the sleeper by storing it in n.key.
	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	semasleep(-1)
	gp.m.blocked = false
}

// notetsleep_internal is the timed-sleep core shared by notetsleep and
// notetsleepg. It reports whether the note was signaled (true) or the
// timeout ns expired first (false).
//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		semasleep(-1)
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomicloaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

// notetsleep sleeps on n for at most ns nanoseconds (forever if ns < 0)
// and reports whether the note was signaled before the timeout.
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtimenotetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	// Tell the scheduler we are blocking in a syscall-like state so the
	// P can be handed off while this M sleeps.
	entersyscallblock(0)
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall(0)
	return ok
}