// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.
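//
// The compiler lowers a select statement into a sequence of calls into
// this file. Roughly (an illustrative sketch of the shape, not literal
// compiler output), a statement such as
//
//	select {
//	case c1 <- v:
//		// send body
//	case x = <-c2:
//		// recv body
//	default:
//		// default body
//	}
//
// becomes, with sel pointing at a selectsize(3)-byte stack block:
//
//	newselect(sel, int64(selectsize(3)), 3)
//	if selectsend(sel, c1, &v) {
//		// send body
//	}
//	if selectrecv(sel, c2, &x) {
//		// recv body
//	}
//	if selectdefault(sel) {
//		// default body
//	}
//	selectgo(sel)
//
// Each registration call returns false on this first pass, so control
// falls through to selectgo. selectgo picks a case, stores true into
// that call's result slot (via scase.so) and overwrites its own return
// PC (via scase.pc), so that it "returns" from the winning registration
// call and the corresponding body runs.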

import (
	"runtime/internal/sys"
	"unsafe"
)

const (
	debugSelect = false

	// scase.kind
	caseRecv = iota
	caseSend
	caseDefault
)

// Select statement header.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type hselect struct {
	tcase     uint16   // total count of scase[]
	ncase     uint16   // currently filled scase[]
	pollorder *uint16  // case poll order
	lockorder *uint16  // channel lock order
	scase     [1]scase // one per case (in order of appearance)
}

// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type scase struct {
	elem        unsafe.Pointer // data element
	c           *hchan         // chan
	pc          uintptr        // return pc
	kind        uint16
	so          uint16 // offset of the selected-case bool result from &sel (see selectgo)
	receivedp   *bool  // pointer to received bool (recv2)
	releasetime int64
}

var (
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)

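// selectsize returns the number of bytes needed for the select block
// for a select with size cases: the hselect header (which embeds one
// scase), size-1 further scases, and the lockorder and pollorder
// arrays, laid out contiguously and rounded up to int64 alignment.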
func selectsize(size uintptr) uintptr {
	selsize := unsafe.Sizeof(hselect{}) +
		(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
		size*unsafe.Sizeof(*hselect{}.lockorder) +
		size*unsafe.Sizeof(*hselect{}.pollorder)
	return round(selsize, sys.Int64Align)
}

func newselect(sel *hselect, selsize int64, size int32) {
	if selsize != int64(selectsize(uintptr(size))) {
		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
		throw("bad select size")
	}
	sel.tcase = uint16(size)
	sel.ncase = 0
	sel.lockorder = (*uint16)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])))
	sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)))

	if debugSelect {
		print("newselect s=", sel, " size=", size, "\n")
	}
}

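// selectsend registers a send case on c. Its result is false when it
// returns here; if this case wins, selectgo stores true into the result
// slot and returns control to just after this call (see selectgo).
// selectrecv, selectrecv2, and selectdefault below work the same way.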
//go:nosplit
func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

// selectsend is cut in half (nosplit wrapper above, implementation here)
// to give the stack a chance to split.
func selectsendImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectsend: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))

	cas.pc = pc
	cas.c = c
	cas.so = uint16(so)
	cas.kind = caseSend
	cas.elem = elem

	if debugSelect {
		print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " so=", cas.so, "\n")
	}
}

//go:nosplit
func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

//go:nosplit
func selectrecv2(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}

func selectrecvImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectrecv: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = c
	cas.so = uint16(so)
	cas.kind = caseRecv
	cas.elem = elem
	cas.receivedp = received

	if debugSelect {
		print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " so=", cas.so, "\n")
	}
}

//go:nosplit
func selectdefault(sel *hselect) (selected bool) {
	selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	return
}

func selectdefaultImpl(sel *hselect, callerpc uintptr, so uintptr) {
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectdefault: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = callerpc
	cas.c = nil
	cas.so = uint16(so)
	cas.kind = caseDefault

	if debugSelect {
		print("selectdefault s=", sel, " pc=", hex(cas.pc), " so=", cas.so, "\n")
	}
}

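// sellock locks all channels involved in the select, in lockorder.
// Because lockorder sorts cases by channel address, duplicate channels
// are adjacent and each channel is locked only once; nil channels
// (the default case) sort first and are skipped.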
func sellock(scases []scase, lockorder []uint16) {
	var c *hchan
	for _, o := range lockorder {
		c0 := scases[o].c
		if c0 != nil && c0 != c {
			c = c0
			lock(&c.lock)
		}
	}
}

func selunlock(scases []scase, lockorder []uint16) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	n := len(scases)
	r := 0
	// skip the default case
	if n > 0 && scases[lockorder[0]].c == nil {
		r = 1
	}
	for i := n - 1; i >= r; i-- {
		c := scases[lockorder[i]].c
		if i > 0 && c == scases[lockorder[i-1]].c {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}

func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	return true
}

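// block implements an empty select statement ("select {}"):
// it parks the goroutine forever.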
func block() {
	gopark(nil, nil, "select (no cases)", traceEvGoStop, 1) // forever
}

// selectgo implements the select statement.
//
// *sel is on the current goroutine's stack (regardless of any
// escaping in selectgo).
//
// selectgo does not return. Instead, it overwrites its return PC and
// returns directly to the triggered select case. Because of this, it
// cannot appear at the top of a split stack.
//
//go:nosplit
func selectgo(sel *hselect) {
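	// selectgoImpl reports the winning case's pc and so. Storing true
	// at offset from &sel sets the result of that case's registration
	// call, and rewriting the return PC makes selectgo return to just
	// after that call, so the case's body executes.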
	pc, offset := selectgoImpl(sel)
	*(*bool)(add(unsafe.Pointer(&sel), uintptr(offset))) = true
	setcallerpc(unsafe.Pointer(&sel), pc)
}

// selectgoImpl returns scase.pc and scase.so for the select
// case which fired.
func selectgoImpl(sel *hselect) (uintptr, uint16) {
	if debugSelect {
		print("select: sel=", sel, "\n")
	}

	scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
	scases := *(*[]scase)(unsafe.Pointer(&scaseslice))

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
		for i := 0; i < int(sel.ncase); i++ {
			scases[i].releasetime = -1
		}
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
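	// This is an "inside-out" Fisher-Yates shuffle: after iteration i,
	// pollorder[0..i] holds a uniform random permutation of 0..i.
	// It relies on the select block being zero-initialized, so that
	// pollorder[0] starts out as 0.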
	pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
	for i := 1; i < int(sel.ncase); i++ {
		j := int(fastrand()) % (i + 1)
		pollorder[i] = pollorder[j]
		pollorder[j] = uint16(i)
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]uint16)(unsafe.Pointer(&lockslice))
	for i := 0; i < int(sel.ncase); i++ {
		j := i
		// Start with the pollorder to permute cases on the same channel.
		c := scases[pollorder[i]].c
		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = pollorder[i]
	}
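	// lockorder is now a max-heap keyed by channel address. Repeatedly
	// swap the maximum to the end and sift down to sort ascending.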
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		o := lockorder[i]
		c := scases[o].c
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
				k++
			}
			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = o
	}
	/*
		for i := 0; i+1 < int(sel.ncase); i++ {
			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	*/

	// lock all the channels involved in the select
	sellock(scases, lockorder)

	var (
		gp     *g
		done   uint32
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
		nextp  **sudog
	)

loop:
	// pass 1 - look for something already waiting
	var dfl *scase
	var cas *scase
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas.c

		switch cas.kind {
		case caseRecv:
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}

		case caseSend:
			if raceenabled {
				racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}

		case caseDefault:
			dfl = cas
		}
	}

	if dfl != nil {
		selunlock(scases, lockorder)
		cas = dfl
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	done = 0
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	nextp = &gp.waiting
	for _, casei := range lockorder {
		cas = &scases[casei]
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		// Note: selectdone is adjusted for stack copies in stack1.go:adjustsudogs
		sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
		// No stack splits between assigning elem and enqueuing
		// sg on gp.waiting where copystack can find it.
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.c = c
		// Construct waiting list in lock order.
		*nextp = sg
		nextp = &sg.waitlink

		switch cas.kind {
		case caseRecv:
			c.recvq.enqueue(sg)

		case caseSend:
			c.sendq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 2)

	// While we were asleep, some goroutine came along and completed
	// one of the cases in the select and woke us up (called ready).
	// As part of that process, the goroutine did a cas on done above
	// (aka *sg.selectdone for all queued sg) to win the right to
	// complete the select. Now done = 1.
	//
	// If we copy (grow) our own stack, we will update the
	// selectdone pointers inside the gp.waiting sudog list to point
	// at the new stack. Another goroutine attempting to
	// complete one of our (still linked in) select cases might
	// see the new selectdone pointer (pointing at the new stack)
	// before the new stack has real data; if the new stack has done = 0
	// (before the old values are copied over), the goroutine might
	// do a cas via sg.selectdone and incorrectly believe that it has
	// won the right to complete the select, executing a second
	// communication and attempting to wake us (call ready) again.
	//
	// Then things break.
	//
	// The best break is that the goroutine doing ready sees the
	// _Gcopystack status and throws, as in #17007.
	// A worse break would be for us to continue on, start running real code,
	// block in a semaphore acquisition (sema.go), and have the other
	// goroutine wake us up without having really acquired the semaphore.
	// That would result in the goroutine spuriously running and then
	// queue up another spurious wakeup when the semaphore really is ready.
	// In general the situation can cascade until something notices the
	// problem and causes a crash.
	//
	// A stack shrink does not have this problem, because it locks
	// all the channels that are involved first, blocking out the
	// possibility of a cas on selectdone.
	//
	// A stack growth before gopark above does not have this
	// problem, because we hold those channel locks (released by
	// selparkcommit).
	//
	// A stack growth after sellock below does not have this
	// problem, because again we hold those channel locks.
	//
	// The only problem is a stack growth during sellock.
	// To keep that from happening, run sellock on the system stack.
	//
	// It might be that we could avoid this if copystack copied the
	// stack before calling adjustsudogs. In that case,
	// syncadjustsudogs would need to recopy the tiny part that
	// it copies today, resulting in a little bit of extra copying.
	//
	// An even better fix, not for the week before a release candidate,
	// would be to put space in every sudog and make selectdone
	// point at (say) the space in the first sudog.

	systemstack(func() {
		sellock(scases, lockorder)
	})

	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in lock order.
	cas = nil
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.selectdone = nil
		sg1.elem = nil
		sg1.c = nil
	}
	gp.waiting = nil

	for _, casei := range lockorder {
		k = &scases[casei]
		if sglist.releasetime > 0 {
			k.releasetime = sglist.releasetime
		}
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			cas = k
		} else {
			c = k.c
			if k.kind == caseSend {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		// We can wake up with gp.param == nil (so cas == nil)
		// when a channel involved in the select has been closed.
		// It is easiest to loop and re-run the operation;
		// we'll see that it's now closed.
		// Maybe some day we can signal the close explicitly,
		// but we'd have to distinguish close-on-reader from close-on-writer.
		// It's easiest not to duplicate the code and just recheck above.
		// We know that something closed, and things never un-close,
		// so we won't block again.
		goto loop
	}

	c = cas.c

	if debugSelect {
		print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
	}

	if cas.kind == caseRecv {
		if cas.receivedp != nil {
			*cas.receivedp = true
		}
	}

	if raceenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		} else if cas.kind == caseSend {
			raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		}
	}
	if msanenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			msanwrite(cas.elem, c.elemtype.size)
		} else if cas.kind == caseSend {
			msanread(cas.elem, c.elemtype.size)
		}
	}

	selunlock(scases, lockorder)
	goto retc

bufrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		raceacquire(chanbuf(c, c.recvx))
		racerelease(chanbuf(c, c.recvx))
	}
	if msanenabled && cas.elem != nil {
		msanwrite(cas.elem, c.elemtype.size)
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	typedmemclr(c.elemtype, qp)
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(scases, lockorder)
	goto retc

bufsend:
	// can send to buffer
	if raceenabled {
		raceacquire(chanbuf(c, c.sendx))
		racerelease(chanbuf(c, c.sendx))
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(scases, lockorder)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
	if debugSelect {
		print("syncrecv: sel=", sel, " c=", c, "\n")
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	goto retc

rclose:
	// read at end of closed channel
	selunlock(scases, lockorder)
	if cas.receivedp != nil {
		*cas.receivedp = false
	}
	if cas.elem != nil {
		typedmemclr(c.elemtype, cas.elem)
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(c))
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
	if debugSelect {
		print("syncsend: sel=", sel, " c=", c, "\n")
	}
	goto retc

retc:
	if cas.releasetime > 0 {
		blockevent(cas.releasetime-t0, 2)
	}
	return cas.pc, cas.so

sclose:
	// send on closed channel
	selunlock(scases, lockorder)
	panic(plainError("send on closed channel"))
}

func (c *hchan) sortkey() uintptr {
	// TODO(khr): if we have a moving garbage collector, we'll need to
	// change this function.
	return uintptr(unsafe.Pointer(c))
}

// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)

//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
	// flagNoScan is safe here, because all objects are also referenced from cases.
	size := selectsize(uintptr(len(cases)))
	sel := (*hselect)(mallocgc(size, nil, true))
	newselect(sel, int64(size), int32(len(cases)))
	r := new(bool)
	for i := range cases {
		rc := &cases[i]
		switch rc.dir {
		case selectDefault:
			selectdefaultImpl(sel, uintptr(i), 0)
		case selectSend:
			if rc.ch == nil {
				break
			}
			selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0)
		case selectRecv:
			if rc.ch == nil {
				break
			}
			selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0)
		}
	}

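	// Each case was registered above with pc set to its index i, so the
	// pc that selectgoImpl reports is simply the chosen case's index.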
	pc, _ := selectgoImpl(sel)
	chosen = int(pc)
	recvOK = *r
	return
}

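// dequeueSudoG removes sgp from q, a doubly-linked list of sudogs.
// Unlike dequeue, it can unlink from the middle of the queue.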
func (q *waitq) dequeueSudoG(sgp *sudog) {
	x := sgp.prev
	y := sgp.next
	if x != nil {
		if y != nil {
			// middle of queue
			x.next = y
			y.prev = x
			sgp.next = nil
			sgp.prev = nil
			return
		}
		// end of queue
		x.next = nil
		q.last = x
		sgp.prev = nil
		return
	}
	if y != nil {
		// start of queue
		y.prev = nil
		q.first = y
		sgp.next = nil
		return
	}

	// x==y==nil. Either sgp is the only element in the queue,
	// or it has already been removed. Use q.first to disambiguate.
	if q.first == sgp {
		q.first = nil
		q.last = nil
	}
}
    750