// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.

import (
	"internal/abi"
	"unsafe"
)

const debugSelect = false

// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/compile/internal/walk/select.go's scasetype.
type scase struct {
	c    *hchan         // chan
	elem unsafe.Pointer // data element
}

var (
	chansendpc = abi.FuncPCABIInternal(chansend)
	chanrecvpc = abi.FuncPCABIInternal(chanrecv)
)

func selectsetpc(pc *uintptr) {
	*pc = getcallerpc()
}

func sellock(scases []scase, lockorder []uint16) {
	var c *hchan
	for _, o := range lockorder {
		c0 := scases[o].c
		if c0 != c {
			c = c0
			lock(&c0.lock)
		}
	}
}

func selunlock(scases []scase, lockorder []uint16) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	for i := len(lockorder) - 1; i >= 0; i-- {
		c := scases[lockorder[i]].c
		if i > 0 && c == scases[lockorder[i-1]].c {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}
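// Illustrative note (not part of the original source): locking the channels
// in a single address-sorted order is what lets concurrent selects over
// overlapping channel sets avoid lock-ordering deadlock. In the sketch below
// (hypothetical user code, channel names made up), both goroutines reach
// selectgo with the same two channels; each call locks them in the same
// sorted order, so neither can hold one channel lock while waiting for the
// other.
//
//	a, b := make(chan int), make(chan int)
//	go func() {
//		select {
//		case <-a:
//		case <-b:
//		}
//	}()
//	select {
//	case a <- 1: // pairs with the receive in the goroutine above
//	case b <- 2:
//	}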
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// There are unlocked sudogs that point into gp's stack. Stack
	// copying must lock the channels of those sudogs.
	// Set activeStackChans here instead of before we try parking
	// because we could self-deadlock in stack growth on a
	// channel lock.
	gp.activeStackChans = true
	// Mark that it's safe for stack shrinking to occur now,
	// because any thread acquiring this G's stack for shrinking
	// is guaranteed to observe activeStackChans after this store.
	gp.parkingOnChan.Store(false)
	// Make sure we unlock after setting activeStackChans and
	// unsetting parkingOnChan. The moment we unlock any of the
	// channel locks we risk gp getting readied by a channel operation
	// and so gp could continue running before everything before the
	// unlock is visible (even to gp itself).

	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	return true
}

func block() {
	gopark(nil, nil, waitReasonSelectNoCases, traceBlockForever, 1) // forever
}
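// Illustrative note (not part of the original source): a user-level select
// like the one sketched below is lowered by the compiler (see
// src/cmd/compile/internal/walk/select.go) into an on-stack array of scase
// records plus a call to selectgo, with send cases placed before receive
// cases to match the nsends/nrecvs split. The generated code is
// compiler-internal; this is only a rough correspondence.
//
//	select {
//	case ch1 <- v:   // send case: roughly scase{c: ch1, elem: &v}
//	case x := <-ch2: // recv case: roughly scase{c: ch2, elem: &x}
//	default:         // default present, so selectgo is called with block=false
//	}
//
//	// Roughly: chosen, recvOK := selectgo(&cases[0], &order[0], nil, 1, 1, false)
//	// chosen == -1 means no case was ready and the default branch runs;
//	// chosen == 1 means the receive fired, and recvOK reports whether a
//	// value was received (false if ch2 was closed and drained).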
// selectgo implements the select statement.
//
// cas0 points to an array of type [ncases]scase, and order0 points to
// an array of type [2*ncases]uint16 where ncases must be <= 65536.
// Both reside on the goroutine's stack (regardless of any escaping in
// selectgo).
//
// For race detector builds, pc0 points to an array of type
// [ncases]uintptr (also on the stack); for other builds, it's set to
// nil.
//
// selectgo returns the index of the chosen scase, which matches the
// ordinal position of its respective select{recv,send,default} call.
// Also, if the chosen scase was a receive operation, it reports whether
// a value was received.
func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) {
	if debugSelect {
		print("select: cas0=", cas0, "\n")
	}

	// NOTE: In order to maintain a lean stack size, the number of scases
	// is capped at 65536.
	cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0))
	order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0))

	ncases := nsends + nrecvs
	scases := cas1[:ncases:ncases]
	pollorder := order1[:ncases:ncases]
	lockorder := order1[ncases:][:ncases:ncases]
	// NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler.

	// Even when raceenabled is true, there might be select
	// statements in packages compiled without -race (e.g.,
	// ensureSigM in runtime/signal_unix.go).
	var pcs []uintptr
	if raceenabled && pc0 != nil {
		pc1 := (*[1 << 16]uintptr)(unsafe.Pointer(pc0))
		pcs = pc1[:ncases:ncases]
	}
	casePC := func(casi int) uintptr {
		if pcs == nil {
			return 0
		}
		return pcs[casi]
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
	norder := 0
	for i := range scases {
		cas := &scases[i]

		// Omit cases without channels from the poll and lock orders.
		if cas.c == nil {
			cas.elem = nil // allow GC
			continue
		}

		j := fastrandn(uint32(norder + 1))
		pollorder[norder] = pollorder[j]
		pollorder[j] = uint16(i)
		norder++
	}
	pollorder = pollorder[:norder]
	lockorder = lockorder[:norder]

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	for i := range lockorder {
		j := i
		// Start with the pollorder to permute cases on the same channel.
		c := scases[pollorder[i]].c
		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = pollorder[i]
	}
	for i := len(lockorder) - 1; i >= 0; i-- {
		o := lockorder[i]
		c := scases[o].c
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
				k++
			}
			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = o
	}

	if debugSelect {
		for i := 0; i+1 < len(lockorder); i++ {
			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	}

	// lock all the channels involved in the select
	sellock(scases, lockorder)

	var (
		gp     *g
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
		nextp  **sudog
	)

	// pass 1 - look for something already waiting
	var casi int
	var cas *scase
	var caseSuccess bool
	var caseReleaseTime int64 = -1
	var recvOK bool
	for _, casei := range pollorder {
		casi = int(casei)
		cas = &scases[casi]
		c = cas.c

		if casi >= nsends {
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}
		} else {
			if raceenabled {
				racereadpc(c.raceaddr(), casePC(casi), chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}
		}
	}

	if !block {
		selunlock(scases, lockorder)
		casi = -1
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	nextp = &gp.waiting
	for _, casei := range lockorder {
		casi = int(casei)
		cas = &scases[casi]
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		sg.isSelect = true
		// No stack splits between assigning elem and enqueuing
		// sg on gp.waiting where copystack can find it.
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.c = c
		// Construct waiting list in lock order.
		*nextp = sg
		nextp = &sg.waitlink

		if casi < nsends {
			c.sendq.enqueue(sg)
		} else {
			c.recvq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	// Signal to anyone trying to shrink our stack that we're about
	// to park on a channel. The window between when this G's status
	// changes and when we set gp.activeStackChans is not safe for
	// stack shrinking.
	gp.parkingOnChan.Store(true)
	gopark(selparkcommit, nil, waitReasonSelect, traceBlockSelect, 1)
	gp.activeStackChans = false

	sellock(scases, lockorder)

	gp.selectDone.Store(0)
	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in lock order.
	casi = -1
	cas = nil
	caseSuccess = false
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.isSelect = false
		sg1.elem = nil
		sg1.c = nil
	}
	gp.waiting = nil

	for _, casei := range lockorder {
		k = &scases[casei]
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			casi = int(casei)
			cas = k
			caseSuccess = sglist.success
			if sglist.releasetime > 0 {
				caseReleaseTime = sglist.releasetime
			}
		} else {
			c = k.c
			if int(casei) < nsends {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		throw("selectgo: bad wakeup")
	}

	c = cas.c

	if debugSelect {
		print("wait-return: cas0=", cas0, " c=", c, " cas=", cas, " send=", casi < nsends, "\n")
	}

	if casi < nsends {
		if !caseSuccess {
			goto sclose
		}
	} else {
		recvOK = caseSuccess
	}

	if raceenabled {
		if casi < nsends {
			raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
		} else if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
		}
	}
	if msanenabled {
		if casi < nsends {
			msanread(cas.elem, c.elemtype.Size_)
		} else if cas.elem != nil {
			msanwrite(cas.elem, c.elemtype.Size_)
		}
	}
	if asanenabled {
		if casi < nsends {
			asanread(cas.elem, c.elemtype.Size_)
		} else if cas.elem != nil {
			asanwrite(cas.elem, c.elemtype.Size_)
		}
	}

	selunlock(scases, lockorder)
	goto retc

bufrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc)
		}
		racenotify(c, c.recvx, nil)
	}
	if msanenabled && cas.elem != nil {
		msanwrite(cas.elem, c.elemtype.Size_)
	}
	if asanenabled && cas.elem != nil {
		asanwrite(cas.elem, c.elemtype.Size_)
	}
	recvOK = true
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	typedmemclr(c.elemtype, qp)
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(scases, lockorder)
	goto retc

bufsend:
	// can send to buffer
	if raceenabled {
		racenotify(c, c.sendx, nil)
		raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.Size_)
	}
	if asanenabled {
		asanread(cas.elem, c.elemtype.Size_)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(scases, lockorder)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncrecv: cas0=", cas0, " c=", c, "\n")
	}
	recvOK = true
	goto retc

rclose:
	// read at end of closed channel
	selunlock(scases, lockorder)
	recvOK = false
	if cas.elem != nil {
		typedmemclr(c.elemtype, cas.elem)
	}
	if raceenabled {
		raceacquire(c.raceaddr())
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, casePC(casi), chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.Size_)
	}
	if asanenabled {
		asanread(cas.elem, c.elemtype.Size_)
	}
	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncsend: cas0=", cas0, " c=", c, "\n")
	}
	goto retc

retc:
	if caseReleaseTime > 0 {
		blockevent(caseReleaseTime-t0, 1)
	}
	return casi, recvOK

sclose:
	// send on closed channel
	selunlock(scases, lockorder)
	panic(plainError("send on closed channel"))
}

func (c *hchan) sortkey() uintptr {
	return uintptr(unsafe.Pointer(c))
}
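// Illustrative note (not part of the original source): the behavior user code
// observes from the passes above is that a select with a default never parks
// (if pass 1 finds nothing ready and block is false, selectgo returns -1 and
// the default branch runs), and a select with several ready cases chooses
// among them via the randomized pollorder rather than source order.
//
//	ch := make(chan int, 1)
//	ch <- 1
//	select {
//	case v := <-ch:
//		_ = v // buffered value available: the bufrecv path runs, v == 1
//	default:
//		// taken only if ch had no buffered value and no waiting sender
//	}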
// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)

//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (int, bool) {
	if len(cases) == 0 {
		block()
	}
	sel := make([]scase, len(cases))
	orig := make([]int, len(cases))
	nsends, nrecvs := 0, 0
	dflt := -1
	for i, rc := range cases {
		var j int
		switch rc.dir {
		case selectDefault:
			dflt = i
			continue
		case selectSend:
			j = nsends
			nsends++
		case selectRecv:
			nrecvs++
			j = len(cases) - nrecvs
		}

		sel[j] = scase{c: rc.ch, elem: rc.val}
		orig[j] = i
	}

	// Only a default case.
	if nsends+nrecvs == 0 {
		return dflt, false
	}

	// Compact sel and orig if necessary.
	if nsends+nrecvs < len(cases) {
		copy(sel[nsends:], sel[len(cases)-nrecvs:])
		copy(orig[nsends:], orig[len(cases)-nrecvs:])
	}

	order := make([]uint16, 2*(nsends+nrecvs))
	var pc0 *uintptr
	if raceenabled {
		pcs := make([]uintptr, nsends+nrecvs)
		for i := range pcs {
			selectsetpc(&pcs[i])
		}
		pc0 = &pcs[0]
	}

	chosen, recvOK := selectgo(&sel[0], &order[0], pc0, nsends, nrecvs, dflt == -1)

	// Translate chosen back to caller's ordering.
	if chosen < 0 {
		chosen = dflt
	} else {
		chosen = orig[chosen]
	}
	return chosen, recvOK
}

func (q *waitq) dequeueSudoG(sgp *sudog) {
	x := sgp.prev
	y := sgp.next
	if x != nil {
		if y != nil {
			// middle of queue
			x.next = y
			y.prev = x
			sgp.next = nil
			sgp.prev = nil
			return
		}
		// end of queue
		x.next = nil
		q.last = x
		sgp.prev = nil
		return
	}
	if y != nil {
		// start of queue
		y.prev = nil
		q.first = y
		sgp.next = nil
		return
	}

	// x==y==nil. Either sgp is the only element in the queue,
	// or it has already been removed. Use q.first to disambiguate.
	if q.first == sgp {
		q.first = nil
		q.last = nil
	}
}
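// Illustrative note (not part of the original source): reflect_rselect is the
// runtime half of reflect.Select. The program below shows the user-visible
// API that funnels into it; each reflect.SelectCase becomes one runtimeSelect
// entry, and the returned index refers to the caller's original case
// ordering, as restored via orig above.
//
//	package main
//
//	import (
//		"fmt"
//		"reflect"
//	)
//
//	func main() {
//		ch := make(chan int, 1)
//		ch <- 42
//		cases := []reflect.SelectCase{
//			{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)},
//			{Dir: reflect.SelectDefault},
//		}
//		chosen, v, ok := reflect.Select(cases)
//		fmt.Println(chosen, v.Interface(), ok) // 0 42 true
//	}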