runtime/internal/atomic.Uint32.Load (method)
95 uses
runtime/internal/atomic (current package)
types.go#L193: func (u *Uint32) Load() uint32 {
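The references below are call sites inside package runtime. For illustration only, the following sketch uses the exported sync/atomic.Uint32 type (Go 1.19+), which mirrors this internal Load/Store/CompareAndSwap surface; runtime/internal/atomic itself cannot be imported outside the runtime.

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	func main() {
		// sync/atomic.Uint32 exposes the same Load/Store/CompareAndSwap
		// methods as the runtime-internal Uint32 indexed on this page.
		var flag atomic.Uint32
		flag.Store(1)
		fmt.Println(flag.Load()) // prints 1
	}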
runtime
cgocall.go#L275: if gp.m.needextram || extraMWaiters.Load() > 0 {
debuglog.go#L67: if l1.owned.Load() == 0 && l1.owned.CompareAndSwap(0, 1) {
mcache.go#L321: flushGen := c.flushGen.Load()
metrics.go#L315: out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
mfinal.go#L162: if fingStatus.Load() == fingUninitialized && fingStatus.CompareAndSwap(fingUninitialized, fingCreated) {
mgc.go#L460: n := work.cycles.Load()
mgc.go#L475: for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
mgc.go#L491: for work.cycles.Load() == n+1 && !isSweepDone() {
mgc.go#L499: cycle := work.cycles.Load()
mgc.go#L512: nMarks := work.cycles.Load()
mgc.go#L570: if !memstats.enablegc || panicking.Load() != 0 || gcphase != _GCoff {
mgc.go#L589: return int32(t.n-work.cycles.Load()) > 0
mgc.go#L661: if fg := p.mcache.flushGen.Load(); fg != mheap_.sweepgen {
mgcpacer.go#L1271: gcWaitOnMark(work.cycles.Load())
mgcsweep.go#L153: state := a.state.Load()
mgcsweep.go#L170: state := a.state.Load()
mgcsweep.go#L195: state := a.state.Load()
mgcsweep.go#L207: return a.state.Load() &^ sweepDrainedMask
mgcsweep.go#L214: return a.state.Load() == sweepDrainedMask
mprof.go#L177: v := c.value.Load()
mprof.go#L186: prev := c.value.Load()
mprof.go#L203: prev := c.value.Load()
mprof.go#L888: return goroutineProfileState((*atomic.Uint32)(p).Load())
mprof.go#L913: if fingStatus.Load()&fingRunningFinalizer != 0 {
mspanset.go#L247: if block.popped.Load() == 0 {
mspanset.go#L253: if block.popped.Load() == spanSetBlockEntries {
mstats.go#L779: gen := m.gen.Load() % 3
mstats.go#L850: currGen := m.gen.Load()
mstats.go#L873: for p.statsSeq.Load()%2 != 0 {
netpoll.go#L139: return pollInfo(pd.atomicInfo.Load())
netpoll.go#L163: x := pd.atomicInfo.Load()
netpoll.go#L165: x = pd.atomicInfo.Load()
netpoll.go#L174: x := pd.atomicInfo.Load()
netpoll.go#L180: x = pd.atomicInfo.Load()
netpoll.go#L212: if netpollInited.Load() == 0 {
netpoll.go#L215: if netpollInited.Load() == 0 {
netpoll.go#L224: return netpollInited.Load() != 0
preempt.go#L194: if gp.preemptStop && gp.preempt && gp.stackguard0 == stackPreempt && asyncM == gp.m && asyncM.preemptGen.Load() == asyncGen {
preempt.go#L210: asyncGen2 := asyncM2.preemptGen.Load()
print.go#L42: if panicking.Load() == 0 {
proc.go#L277: if runningPanicDefers.Load() != 0 {
proc.go#L280: if runningPanicDefers.Load() == 0 {
proc.go#L286: if panicking.Load() != 0 {
proc.go#L799: count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
proc.go#L977: return gp.atomicstatus.Load()
proc.go#L1060: if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
proc.go#L1067: for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
proc.go#L1724: if mp.signalPending.Load() != 0 {
proc.go#L1908: wait := freem.freeWait.Load()
proc.go#L2099: } else if extraMLength.Load() == 0 {
proc.go#L2946: if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
proc.go#L2977: if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
proc.go#L3072: if !mp.spinning && sched.needspinning.Load() == 1 {
proc.go#L3169: if netpollinited() && (netpollWaiters.Load() > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
proc.go#L3245: if netpollinited() && netpollWaiters.Load() > 0 && sched.lastpoll.Load() != 0 {
proc.go#L3681: if pp != getg().m.p.ptr() || int(pp.deletedTimers.Load()) <= int(pp.numTimers.Load()/4) {
proc.go#L3706: if pp == getg().m.p.ptr() && int(pp.deletedTimers.Load()) > len(pp.timers)/4 {
proc.go#L4948: if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
proc.go#L5413: if panicking.Load() > 0 {
proc.go#L5422: if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
proc.go#L5633: if scavenger.sysmonWake.Load() != 0 {
proc.go#L5805: print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
proc.go#L6071: if pp.numTimers.Load() > 0 {
proc.go#L6079: if pp.numTimers.Load() == 0 {
profbuf.go#L397: if b.eof.Load() > 0 {
profbuf.go#L478: if b.eof.Load() > 0 {
sema.go#L182: if root.nwait.Load() == 0 {
sema.go#L188: if root.nwait.Load() == 0 {
sema.go#L540: if l.wait.Load() == atomic.Load(&l.notify) {
sema.go#L555: atomic.Store(&l.notify, l.wait.Load())
sema.go#L573: if l.wait.Load() == atomic.Load(&l.notify) {
sema.go#L581: if t == l.wait.Load() {
signal_unix.go#L756: if crashing < mcount()-int32(extraMLength.Load()) {
sigqueue.go#L100: switch sig.state.Load() {
sigqueue.go#L143: switch sig.state.Load() {
sigqueue.go#L185: for sig.delivering.Load() != 0 {
sigqueue.go#L192: for sig.state.Load() != sigReceiving {
symtab.go#L873: if strict && panicking.Load() == 0 {
symtab.go#L916: if panicking.Load() != 0 || !strict {
time.go#L267: if t.status.Load() != timerNoStatus {
time.go#L293: if netpollInited.Load() == 0 {
time.go#L316: switch s := t.status.Load(); s {
time.go#L453: switch status = t.status.Load(); status {
time.go#L581: switch s := t.status.Load(); s {
time.go#L618: switch s := t.status.Load(); s {
time.go#L692: switch s := t.status.Load(); s {
time.go#L780: switch s := t.status.Load(); s {
time.go#L916: switch s := t.status.Load(); s {
time.go#L997: if numTimers := int(pp.numTimers.Load()); len(pp.timers) != numTimers {
traceback.go#L1331: return fingStatus.Load()&fingRunningFinalizer == 0
traceback.go#L1606: if panicking.Load() > 0 || getg().m.curg != getg() {
traceback.go#L1626: if panicking.Load() > 0 || getg().m.curg != getg() {
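Two patterns recur in the call sites above: sites such as debuglog.go#L67 and mfinal.go#L162 pair Load with CompareAndSwap so that a cheap read filters out the common already-claimed case before attempting the swap, while sites such as proc.go#L1067 and mstats.go#L873 poll Load while waiting for a state change. A minimal sketch of the Load-then-CompareAndSwap guard, again using the exported sync/atomic.Uint32 and hypothetical names:

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	// owned is a hypothetical ownership flag, modeled on l1.owned in debuglog.go.
	var owned atomic.Uint32

	// tryAcquire claims ownership at most once. The initial Load avoids
	// issuing a CompareAndSwap when the flag is already set.
	func tryAcquire() bool {
		return owned.Load() == 0 && owned.CompareAndSwap(0, 1)
	}

	func main() {
		fmt.Println(tryAcquire()) // true: first caller claims the flag
		fmt.Println(tryAcquire()) // false: already owned
	}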
The pages are generated with Golds v0.6.7. (GOOS=linux GOARCH=amd64)