// Copyright 2018 The Go Authors. All rights reserved.// Use of this source code is governed by a BSD-style// license that can be found in the LICENSE file.//go:build amd64 || arm64package runtimeimport ()const (debugCallSystemStack = "executing on Go runtime stack"debugCallUnknownFunc = "call from unknown function"debugCallRuntime = "call from within the Go runtime"debugCallUnsafePoint = "call not at safe point")func ()func ( any)// debugCallCheck checks whether it is safe to inject a debugger// function call with return PC pc. If not, it returns a string// explaining why.////go:nosplitfunc ( uintptr) string {// No user calls from the system stack.ifgetg() != getg().m.curg {returndebugCallSystemStack }if := getcallersp(); !(getg().stack.lo < && <= getg().stack.hi) {// Fast syscalls (nanotime) and racecall switch to the // g0 stack without switching g. We can't safely make // a call in this state. (We can't even safely // systemstack.)returndebugCallSystemStack }// Switch to the system stack to avoid overflowing the user // stack.varstringsystemstack(func() { := findfunc()if !.valid() { = debugCallUnknownFuncreturn } := funcname()switch {case"debugCall32","debugCall64","debugCall128","debugCall256","debugCall512","debugCall1024","debugCall2048","debugCall4096","debugCall8192","debugCall16384","debugCall32768","debugCall65536":// These functions are allowed so that the debugger can initiate multiple function calls. // See: https://golang.org/cl/161137/return }// Disallow calls from the runtime. We could // potentially make this condition tighter (e.g., not // when locks are held), but there are enough tightly // coded sequences (e.g., defer handling) that it's // better to play it safe.if := "runtime."; len() > len() && [:len()] == { = debugCallRuntimereturn }// Check that this isn't an unsafe-point.if != .entry() { -- } := pcdatavalue(, abi.PCDATA_UnsafePoint, , nil)if != abi.UnsafePointSafe {// Not at a safe point. 
= debugCallUnsafePoint } })return}// debugCallWrap starts a new goroutine to run a debug call and blocks// the calling goroutine. On the goroutine, it prepares to recover// panics from the debug call, and then calls the call dispatching// function at PC dispatch.//// This must be deeply nosplit because there are untyped values on the// stack from debugCallV2.////go:nosplitfunc ( uintptr) {varuint32 := getcallerpc() := getg()// Lock ourselves to the OS thread. // // Debuggers rely on us running on the same thread until we get to // dispatch the function they asked as to. // // We're going to transfer this to the new G we just created.lockOSThread()// Create a new goroutine to execute the call on. Run this on // the system stack to avoid growing our stack.systemstack(func() {// TODO(mknyszek): It would be nice to wrap these arguments in an allocated // closure and start the goroutine with that closure, but the compiler disallows // implicit closure allocation in the runtime. := debugCallWrap1 := newproc1(*(**funcval)(unsafe.Pointer(&)), , ) := &debugCallWrapArgs{dispatch: ,callingG: , } .param = unsafe.Pointer()// Transfer locked-ness to the new goroutine. // Save lock state to restore later. := .mif != .lockedm.ptr() {throw("inconsistent lockedm") }// Save the external lock count and clear it so // that it can't be unlocked from the debug call. // Note: we already locked internally to the thread, // so if we were locked before we're still locked now. = .lockedExt .lockedExt = 0 .lockedg.set() .lockedm.set() .lockedm = 0// Mark the calling goroutine as being at an async // safe-point, since it has a few conservative frames // at the bottom of the stack. This also prevents // stack shrinks. .asyncSafePoint = true// Stash newg away so we can execute it below (mcall's // closure can't capture anything). .schedlink.set() })// Switch to the new goroutine.mcall(func( *g) {// Get newg. 
:= .schedlink.ptr() .schedlink = 0// Park the calling goroutine.iftraceEnabled() {traceGoPark(traceBlockDebugCall, 1) }casGToWaiting(, _Grunning, waitReasonDebugCall)dropg()// Directly execute the new goroutine. The debug // protocol will continue on the new goroutine, so // it's important we not just let the scheduler do // this or it may resume a different goroutine.execute(, true) })// We'll resume here when the call returns.// Restore locked state. := .m .lockedExt = .lockedg.set() .lockedm.set()// Undo the lockOSThread we did earlier.unlockOSThread() .asyncSafePoint = false}typedebugCallWrapArgsstruct {dispatchuintptrcallingG *g}// debugCallWrap1 is the continuation of debugCallWrap on the callee// goroutine.func () { := getg() := (*debugCallWrapArgs)(.param) , := .dispatch, .callingG .param = nil// Dispatch call and trap panics.debugCallWrap2()// Resume the caller goroutine.getg().schedlink.set()mcall(func( *g) { := .schedlink.ptr() .schedlink = 0// Unlock this goroutine from the M if necessary. The // calling G will relock.if .lockedm != 0 { .lockedm = 0 .m.lockedg = 0 }// Switch back to the calling goroutine. At some point // the scheduler will schedule us again and we'll // finish exiting.iftraceEnabled() {traceGoSched() }casgstatus(, _Grunning, _Grunnable)dropg()lock(&sched.lock)globrunqput()unlock(&sched.lock)iftraceEnabled() {traceGoUnpark(, 0) }casgstatus(, _Gwaiting, _Grunnable)execute(, true) })}func ( uintptr) {// Call the dispatch function and trap panics.varfunc() := funcval{} *(*unsafe.Pointer)(unsafe.Pointer(&)) = noescape(unsafe.Pointer(&))varbooldeferfunc() {if ! { := recover()debugCallPanicked() } }() () = true}
The pages are generated with Golds v0.6.7. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.
PRs and bug reports are welcome and can be submitted to the issue list.
Please follow @Go100and1 (reachable from the QR code on the left) to get the latest news about Golds.