// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// Metrics implementation exported to runtime/metrics.
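//
// The public entry point for these metrics is the runtime/metrics package.
// As an illustrative sketch of the consumer side (not part of this package):
//
//	sample := []metrics.Sample{{Name: "/sched/goroutines:goroutines"}}
//	metrics.Read(sample)
//	goroutines := sample[0].Value.Uint64()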

import (
	"internal/godebugs"
	"unsafe"
)

var (
	// metrics is a map of runtime/metrics keys to data used by the runtime
	// to sample each metric's value. metricsInit indicates it has been
	// initialized.
	//
	// These fields are protected by metricsSema which should be
	// locked/unlocked with metricsLock() / metricsUnlock().
	metricsSema uint32 = 1
	metricsInit bool
	metrics     map[string]metricData

	sizeClassBuckets []float64
	timeHistBuckets  []float64
)
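
// Code that reads or mutates the metrics map follows the same pattern as
// readMetrics and godebug_registerMetric below:
//
//	metricsLock()
//	initMetrics()
//	// ... access the metrics map ...
//	metricsUnlock()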

type metricData struct {
	// deps is the set of runtime statistics that this metric
	// depends on. Before compute is called, the statAggregate
	// which will be passed must ensure() these dependencies.
	deps statDepSet

	// compute is a function that populates a metricValue
	// given a populated statAggregate structure.
	compute func(in *statAggregate, out *metricValue)
}

func metricsLock() {
	// Acquire the metricsSema but with handoff. Operations are typically
	// expensive enough that queueing up goroutines and handing off between
	// them will be noticeably better-behaved.
	semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
	if raceenabled {
		raceacquire(unsafe.Pointer(&metricsSema))
	}
}

func metricsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&metricsSema))
	}
	semrelease(&metricsSema)
}

// initMetrics initializes the metrics map if it hasn't been yet.
//
// metricsSema must be held.
func initMetrics() {
	if metricsInit {
		return
	}

	sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
	// Skip size class 0 which is a stand-in for large objects, but large
	// objects are tracked separately (and they actually get placed in
	// the last bucket, not the first).
	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
	for i := 1; i < _NumSizeClasses; i++ {
		// Size classes have an inclusive upper-bound
		// and exclusive lower bound (e.g. 48-byte size class is
		// (32, 48]) whereas we want an inclusive lower-bound
		// and exclusive upper-bound (e.g. 48-byte size class is
		// [33, 49)). We can achieve this by shifting all bucket
		// boundaries up by 1.
		//
		// Also, a float64 can precisely represent integers with
		// value up to 2^53 and size classes are relatively small
		// (nowhere near 2^48 even) so this will give us exact
		// boundaries.
		sizeClassBuckets[i] = float64(class_to_size[i] + 1)
	}
	sizeClassBuckets = append(sizeClassBuckets, float64Inf())

	timeHistBuckets = timeHistogramMetricsBuckets()
	metrics = map[string]metricData{
		"/cgo/go-to-c-calls:calls": {
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(NumCgoCall())
			},
		},
		"/cpu/classes/gc/mark/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.gcAssistTime))
			},
		},
		"/cpu/classes/gc/mark/dedicated:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.gcDedicatedTime))
			},
		},
		"/cpu/classes/gc/mark/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.gcIdleTime))
			},
		},
		"/cpu/classes/gc/pause:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.gcPauseTime))
			},
		},
		"/cpu/classes/gc/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.gcTotalTime))
			},
		},
		"/cpu/classes/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.idleTime))
			},
		},
		"/cpu/classes/scavenge/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.scavengeAssistTime))
			},
		},
		"/cpu/classes/scavenge/background:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.scavengeBgTime))
			},
		},
		"/cpu/classes/scavenge/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.scavengeTotalTime))
			},
		},
		"/cpu/classes/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.totalTime))
			},
		},
		"/cpu/classes/user:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(.cpuStats.userTime))
			},
		},
		"/gc/cycles/automatic:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.gcCyclesDone - .sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/forced:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/total:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.gcCyclesDone
			},
		},
		"/gc/scan/globals:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .gcStats.globalsScan
			},
		},
		"/gc/scan/heap:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .gcStats.heapScan
			},
		},
		"/gc/scan/stack:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .gcStats.stackScan
			},
		},
		"/gc/scan/total:bytes": {
			deps: makeStatDepSet(gcStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .gcStats.totalScan
			},
		},
		"/gc/heap/allocs-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				 := .float64HistOrInit(sizeClassBuckets)
				.counts[len(.counts)-1] = uint64(.heapStats.largeAllocCount)
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for ,  := range .heapStats.smallAllocCount[1:] {
					.counts[] = uint64()
				}
			},
		},
		"/gc/heap/allocs:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .heapStats.totalAllocated
			},
		},
		"/gc/heap/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .heapStats.totalAllocs
			},
		},
		"/gc/heap/frees-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				 := .float64HistOrInit(sizeClassBuckets)
				.counts[len(.counts)-1] = uint64(.heapStats.largeFreeCount)
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for ,  := range .heapStats.smallFreeCount[1:] {
					.counts[] = uint64()
				}
			},
		},
		"/gc/heap/frees:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .heapStats.totalFreed
			},
		},
		"/gc/heap/frees:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .heapStats.totalFrees
			},
		},
		"/gc/heap/goal:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.heapGoal
			},
		},
		"/gc/gomemlimit:bytes": {
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(gcController.memoryLimit.Load())
			},
		},
		"/gc/gogc:percent": {
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(gcController.gcPercent.Load())
			},
		},
		"/gc/heap/live:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = gcController.heapMarked
			},
		},
		"/gc/heap/objects:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .heapStats.numObjects
			},
		},
		"/gc/heap/tiny/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.tinyAllocCount)
			},
		},
		"/gc/limiter/last-enabled:gc-cycle": {
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
			},
		},
		"/gc/pauses:seconds": {
			compute: func( *statAggregate,  *metricValue) {
				 := .float64HistOrInit(timeHistBuckets)
				// The bottom-most bucket, containing negative values, is tracked
				// as a separately as underflow, so fill that in manually and then
				// iterate over the rest.
				.counts[0] = memstats.gcPauseDist.underflow.Load()
				for  := range memstats.gcPauseDist.counts {
					.counts[+1] = memstats.gcPauseDist.counts[].Load()
				}
				.counts[len(.counts)-1] = memstats.gcPauseDist.overflow.Load()
			},
		},
		"/gc/stack/starting-size:bytes": {
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(startingStackSize)
			},
		},
		"/memory/classes/heap/free:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.committed - .heapStats.inHeap -
					.heapStats.inStacks - .heapStats.inWorkBufs -
					.heapStats.inPtrScalarBits)
			},
		},
		"/memory/classes/heap/objects:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .heapStats.inObjects
			},
		},
		"/memory/classes/heap/released:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.released)
			},
		},
		"/memory/classes/heap/stacks:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.inStacks)
			},
		},
		"/memory/classes/heap/unused:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.inHeap) - .heapStats.inObjects
			},
		},
		"/memory/classes/metadata/mcache/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.mCacheSys - .sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mcache/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mspan/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.mSpanSys - .sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/mspan/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/other:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.inWorkBufs+.heapStats.inPtrScalarBits) + .sysStats.gcMiscSys
			},
		},
		"/memory/classes/os-stacks:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.stacksSys
			},
		},
		"/memory/classes/other:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.otherSys
			},
		},
		"/memory/classes/profiling/buckets:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = .sysStats.buckHashSys
			},
		},
		"/memory/classes/total:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(.heapStats.committed+.heapStats.released) +
					.sysStats.stacksSys + .sysStats.mSpanSys +
					.sysStats.mCacheSys + .sysStats.buckHashSys +
					.sysStats.gcMiscSys + .sysStats.otherSys
			},
		},
		"/sched/gomaxprocs:threads": {
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(gomaxprocs)
			},
		},
		"/sched/goroutines:goroutines": {
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindUint64
				.scalar = uint64(gcount())
			},
		},
		"/sched/latencies:seconds": {
			compute: func( *statAggregate,  *metricValue) {
				 := .float64HistOrInit(timeHistBuckets)
				.counts[0] = sched.timeToRun.underflow.Load()
				for  := range sched.timeToRun.counts {
					.counts[+1] = sched.timeToRun.counts[].Load()
				}
				.counts[len(.counts)-1] = sched.timeToRun.overflow.Load()
			},
		},
		"/sync/mutex/wait/total:seconds": {
			compute: func( *statAggregate,  *metricValue) {
				.kind = metricKindFloat64
				.scalar = float64bits(nsToSec(sched.totalMutexWaitTime.Load()))
			},
		},
	}

	for _, info := range godebugs.All {
		if !info.Opaque {
			metrics["/godebug/non-default-behavior/"+info.Name+":events"] = metricData{compute: compute0}
		}
	}

	metricsInit = true
}

// compute0 reports a constant 0. It's the placeholder compute function for
// the /godebug/... metrics registered above until internal/godebug installs
// a real reader via godebug_registerMetric.
func compute0(_ *statAggregate, out *metricValue) {
	out.kind = metricKindUint64
	out.scalar = 0
}

type metricReader func() uint64

// compute adapts a plain func() uint64 reader to the compute signature used
// by metricData, reporting the reader's result as a uint64 metric.
func (f metricReader) compute(_ *statAggregate, out *metricValue) {
	out.kind = metricKindUint64
	out.scalar = f()
}

//go:linkname godebug_registerMetric internal/godebug.registerMetric
// godebug_registerMetric is called by internal/godebug (via the linkname
// above) to install the read function for a /godebug/... metric. The metric
// name must already be present in the metrics map.
func godebug_registerMetric(name string, read func() uint64) {
	metricsLock()
	initMetrics()
	d, ok := metrics[name]
	if !ok {
		throw("runtime: unexpected metric registration for " + name)
	}
	d.compute = metricReader(read).compute
	metrics[name] = d
	metricsUnlock()
}

// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep uint

const (
	heapStatsDep statDep = iota // corresponds to heapStatsAggregate
	sysStatsDep                 // corresponds to sysStatsAggregate
	cpuStatsDep                 // corresponds to cpuStatsAggregate
	gcStatsDep                  // corresponds to gcStatsAggregate
	numStatsDeps
)

// statDepSet represents a set of statDeps.
//
// Under the hood, it's a bitmap.
type statDepSet [1]uint64

// makeStatDepSet creates a new statDepSet from a list of statDeps.
func makeStatDepSet(deps ...statDep) statDepSet {
	var s statDepSet
	for _, d := range deps {
		s[d/64] |= 1 << (d % 64)
	}
	return s
}
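
// For illustration: makeStatDepSet(heapStatsDep, cpuStatsDep) sets bits 0 and
// 2 of the bitmap's single word (heapStatsDep == 0, cpuStatsDep == 2), so the
// result is statDepSet{1<<0 | 1<<2}.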

// difference returns the set difference of s and b (the deps in s
// that are not in b) as a new set.
func (s statDepSet) difference(b statDepSet) statDepSet {
	var c statDepSet
	for i := range s {
		c[i] = s[i] &^ b[i]
	}
	return c
}

// union returns the union of the two sets as a new set.
func (s statDepSet) union(b statDepSet) statDepSet {
	var c statDepSet
	for i := range s {
		c[i] = s[i] | b[i]
	}
	return c
}

// empty returns true if there are no dependencies in the set.
func (s *statDepSet) empty() bool {
	for _, c := range s {
		if c != 0 {
			return false
		}
	}
	return true
}

// has returns true if the set contains a given statDep.
func (s *statDepSet) has(d statDep) bool {
	return s[d/64]&(1<<(d%64)) != 0
}

// heapStatsAggregate represents memory stats obtained from the
// runtime. This set of stats is grouped together because they
// depend on each other in some way to make sense of the runtime's
// current heap memory use. They're also sharded across Ps, so it
// makes sense to grab them all at once.
type heapStatsAggregate struct {
	heapStatsDelta

	// Derived from values in heapStatsDelta.

	// inObjects is the bytes of memory occupied by live objects.
	inObjects uint64

	// numObjects is the number of live objects in the heap.
	numObjects uint64

	// totalAllocated is the total bytes of heap objects allocated
	// over the lifetime of the program.
	totalAllocated uint64

	// totalFreed is the total bytes of heap objects freed
	// over the lifetime of the program.
	totalFreed uint64

	// totalAllocs is the number of heap objects allocated over
	// the lifetime of the program.
	totalAllocs uint64

	// totalFrees is the number of heap objects freed over
	// the lifetime of the program.
	totalFrees uint64
}

// compute populates the heapStatsAggregate with values from the runtime.
func (a *heapStatsAggregate) compute() {
	memstats.heapStats.read(&a.heapStatsDelta)

	// Calculate derived stats.
	a.totalAllocs = a.largeAllocCount
	a.totalFrees = a.largeFreeCount
	a.totalAllocated = a.largeAlloc
	a.totalFreed = a.largeFree
	for i := range a.smallAllocCount {
		na := a.smallAllocCount[i]
		nf := a.smallFreeCount[i]
		a.totalAllocs += na
		a.totalFrees += nf
		a.totalAllocated += na * uint64(class_to_size[i])
		a.totalFreed += nf * uint64(class_to_size[i])
	}
	a.inObjects = a.totalAllocated - a.totalFreed
	a.numObjects = a.totalAllocs - a.totalFrees
}

// sysStatsAggregate represents system memory stats obtained
// from the runtime. This set of stats is grouped together because
// they're all relatively cheap to acquire and generally independent
// of one another and other runtime memory stats. The fact that they
// may be acquired at different times, especially with respect to
// heapStatsAggregate, means there could be some skew, but because these
// stats are independent, there's no real consistency issue here.
type sysStatsAggregate struct {
	stacksSys      uint64
	mSpanSys       uint64
	mSpanInUse     uint64
	mCacheSys      uint64
	mCacheInUse    uint64
	buckHashSys    uint64
	gcMiscSys      uint64
	otherSys       uint64
	heapGoal       uint64
	gcCyclesDone   uint64
	gcCyclesForced uint64
}

// compute populates the sysStatsAggregate with values from the runtime.
func (a *sysStatsAggregate) compute() {
	a.stacksSys = memstats.stacks_sys.load()
	a.buckHashSys = memstats.buckhash_sys.load()
	a.gcMiscSys = memstats.gcMiscSys.load()
	a.otherSys = memstats.other_sys.load()
	a.heapGoal = gcController.heapGoal()
	a.gcCyclesDone = uint64(memstats.numgc)
	a.gcCyclesForced = uint64(memstats.numforcedgc)

	systemstack(func() {
		lock(&mheap_.lock)
		a.mSpanSys = memstats.mspan_sys.load()
		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
		a.mCacheSys = memstats.mcache_sys.load()
		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
		unlock(&mheap_.lock)
	})
}

// cpuStatsAggregate represents CPU stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type cpuStatsAggregate struct {
	cpuStats
}

// compute populates the cpuStatsAggregate with values from the runtime.
func (a *cpuStatsAggregate) compute() {
	a.cpuStats = work.cpuStats
	// TODO(mknyszek): Update the CPU stats again so that we're not
	// just relying on the STW snapshot. The issue here is that currently
	// this will cause non-monotonicity in the "user" CPU time metric.
	//
	// a.cpuStats.accumulate(nanotime(), gcphase == _GCmark)
}

// gcStatsAggregate represents various GC stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type gcStatsAggregate struct {
	heapScan    uint64
	stackScan   uint64
	globalsScan uint64
	totalScan   uint64
}

// compute populates the gcStatsAggregate with values from the runtime.
func (a *gcStatsAggregate) compute() {
	a.heapScan = gcController.heapScan.Load()
	a.stackScan = uint64(gcController.lastStackScan.Load())
	a.globalsScan = gcController.globalsScan.Load()
	a.totalScan = a.heapScan + a.stackScan + a.globalsScan
}

// nsToSec takes a duration in nanoseconds and converts it to seconds as
// a float64.
func nsToSec(ns int64) float64 {
	return float64(ns) / 1e9
}

// statAggregate is the main driver of the metrics implementation.
//
// It contains multiple aggregates of runtime statistics, as well
// as a set of these aggregates that it has populated. The aggregates
// are populated lazily by its ensure method.
type statAggregate struct {
	ensured   statDepSet
	heapStats heapStatsAggregate
	sysStats  sysStatsAggregate
	cpuStats  cpuStatsAggregate
	gcStats   gcStatsAggregate
}

// ensure populates statistics aggregates determined by deps if they
// haven't yet been populated.
func (a *statAggregate) ensure(deps *statDepSet) {
	missing := deps.difference(a.ensured)
	if missing.empty() {
		return
	}
	for i := statDep(0); i < numStatsDeps; i++ {
		if !missing.has(i) {
			continue
		}
		switch i {
		case heapStatsDep:
			a.heapStats.compute()
		case sysStatsDep:
			a.sysStats.compute()
		case cpuStatsDep:
			a.cpuStats.compute()
		case gcStatsDep:
			a.gcStats.compute()
		}
	}
	a.ensured = a.ensured.union(missing)
}
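
// For example, if one readMetrics call samples both
// /cpu/classes/total:cpu-seconds and /cpu/classes/user:cpu-seconds, the first
// ensure call populates cpuStats and records cpuStatsDep in ensured, and the
// second returns early: each aggregate is snapshotted at most once per
// readMetrics call (readMetrics resets agg before sampling).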

// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)

// metricSample is a runtime copy of runtime/metrics.Sample and
// must be kept structurally identical to that type.
type metricSample struct {
	name  string
	value metricValue
}

// metricValue is a runtime copy of runtime/metrics.Value and
// must be kept structurally identical to that type.
type metricValue struct {
	kind    metricKind
	scalar  uint64         // contains scalar values for scalar Kinds.
	pointer unsafe.Pointer // contains non-scalar values.
}
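
// Note that for metricKindFloat64 values, scalar holds the float64's IEEE 754
// bit pattern (see the float64bits calls in initMetrics); it must be decoded
// with a float64frombits-style conversion rather than read as an integer.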

// float64HistOrInit tries to pull out an existing float64Histogram
// from the value, but if none exists, then it allocates one with
// the given buckets.
func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
	var hist *metricFloat64Histogram
	if v.kind == metricKindFloat64Histogram && v.pointer != nil {
		hist = (*metricFloat64Histogram)(v.pointer)
	} else {
		v.kind = metricKindFloat64Histogram
		hist = new(metricFloat64Histogram)
		v.pointer = unsafe.Pointer(hist)
	}
	hist.buckets = buckets
	if len(hist.counts) != len(hist.buckets)-1 {
		hist.counts = make([]uint64, len(buckets)-1)
	}
	return hist
}
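
// The resulting histogram has len(buckets)-1 counts, where counts[i] is the
// weight of the bucket [buckets[i], buckets[i+1]), matching the layout of
// runtime/metrics.Float64Histogram.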

// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
// and must be kept structurally identical to that type.
type metricFloat64Histogram struct {
	counts  []uint64
	buckets []float64
}

// agg is used by readMetrics, and is protected by metricsSema.
//
// Managed as a global variable because its pointer will be
// an argument to a dynamically-defined function, and we'd
// like to avoid it escaping to the heap.
var agg statAggregate

type metricName struct {
	name string
	kind metricKind
}

// readMetricNames is the implementation of runtime/metrics.readMetricNames,
// used by the runtime/metrics test and otherwise unreferenced.
//
//go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
func readMetricNames() []string {
	metricsLock()
	initMetrics()
	n := len(metrics)
	metricsUnlock()

	list := make([]string, 0, n)

	metricsLock()
	for name := range metrics {
		list = append(list, name)
	}
	metricsUnlock()

	return list
}

// readMetrics is the implementation of runtime/metrics.Read.
//
//go:linkname readMetrics runtime/metrics.runtime_readMetrics
func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
	// Construct a slice from the args.
	sl := slice{samplesp, len, cap}
	samples := *(*[]metricSample)(unsafe.Pointer(&sl))

	metricsLock()

	// Ensure the map is initialized.
	initMetrics()

	// Clear agg defensively.
	agg = statAggregate{}

	// Sample.
	for i := range samples {
		sample := &samples[i]
		data, ok := metrics[sample.name]
		if !ok {
			sample.value.kind = metricKindBad
			continue
		}
		// Ensure we have all the stats we need.
		// agg is populated lazily.
		agg.ensure(&data.deps)

		// Compute the value based on the stats we have.
		data.compute(&agg, &sample.value)
	}

	metricsUnlock()
}