...

Source file src/runtime/metrics.go

Documentation: runtime

		 1  // Copyright 2020 The Go Authors. All rights reserved.
		 2  // Use of this source code is governed by a BSD-style
		 3  // license that can be found in the LICENSE file.
		 4  
		 5  package runtime
		 6  
		 7  // Metrics implementation exported to runtime/metrics.
		 8  
		 9  import (
		10  	"runtime/internal/atomic"
		11  	"unsafe"
		12  )
		13  
		14  var (
		15  	// metrics is a map of runtime/metrics keys to data used by the runtime
		16  	// to sample each metric's value. metricsInit indicates it has been
		17  	// initialized.
		18  	//
		19  	// These fields are protected by metricsSema which should be
		20  	// locked/unlocked with metricsLock() / metricsUnlock().
		21  	metricsSema uint32 = 1
		22  	metricsInit bool
		23  	metrics     map[string]metricData
		24  
		25  	sizeClassBuckets []float64
		26  	timeHistBuckets  []float64
		27  )
		28  
		29  type metricData struct {
		30  	// deps is the set of runtime statistics that this metric
		31  	// depends on. Before compute is called, the statAggregate
		32  	// which will be passed must ensure() these dependencies.
		33  	deps statDepSet
		34  
		35  	// compute is a function that populates a metricValue
		36  	// given a populated statAggregate structure.
		37  	compute func(in *statAggregate, out *metricValue)
		38  }
		39  
		40  func metricsLock() {
		41  	// Acquire the metricsSema but with handoff. Operations are typically
		42  	// expensive enough that queueing up goroutines and handing off between
		43  	// them will be noticeably better-behaved.
		44  	semacquire1(&metricsSema, true, 0, 0)
		45  	if raceenabled {
		46  		raceacquire(unsafe.Pointer(&metricsSema))
		47  	}
		48  }
		49  
		50  func metricsUnlock() {
		51  	if raceenabled {
		52  		racerelease(unsafe.Pointer(&metricsSema))
		53  	}
		54  	semrelease(&metricsSema)
		55  }
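// Usage sketch (illustrative): callers bracket access to the metrics map
// with this pair, exactly as readMetrics does below:
//
//	metricsLock()
//	initMetrics()
//	// ... read the metrics map, sample values ...
//	metricsUnlock()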
		56  
		57  // initMetrics initializes the metrics map if it hasn't been yet.
		58  //
		59  // metricsSema must be held.
		60  func initMetrics() {
		61  	if metricsInit {
		62  		return
		63  	}
		64  
		65  	sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
		66  	// Skip size class 0 which is a stand-in for large objects, but large
		67  	// objects are tracked separately (and they actually get placed in
		68  	// the last bucket, not the first).
		69  	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
		70  	for i := 1; i < _NumSizeClasses; i++ {
		71  		// Size classes have an inclusive upper-bound
		72  		// and exclusive lower bound (e.g. 48-byte size class is
		73  		// (32, 48]) whereas we want an inclusive lower-bound
		74  		// and exclusive upper-bound (e.g. 48-byte size class is
		75  		// [33, 49). We can achieve this by shifting all bucket
		76  		// boundaries up by 1.
		77  		//
		78  		// Also, a float64 can precisely represent integers with
		79  		// value up to 2^53 and size classes are relatively small
		80  		// (nowhere near 2^48 even) so this will give us exact
		81  		// boundaries.
		82  		sizeClassBuckets[i] = float64(class_to_size[i] + 1)
		83  	}
		84  	sizeClassBuckets = append(sizeClassBuckets, float64Inf())
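	// For example (illustrative): with the smallest size classes being
	// 8, 16, 24, 32, and 48 bytes, the bucket boundaries built above are
	// 1, 9, 17, 25, 33, 49, ..., +Inf, so a 48-byte allocation is counted
	// in the [33, 49) bucket.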
		85  
		86  	timeHistBuckets = timeHistogramMetricsBuckets()
		87  	metrics = map[string]metricData{
		88  		"/gc/cycles/automatic:gc-cycles": {
		89  			deps: makeStatDepSet(sysStatsDep),
		90  			compute: func(in *statAggregate, out *metricValue) {
		91  				out.kind = metricKindUint64
		92  				out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
		93  			},
		94  		},
		95  		"/gc/cycles/forced:gc-cycles": {
		96  			deps: makeStatDepSet(sysStatsDep),
		97  			compute: func(in *statAggregate, out *metricValue) {
		98  				out.kind = metricKindUint64
		99  				out.scalar = in.sysStats.gcCyclesForced
	 100  			},
	 101  		},
	 102  		"/gc/cycles/total:gc-cycles": {
	 103  			deps: makeStatDepSet(sysStatsDep),
	 104  			compute: func(in *statAggregate, out *metricValue) {
	 105  				out.kind = metricKindUint64
	 106  				out.scalar = in.sysStats.gcCyclesDone
	 107  			},
	 108  		},
	 109  		"/gc/heap/allocs-by-size:bytes": {
	 110  			deps: makeStatDepSet(heapStatsDep),
	 111  			compute: func(in *statAggregate, out *metricValue) {
	 112  				hist := out.float64HistOrInit(sizeClassBuckets)
	 113  				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
	 114  				// Cut off the first index which is ostensibly for size class 0,
	 115  				// but large objects are tracked separately so it's actually unused.
	 116  				for i, count := range in.heapStats.smallAllocCount[1:] {
	 117  					hist.counts[i] = uint64(count)
	 118  				}
	 119  			},
	 120  		},
	 121  		"/gc/heap/allocs:bytes": {
	 122  			deps: makeStatDepSet(heapStatsDep),
	 123  			compute: func(in *statAggregate, out *metricValue) {
	 124  				out.kind = metricKindUint64
	 125  				out.scalar = in.heapStats.totalAllocated
	 126  			},
	 127  		},
	 128  		"/gc/heap/allocs:objects": {
	 129  			deps: makeStatDepSet(heapStatsDep),
	 130  			compute: func(in *statAggregate, out *metricValue) {
	 131  				out.kind = metricKindUint64
	 132  				out.scalar = in.heapStats.totalAllocs
	 133  			},
	 134  		},
	 135  		"/gc/heap/frees-by-size:bytes": {
	 136  			deps: makeStatDepSet(heapStatsDep),
	 137  			compute: func(in *statAggregate, out *metricValue) {
	 138  				hist := out.float64HistOrInit(sizeClassBuckets)
	 139  				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
	 140  				// Cut off the first index which is ostensibly for size class 0,
	 141  				// but large objects are tracked separately so it's actually unused.
	 142  				for i, count := range in.heapStats.smallFreeCount[1:] {
	 143  					hist.counts[i] = uint64(count)
	 144  				}
	 145  			},
	 146  		},
	 147  		"/gc/heap/frees:bytes": {
	 148  			deps: makeStatDepSet(heapStatsDep),
	 149  			compute: func(in *statAggregate, out *metricValue) {
	 150  				out.kind = metricKindUint64
	 151  				out.scalar = in.heapStats.totalFreed
	 152  			},
	 153  		},
	 154  		"/gc/heap/frees:objects": {
	 155  			deps: makeStatDepSet(heapStatsDep),
	 156  			compute: func(in *statAggregate, out *metricValue) {
	 157  				out.kind = metricKindUint64
	 158  				out.scalar = in.heapStats.totalFrees
	 159  			},
	 160  		},
	 161  		"/gc/heap/goal:bytes": {
	 162  			deps: makeStatDepSet(sysStatsDep),
	 163  			compute: func(in *statAggregate, out *metricValue) {
	 164  				out.kind = metricKindUint64
	 165  				out.scalar = in.sysStats.heapGoal
	 166  			},
	 167  		},
	 168  		"/gc/heap/objects:objects": {
	 169  			deps: makeStatDepSet(heapStatsDep),
	 170  			compute: func(in *statAggregate, out *metricValue) {
	 171  				out.kind = metricKindUint64
	 172  				out.scalar = in.heapStats.numObjects
	 173  			},
	 174  		},
	 175  		"/gc/heap/tiny/allocs:objects": {
	 176  			deps: makeStatDepSet(heapStatsDep),
	 177  			compute: func(in *statAggregate, out *metricValue) {
	 178  				out.kind = metricKindUint64
	 179  				out.scalar = uint64(in.heapStats.tinyAllocCount)
	 180  			},
	 181  		},
	 182  		"/gc/pauses:seconds": {
	 183  			compute: func(_ *statAggregate, out *metricValue) {
	 184  				hist := out.float64HistOrInit(timeHistBuckets)
	 185  				// The bottom-most bucket, containing negative values, is tracked
	 186  				// separately as underflow, so fill that in manually and then
	 187  				// iterate over the rest.
	 188  				hist.counts[0] = atomic.Load64(&memstats.gcPauseDist.underflow)
	 189  				for i := range memstats.gcPauseDist.counts {
	 190  					hist.counts[i+1] = atomic.Load64(&memstats.gcPauseDist.counts[i])
	 191  				}
	 192  			},
	 193  		},
	 194  		"/memory/classes/heap/free:bytes": {
	 195  			deps: makeStatDepSet(heapStatsDep),
	 196  			compute: func(in *statAggregate, out *metricValue) {
	 197  				out.kind = metricKindUint64
	 198  				out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
	 199  					in.heapStats.inStacks - in.heapStats.inWorkBufs -
	 200  					in.heapStats.inPtrScalarBits)
	 201  			},
	 202  		},
	 203  		"/memory/classes/heap/objects:bytes": {
	 204  			deps: makeStatDepSet(heapStatsDep),
	 205  			compute: func(in *statAggregate, out *metricValue) {
	 206  				out.kind = metricKindUint64
	 207  				out.scalar = in.heapStats.inObjects
	 208  			},
	 209  		},
	 210  		"/memory/classes/heap/released:bytes": {
	 211  			deps: makeStatDepSet(heapStatsDep),
	 212  			compute: func(in *statAggregate, out *metricValue) {
	 213  				out.kind = metricKindUint64
	 214  				out.scalar = uint64(in.heapStats.released)
	 215  			},
	 216  		},
	 217  		"/memory/classes/heap/stacks:bytes": {
	 218  			deps: makeStatDepSet(heapStatsDep),
	 219  			compute: func(in *statAggregate, out *metricValue) {
	 220  				out.kind = metricKindUint64
	 221  				out.scalar = uint64(in.heapStats.inStacks)
	 222  			},
	 223  		},
	 224  		"/memory/classes/heap/unused:bytes": {
	 225  			deps: makeStatDepSet(heapStatsDep),
	 226  			compute: func(in *statAggregate, out *metricValue) {
	 227  				out.kind = metricKindUint64
	 228  				out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
	 229  			},
	 230  		},
	 231  		"/memory/classes/metadata/mcache/free:bytes": {
	 232  			deps: makeStatDepSet(sysStatsDep),
	 233  			compute: func(in *statAggregate, out *metricValue) {
	 234  				out.kind = metricKindUint64
	 235  				out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
	 236  			},
	 237  		},
	 238  		"/memory/classes/metadata/mcache/inuse:bytes": {
	 239  			deps: makeStatDepSet(sysStatsDep),
	 240  			compute: func(in *statAggregate, out *metricValue) {
	 241  				out.kind = metricKindUint64
	 242  				out.scalar = in.sysStats.mCacheInUse
	 243  			},
	 244  		},
	 245  		"/memory/classes/metadata/mspan/free:bytes": {
	 246  			deps: makeStatDepSet(sysStatsDep),
	 247  			compute: func(in *statAggregate, out *metricValue) {
	 248  				out.kind = metricKindUint64
	 249  				out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
	 250  			},
	 251  		},
	 252  		"/memory/classes/metadata/mspan/inuse:bytes": {
	 253  			deps: makeStatDepSet(sysStatsDep),
	 254  			compute: func(in *statAggregate, out *metricValue) {
	 255  				out.kind = metricKindUint64
	 256  				out.scalar = in.sysStats.mSpanInUse
	 257  			},
	 258  		},
	 259  		"/memory/classes/metadata/other:bytes": {
	 260  			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
	 261  			compute: func(in *statAggregate, out *metricValue) {
	 262  				out.kind = metricKindUint64
	 263  				out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys
	 264  			},
	 265  		},
	 266  		"/memory/classes/os-stacks:bytes": {
	 267  			deps: makeStatDepSet(sysStatsDep),
	 268  			compute: func(in *statAggregate, out *metricValue) {
	 269  				out.kind = metricKindUint64
	 270  				out.scalar = in.sysStats.stacksSys
	 271  			},
	 272  		},
	 273  		"/memory/classes/other:bytes": {
	 274  			deps: makeStatDepSet(sysStatsDep),
	 275  			compute: func(in *statAggregate, out *metricValue) {
	 276  				out.kind = metricKindUint64
	 277  				out.scalar = in.sysStats.otherSys
	 278  			},
	 279  		},
	 280  		"/memory/classes/profiling/buckets:bytes": {
	 281  			deps: makeStatDepSet(sysStatsDep),
	 282  			compute: func(in *statAggregate, out *metricValue) {
	 283  				out.kind = metricKindUint64
	 284  				out.scalar = in.sysStats.buckHashSys
	 285  			},
	 286  		},
	 287  		"/memory/classes/total:bytes": {
	 288  			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
	 289  			compute: func(in *statAggregate, out *metricValue) {
	 290  				out.kind = metricKindUint64
	 291  				out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
	 292  					in.sysStats.stacksSys + in.sysStats.mSpanSys +
	 293  					in.sysStats.mCacheSys + in.sysStats.buckHashSys +
	 294  					in.sysStats.gcMiscSys + in.sysStats.otherSys
	 295  			},
	 296  		},
	 297  		"/sched/goroutines:goroutines": {
	 298  			compute: func(_ *statAggregate, out *metricValue) {
	 299  				out.kind = metricKindUint64
	 300  				out.scalar = uint64(gcount())
	 301  			},
	 302  		},
	 303  		"/sched/latencies:seconds": {
	 304  			compute: func(_ *statAggregate, out *metricValue) {
	 305  				hist := out.float64HistOrInit(timeHistBuckets)
	 306  				hist.counts[0] = atomic.Load64(&sched.timeToRun.underflow)
	 307  				for i := range sched.timeToRun.counts {
	 308  					hist.counts[i+1] = atomic.Load64(&sched.timeToRun.counts[i])
	 309  				}
	 310  			},
	 311  		},
	 312  	}
	 313  	metricsInit = true
	 314  }
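// Every entry above follows the same shape: deps declares which stat groups
// the metric needs, and compute derives the value from the populated
// aggregate. A hypothetical new metric (not one that exists) would be wired
// up the same way, e.g.:
//
//	"/example/heap/live:bytes": {
//		deps: makeStatDepSet(heapStatsDep),
//		compute: func(in *statAggregate, out *metricValue) {
//			out.kind = metricKindUint64
//			out.scalar = in.heapStats.inObjects
//		},
//	},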
	 315  
	 316  // statDep is a dependency on a group of statistics
	 317  // that a metric might have.
	 318  type statDep uint
	 319  
	 320  const (
	 321  	heapStatsDep statDep = iota // corresponds to heapStatsAggregate
	 322  	sysStatsDep                 // corresponds to sysStatsAggregate
	 323  	numStatsDeps
	 324  )
	 325  
	 326  // statDepSet represents a set of statDeps.
	 327  //
	 328  // Under the hood, it's a bitmap.
	 329  type statDepSet [1]uint64
	 330  
	 331  // makeStatDepSet creates a new statDepSet from a list of statDeps.
	 332  func makeStatDepSet(deps ...statDep) statDepSet {
	 333  	var s statDepSet
	 334  	for _, d := range deps {
	 335  		s[d/64] |= 1 << (d % 64)
	 336  	}
	 337  	return s
	 338  }
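// For example (illustrative): with heapStatsDep == 0 and sysStatsDep == 1,
// makeStatDepSet(heapStatsDep, sysStatsDep) returns a set whose single
// uint64 word is 0b11, i.e. both bits set.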
	 339  
	 340  // difference returns the set difference of s from b as a new set.
	 341  func (s statDepSet) difference(b statDepSet) statDepSet {
	 342  	var c statDepSet
	 343  	for i := range s {
	 344  		c[i] = s[i] &^ b[i]
	 345  	}
	 346  	return c
	 347  }
	 348  
	 349  // union returns the union of the two sets as a new set.
	 350  func (s statDepSet) union(b statDepSet) statDepSet {
	 351  	var c statDepSet
	 352  	for i := range s {
	 353  		c[i] = s[i] | b[i]
	 354  	}
	 355  	return c
	 356  }
	 357  
	 358  // empty returns true if there are no dependencies in the set.
	 359  func (s *statDepSet) empty() bool {
	 360  	for _, c := range s {
	 361  		if c != 0 {
	 362  			return false
	 363  		}
	 364  	}
	 365  	return true
	 366  }
	 367  
	 368  // has returns true if the set contains a given statDep.
	 369  func (s *statDepSet) has(d statDep) bool {
	 370  	return s[d/64]&(1<<(d%64)) != 0
	 371  }
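// A quick sketch of the set operations (illustrative):
//
//	a := makeStatDepSet(heapStatsDep, sysStatsDep)
//	b := makeStatDepSet(sysStatsDep)
//	_ = a.difference(b)     // contains only heapStatsDep
//	_ = a.union(b)          // same contents as a
//	_ = b.has(heapStatsDep) // false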
	 372  
	 373  // heapStatsAggregate represents memory stats obtained from the
	 374  // runtime. This set of stats is grouped together because they
	 375  // depend on each other in some way to make sense of the runtime's
	 376  // current heap memory use. They're also sharded across Ps, so it
	 377  // makes sense to grab them all at once.
	 378  type heapStatsAggregate struct {
	 379  	heapStatsDelta
	 380  
	 381  	// Derived from values in heapStatsDelta.
	 382  
	 383  	// inObjects is the bytes of memory occupied by objects.
	 384  	inObjects uint64
	 385  
	 386  	// numObjects is the number of live objects in the heap.
	 387  	numObjects uint64
	 388  
	 389  	// totalAllocated is the total bytes of heap objects allocated
	 390  	// over the lifetime of the program.
	 391  	totalAllocated uint64
	 392  
	 393  	// totalFreed is the total bytes of heap objects freed
	 394  	// over the lifetime of the program.
	 395  	totalFreed uint64
	 396  
	 397  	// totalAllocs is the number of heap objects allocated over
	 398  	// the lifetime of the program.
	 399  	totalAllocs uint64
	 400  
	 401  	// totalFrees is the number of heap objects freed over
	 402  	// the lifetime of the program.
	 403  	totalFrees uint64
	 404  }
	 405  
	 406  // compute populates the heapStatsAggregate with values from the runtime.
	 407  func (a *heapStatsAggregate) compute() {
	 408  	memstats.heapStats.read(&a.heapStatsDelta)
	 409  
	 410  	// Calculate derived stats.
	 411  	a.totalAllocs = a.largeAllocCount
	 412  	a.totalFrees = a.largeFreeCount
	 413  	a.totalAllocated = a.largeAlloc
	 414  	a.totalFreed = a.largeFree
	 415  	for i := range a.smallAllocCount {
	 416  		na := a.smallAllocCount[i]
	 417  		nf := a.smallFreeCount[i]
	 418  		a.totalAllocs += na
	 419  		a.totalFrees += nf
	 420  		a.totalAllocated += na * uint64(class_to_size[i])
	 421  		a.totalFreed += nf * uint64(class_to_size[i])
	 422  	}
	 423  	a.inObjects = a.totalAllocated - a.totalFreed
	 424  	a.numObjects = a.totalAllocs - a.totalFrees
	 425  }
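// Worked example (illustrative): if 10 objects were ever allocated in the
// 48-byte size class and 4 of them freed, that class contributes
// 10*48 = 480 bytes to totalAllocated, 4*48 = 192 bytes to totalFreed,
// and hence 6 objects and 288 bytes to numObjects and inObjects.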
	 426  
	 427  // sysStatsAggregate represents system memory stats obtained
	 428  // from the runtime. This set of stats is grouped together because
	 429  // they're all relatively cheap to acquire and generally independent
	 430  // of one another and other runtime memory stats. The fact that they
	 431  // may be acquired at different times, especially with respect to
	 432  // heapStatsAggregate, means there could be some skew, but because
	 433  // these stats are independent, there's no real consistency issue here.
	 434  type sysStatsAggregate struct {
	 435  	stacksSys      uint64
	 436  	mSpanSys       uint64
	 437  	mSpanInUse     uint64
	 438  	mCacheSys      uint64
	 439  	mCacheInUse    uint64
	 440  	buckHashSys    uint64
	 441  	gcMiscSys      uint64
	 442  	otherSys       uint64
	 443  	heapGoal       uint64
	 444  	gcCyclesDone   uint64
	 445  	gcCyclesForced uint64
	 446  }
	 447  
	 448  // compute populates the sysStatsAggregate with values from the runtime.
	 449  func (a *sysStatsAggregate) compute() {
	 450  	a.stacksSys = memstats.stacks_sys.load()
	 451  	a.buckHashSys = memstats.buckhash_sys.load()
	 452  	a.gcMiscSys = memstats.gcMiscSys.load()
	 453  	a.otherSys = memstats.other_sys.load()
	 454  	a.heapGoal = atomic.Load64(&gcController.heapGoal)
	 455  	a.gcCyclesDone = uint64(memstats.numgc)
	 456  	a.gcCyclesForced = uint64(memstats.numforcedgc)
	 457  
	 458  	systemstack(func() {
	 459  		lock(&mheap_.lock)
	 460  		a.mSpanSys = memstats.mspan_sys.load()
	 461  		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
	 462  		a.mCacheSys = memstats.mcache_sys.load()
	 463  		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
	 464  		unlock(&mheap_.lock)
	 465  	})
	 466  }
	 467  
	 468  // statAggregate is the main driver of the metrics implementation.
	 469  //
	 470  // It contains multiple aggregates of runtime statistics, as well
	 471  // as a set of these aggregates that it has populated. The aggregates
	 472  // are populated lazily by its ensure method.
	 473  type statAggregate struct {
	 474  	ensured   statDepSet
	 475  	heapStats heapStatsAggregate
	 476  	sysStats  sysStatsAggregate
	 477  }
	 478  
	 479  // ensure populates statistics aggregates determined by deps if they
	 480  // haven't yet been populated.
	 481  func (a *statAggregate) ensure(deps *statDepSet) {
	 482  	missing := deps.difference(a.ensured)
	 483  	if missing.empty() {
	 484  		return
	 485  	}
	 486  	for i := statDep(0); i < numStatsDeps; i++ {
	 487  		if !missing.has(i) {
	 488  			continue
	 489  		}
	 490  		switch i {
	 491  		case heapStatsDep:
	 492  			a.heapStats.compute()
	 493  		case sysStatsDep:
	 494  			a.sysStats.compute()
	 495  		}
	 496  	}
	 497  	a.ensured = a.ensured.union(missing)
	 498  }
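// For example (illustrative), repeated calls with the same dependencies
// only compute each aggregate once:
//
//	var a statAggregate
//	deps := makeStatDepSet(sysStatsDep)
//	a.ensure(&deps) // runs a.sysStats.compute()
//	a.ensure(&deps) // no-op: sysStatsDep is already in a.ensured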
	 499  
	 500  // metricKind is a runtime copy of runtime/metrics.ValueKind and
	 501  // must be kept structurally identical to that type.
	 502  type metricKind int
	 503  
	 504  const (
	 505  	// These values must be kept identical to their corresponding Kind* values
	 506  	// in the runtime/metrics package.
	 507  	metricKindBad metricKind = iota
	 508  	metricKindUint64
	 509  	metricKindFloat64
	 510  	metricKindFloat64Histogram
	 511  )
	 512  
	 513  // metricSample is a runtime copy of runtime/metrics.Sample and
	 514  // must be kept structurally identical to that type.
	 515  type metricSample struct {
	 516  	name  string
	 517  	value metricValue
	 518  }
	 519  
	 520  // metricValue is a runtime copy of runtime/metrics.Value and
	 521  // must be kept structurally identical to that type.
	 522  type metricValue struct {
	 523  	kind    metricKind
	 524  	scalar  uint64         // contains scalar values for scalar Kinds.
	 525  	pointer unsafe.Pointer // contains non-scalar values.
	 526  }
	 527  
	 528  // float64HistOrInit tries to pull out an existing float64Histogram
	 529  // from the value, but if none exists, then it allocates one with
	 530  // the given buckets.
	 531  func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
	 532  	var hist *metricFloat64Histogram
	 533  	if v.kind == metricKindFloat64Histogram && v.pointer != nil {
	 534  		hist = (*metricFloat64Histogram)(v.pointer)
	 535  	} else {
	 536  		v.kind = metricKindFloat64Histogram
	 537  		hist = new(metricFloat64Histogram)
	 538  		v.pointer = unsafe.Pointer(hist)
	 539  	}
	 540  	hist.buckets = buckets
	 541  	if len(hist.counts) != len(hist.buckets)-1 {
	 542  		hist.counts = make([]uint64, len(buckets)-1)
	 543  	}
	 544  	return hist
	 545  }
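// For example (illustrative), passing the same metricValue twice reuses the
// histogram rather than allocating a new one:
//
//	var v metricValue
//	h1 := v.float64HistOrInit(timeHistBuckets)
//	h2 := v.float64HistOrInit(timeHistBuckets)
//	// h1 == h2; counts is only reallocated if the bucket count changes.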
	 546  
	 547  // metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
	 548  // and must be kept structurally identical to that type.
	 549  type metricFloat64Histogram struct {
	 550  	counts  []uint64
	 551  	buckets []float64
	 552  }
	 553  
	 554  // agg is used by readMetrics, and is protected by metricsSema.
	 555  //
	 556  // Managed as a global variable because its pointer will be
	 557  // an argument to a dynamically-defined function, and we'd
	 558  // like to avoid it escaping to the heap.
	 559  var agg statAggregate
	 560  
	 561  // readMetrics is the implementation of runtime/metrics.Read.
	 562  //
	 563  //go:linkname readMetrics runtime/metrics.runtime_readMetrics
	 564  func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
	 565  	// Construct a slice from the args.
	 566  	sl := slice{samplesp, len, cap}
	 567  	samples := *(*[]metricSample)(unsafe.Pointer(&sl))
	 568  
	 569  	metricsLock()
	 570  
	 571  	// Ensure the map is initialized.
	 572  	initMetrics()
	 573  
	 574  	// Clear agg defensively.
	 575  	agg = statAggregate{}
	 576  
	 577  	// Sample.
	 578  	for i := range samples {
	 579  		sample := &samples[i]
	 580  		data, ok := metrics[sample.name]
	 581  		if !ok {
	 582  			sample.value.kind = metricKindBad
	 583  			continue
	 584  		}
	 585  		// Ensure we have all the stats we need.
	 586  		// agg is populated lazily.
	 587  		agg.ensure(&data.deps)
	 588  
	 589  		// Compute the value based on the stats we have.
	 590  		data.compute(&agg, &sample.value)
	 591  	}
	 592  
	 593  	metricsUnlock()
	 594  }
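// A rough sketch of how this is reached from user code via the public
// runtime/metrics package (all names below are from that package):
//
//	import "runtime/metrics"
//
//	descs := metrics.All()
//	samples := make([]metrics.Sample, len(descs))
//	for i := range samples {
//		samples[i].Name = descs[i].Name
//	}
//	metrics.Read(samples) // lands in readMetrics above via go:linkname
//	for _, s := range samples {
//		switch s.Value.Kind() {
//		case metrics.KindUint64:
//			println(s.Name, s.Value.Uint64())
//		case metrics.KindFloat64Histogram:
//			h := s.Value.Float64Histogram()
//			_ = h.Counts // len(h.Counts) == len(h.Buckets)-1
//		}
//	}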
	 595  
