// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Trace time and clock.

package runtime

import (
	"internal/goarch"
	_ "unsafe"
)

// Timestamps in trace are produced through either nanotime or cputicks
// and divided by traceTimeDiv. nanotime is used everywhere except on
// platforms where osHasLowResClock is true, because the system clock
// isn't granular enough to get useful information out of a trace in
// many cases.
//
// This makes absolute values of timestamp diffs smaller, and so they are
// encoded in fewer bytes.
//
// The target resolution in all cases is 64 nanoseconds.
// This is based on the fact that fundamentally the execution tracer won't emit
// events more frequently than roughly every 200 ns or so, because that's roughly
// how long it takes to call through the scheduler.
// We could be more aggressive and bump this up to 128 ns while still getting
// useful data, but the extra bit doesn't save us that much and the headroom is
// nice to have.
//
// Hitting this target resolution is easy in the nanotime case: just pick a
// division of 64. In the cputicks case it's a bit more complex.
//
// For x86, on a 3 GHz machine, we'd want to divide by 3*64 to hit our target.
// To keep the division operation efficient, we round that up to 4*64, or 256.
// Given what cputicks represents, we use this on all other platforms except
// for PowerPC.
// The suggested increment frequency for PowerPC's time base register is
// 512 MHz according to Power ISA v2.07 section 6.2, so we use 32 on ppc64
// and ppc64le.
const traceTimeDiv = (1-osHasLowResClockInt)*64 + osHasLowResClockInt*(256-224*(goarch.IsPpc64|goarch.IsPpc64le))

// traceTime represents a timestamp for the trace.
type traceTime uint64

// traceClockNow returns a monotonic timestamp. The clock this function gets
// the timestamp from is specific to tracing, and shouldn't be mixed with other
// clock sources.
//
// nosplit because it's called from exitsyscall, which is nosplit.
//
// traceClockNow is called by golang.org/x/exp/trace using linkname.
//
//go:linkname traceClockNow
//go:nosplit
func traceClockNow() traceTime {
	if osHasLowResClock {
		return traceTime(cputicks() / traceTimeDiv)
	}
	return traceTime(nanotime() / traceTimeDiv)
}

// traceClockUnitsPerSecond estimates the number of trace clock units that
// elapse per second.
func traceClockUnitsPerSecond() uint64 {
	if osHasLowResClock {
		// We're using cputicks as our clock, so we need a real estimate.
		return uint64(ticksPerSecond() / traceTimeDiv)
	}
	// Our clock is nanotime, so it's just the constant time division.
	// (trace clock units / nanoseconds) * (1e9 nanoseconds / 1 second)
	return uint64(1.0 / float64(traceTimeDiv) * 1e9)
}

// traceFrequency writes a batch with a single EvFrequency event.
//
// freq is the number of trace clock units per second.
func traceFrequency(gen uintptr) {
	w := unsafeTraceWriter(gen, nil)

	// Ensure we have a place to write to.
	w, _ = w.ensure(1 + traceBytesPerNumber /* traceEvFrequency + frequency */)

	// Write out the frequency event.
	w.byte(byte(traceEvFrequency))
	w.varint(traceClockUnitsPerSecond())

	// Immediately flush the buffer.
	systemstack(func() {
		lock(&trace.lock)
		traceBufFlush(w.traceBuf, gen)
		unlock(&trace.lock)
	})
}
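
// The arithmetic behind traceTimeDiv, spelled out (illustrative only; the
// figures come from the derivation comment above the constant):
//
//	nanotime platforms:         64 ns target / 1 ns per unit = 64
//	x86 cputicks at 3 GHz:      3 ticks/ns * 64 ns = 192, rounded up to 4*64 = 256
//	PowerPC time base, 512 MHz: 0.512 ticks/ns * 64 ns = ~32.8, so 32
//
// Below is a minimal sketch of how a trace consumer might convert timestamps
// back to nanoseconds using the frequency emitted by traceFrequency. The
// function traceTicksToNanos is hypothetical and not part of the runtime; it
// assumes unitsPerSecond is the value written by traceClockUnitsPerSecond and
// ignores overflow for very large timestamps.
//
//	func traceTicksToNanos(ts, unitsPerSecond uint64) uint64 {
//		// nanoseconds = trace clock units * (1e9 ns per second / units per second)
//		return ts * 1_000_000_000 / unitsPerSecond
//	}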