Source file src/runtime/tracetime.go
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Trace time and clock.

package runtime

import (
	"internal/goarch"
	"internal/trace/tracev2"
	_ "unsafe"
)

// Timestamps in the trace are produced through either nanotime or cputicks
// and divided by traceTimeDiv. nanotime is used everywhere except on
// platforms where osHasLowResClock is true, because the system clock
// isn't granular enough to get useful information out of a trace in
// many cases.
//
// This makes absolute values of timestamp diffs smaller, and so they are
// encoded in fewer bytes.
//
// The target resolution in all cases is 64 nanoseconds.
// This is based on the fact that fundamentally the execution tracer won't emit
// events more frequently than roughly every 200 ns or so, because that's roughly
// how long it takes to call through the scheduler.
// We could be more aggressive and bump this up to 128 ns while still getting
// useful data, but the extra bit doesn't save us that much and the headroom is
// nice to have.
//
// Hitting this target resolution is easy in the nanotime case: just pick a
// divisor of 64. In the cputicks case it's a bit more complex.
//
// For x86, on a 3 GHz machine, we'd want to divide by 3*64 to hit our target.
// To keep the division operation efficient, we round that up to 4*64, or 256.
// Given what cputicks represents, we use this on all other platforms except
// for PowerPC.
// The suggested increment frequency for PowerPC's time base register is
// 512 MHz according to Power ISA v2.07 section 6.2, so we use 32 on ppc64
// and ppc64le.
const traceTimeDiv = (1-osHasLowResClockInt)*64 + osHasLowResClockInt*(256-224*(goarch.IsPpc64|goarch.IsPpc64le))

// traceTime represents a timestamp for the trace.
type traceTime uint64

// traceClockNow returns a monotonic timestamp. The clock this function gets
// the timestamp from is specific to tracing, and shouldn't be mixed with other
// clock sources.
//
// nosplit because it's called from exitsyscall and various trace writing functions,
// which are nosplit.
//
// traceClockNow is called by golang.org/x/exp/trace using linkname.
//
//go:linkname traceClockNow
//go:nosplit
func traceClockNow() traceTime {
	if osHasLowResClock {
		return traceTime(cputicks() / traceTimeDiv)
	}
	return traceTime(nanotime() / traceTimeDiv)
}

// traceClockUnitsPerSecond estimates the number of trace clock units per
// second that elapse.
func traceClockUnitsPerSecond() uint64 {
	if osHasLowResClock {
		// We're using cputicks as our clock, so we need a real estimate.
		return uint64(ticksPerSecond() / traceTimeDiv)
	}
	// Our clock is nanotime, so it's just the constant time division.
	// (trace clock units / nanoseconds) * (1e9 nanoseconds / 1 second)
	return uint64(1.0 / float64(traceTimeDiv) * 1e9)
}
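
// A worked example of the arithmetic above (illustrative only; the figures
// follow from the comments on traceTimeDiv, not from measurements):
//
//   - On platforms that use nanotime (osHasLowResClock is false),
//     traceTimeDiv is 64, so traceClockUnitsPerSecond reports
//     1e9/64 = 15,625,000 units per second.
//   - On cputicks platforms other than ppc64/ppc64le, traceTimeDiv is 256,
//     so the 3 GHz machine assumed above yields roughly 3e9/256, or about
//     11.7 million units per second (about 85 ns of resolution).
//   - On ppc64 and ppc64le, traceTimeDiv is 256-224 = 32, so a 512 MHz time
//     base yields 512e6/32 = 16 million units per second (62.5 ns of
//     resolution).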

// traceFrequency writes a batch with a single EvFrequency event.
//
// The frequency written is the number of trace clock units per second.
func traceFrequency(gen uintptr) {
	w := unsafeTraceWriter(gen, nil)

	// Ensure we have a place to write to.
	w, _ = w.ensure(1 + traceBytesPerNumber /* tracev2.EvFrequency + frequency */)

	// Write out the frequency event.
	w.byte(byte(tracev2.EvFrequency))
	w.varint(traceClockUnitsPerSecond())

	// Immediately flush the buffer.
	systemstack(func() {
		lock(&trace.lock)
		traceBufFlush(w.traceBuf, gen)
		unlock(&trace.lock)
	})
}
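
// The sketch below is illustrative and not part of this file. It shows how a
// trace consumer might use the frequency written by traceFrequency to turn a
// delta of trace clock units back into nanoseconds; the function name and
// parameters are hypothetical.
//
//	// ticksToNanos converts ticks, a delta in trace clock units, into
//	// nanoseconds, given freq, the number of trace clock units per second
//	// (as reported by an EvFrequency event).
//	func ticksToNanos(ticks, freq uint64) uint64 {
//		return uint64(float64(ticks) / float64(freq) * 1e9)
//	}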