Source file
src/runtime/tracebuf.go
1
2
3
4
5
6
7 package runtime
8
9 import (
10 "internal/runtime/sys"
11 "internal/trace/tracev2"
12 "unsafe"
13 )
14
15
// Maximum number of bytes needed to encode a uint64 as a base-128
// varint (see traceBuf.varint): ceil(64/7) = 10.
const traceBytesPerNumber = 10
17
18
19
20
21
22
23
24
25
// traceWriter is a position in the trace stream that trace events may
// be written to. It combines a traceLocker (whose embedded fields,
// e.g. mp and gen, are used throughout), the experiment the stream
// belongs to (tracev2.NoExperiment for the main stream), and the
// buffer being written into (may be nil until ensure/refill runs).
type traceWriter struct {
	traceLocker
	exp tracev2.Experiment
	*traceBuf
}
31
32
33
34
35
36
37
38
39
40
41
42
// writer returns a traceWriter for the current M's non-experimental
// buffer for generation tl.gen.
//
// The returned writer should be released with end, which also restores
// the throwsplit state saved here.
func (tl traceLocker) writer() traceWriter {
	if debugTraceReentrancy {
		// Save and set throwsplit while the writer is live.
		// NOTE(review): setting gp.throwsplit makes stack growth fatal,
		// presumably to catch reentrant trace writes triggered by a
		// stack split — confirm against the tracer's reentrancy rules.
		// Only done on the user stack (gp == gp.m.curg).
		gp := getg()
		if gp == gp.m.curg {
			tl.mp.trace.oldthrowsplit = gp.throwsplit
			gp.throwsplit = true
		}
	}
	return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2][tracev2.NoExperiment]}
}
54
55
56
57
58
59
60
61
62
63
64 func unsafeTraceWriter(gen uintptr, buf *traceBuf) traceWriter {
65 return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf}
66 }
67
68
69
70
71
72
73
// event writes one event into the stream: the event type byte, a varint
// timestamp delta, then each argument as a varint.
//
// Returns the (possibly refreshed) writer; use the return value for any
// further writes.
func (w traceWriter) event(ev tracev2.EventType, args ...traceArg) traceWriter {
	// Make sure we have enough room: one byte for the event type, plus
	// up to traceBytesPerNumber bytes each for the timestamp delta and
	// every argument.
	w, _ = w.ensure(1 + (len(args)+1)*traceBytesPerNumber)

	// Compute the timestamp diff. Timestamps within a buffer are kept
	// strictly increasing: if the clock didn't advance past lastTime,
	// clamp to lastTime+1.
	ts := traceClockNow()
	if ts <= w.traceBuf.lastTime {
		ts = w.traceBuf.lastTime + 1
	}
	tsDiff := uint64(ts - w.traceBuf.lastTime)
	w.traceBuf.lastTime = ts

	// Write out the event: type, timestamp delta, then arguments.
	w.byte(byte(ev))
	w.varint(tsDiff)
	for _, arg := range args {
		w.varint(uint64(arg))
	}
	return w
}
97
98
99
100
101
102
103
// end releases the writer: it stores the current buffer back on the M
// and, when reentrancy debugging is on, restores the throwsplit state
// that writer saved.
func (w traceWriter) end() {
	if w.mp == nil {
		// Tolerate a nil mp (e.g. writers built by unsafeTraceWriter,
		// which leave the traceLocker's mp unset).
		return
	}
	w.mp.trace.buf[w.gen%2][w.exp] = w.traceBuf
	if debugTraceReentrancy {
		// The writer is no longer live; undo writer's throwsplit set.
		// Mirrors writer: only touched on the user stack.
		gp := getg()
		if gp == gp.m.curg {
			gp.throwsplit = w.mp.trace.oldthrowsplit
		}
	}
}
120
121
122
123
124
125
126
127
128
129 func (w traceWriter) ensure(maxSize int) (traceWriter, bool) {
130 refill := w.traceBuf == nil || !w.available(maxSize)
131 if refill {
132 w = w.refill()
133 }
134 return w, refill
135 }
136
137
138
139
140
141
142
// flush pushes the writer's buffer (if any) onto the queue of full
// buffers and leaves the writer with no buffer. The next write must go
// through ensure/refill.
func (w traceWriter) flush() traceWriter {
	// traceBufFlush requires trace.lock; take it on the system stack.
	systemstack(func() {
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		unlock(&trace.lock)
	})
	w.traceBuf = nil
	return w
}
154
155
// refill flushes the current buffer (if any) to the full-buffer queue,
// acquires a fresh buffer, and writes the batch header into it.
func (w traceWriter) refill() traceWriter {
	systemstack(func() {
		lock(&trace.lock)
		if w.traceBuf != nil {
			traceBufFlush(w.traceBuf, w.gen)
		}
		if trace.empty != nil {
			// Pop a buffer off the freelist under the lock.
			w.traceBuf = trace.empty
			trace.empty = w.traceBuf.link
			unlock(&trace.lock)
		} else {
			// Freelist is empty: drop the lock before allocating so we
			// don't hold trace.lock across sysAlloc.
			unlock(&trace.lock)
			w.traceBuf = (*traceBuf)(sysAlloc(unsafe.Sizeof(traceBuf{}), &memstats.other_sys, "trace buffer"))
			if w.traceBuf == nil {
				throw("trace: out of memory")
			}
		}
	})

	// Initialize the buffer. Clamp the timestamp so it stays strictly
	// greater than the last event written into this (reused) buffer.
	ts := traceClockNow()
	if ts <= w.traceBuf.lastTime {
		ts = w.traceBuf.lastTime + 1
	}
	w.traceBuf.lastTime = ts
	w.traceBuf.link = nil
	w.traceBuf.pos = 0

	// Tolerate a nil mp: use all-ones as the M identifier.
	mID := ^uint64(0)
	if w.mp != nil {
		mID = uint64(w.mp.procid)
	}

	// Write the batch header: event type (plus experiment byte for
	// experimental streams), generation, M ID, and base timestamp.
	if w.exp == tracev2.NoExperiment {
		w.byte(byte(tracev2.EvEventBatch))
	} else {
		w.byte(byte(tracev2.EvExperimentalBatch))
		w.byte(byte(w.exp))
	}
	w.varint(uint64(w.gen))
	w.varint(uint64(mID))
	w.varint(uint64(ts))
	// Reserve space for the batch length; traceBufFlush fills it in.
	w.traceBuf.lenPos = w.varintReserve()
	return w
}
202
203
204
205 func (tl traceLocker) expWriter(exp tracev2.Experiment) traceWriter {
206 return traceWriter{traceLocker: tl, traceBuf: tl.mp.trace.buf[tl.gen%2][exp], exp: exp}
207 }
208
209
210
211
212
213
214
215
216
217
218
219
220 func unsafeTraceExpWriter(gen uintptr, buf *traceBuf, exp tracev2.Experiment) traceWriter {
221 return traceWriter{traceLocker: traceLocker{gen: gen}, traceBuf: buf, exp: exp}
222 }
223
224
// traceBufQueue is a FIFO queue of traceBufs, linked through
// traceBuf.link. head and tail are both nil when the queue is empty.
type traceBufQueue struct {
	head, tail *traceBuf
}
228
229
230 func (q *traceBufQueue) push(buf *traceBuf) {
231 buf.link = nil
232 if q.head == nil {
233 q.head = buf
234 } else {
235 q.tail.link = buf
236 }
237 q.tail = buf
238 }
239
240
241 func (q *traceBufQueue) pop() *traceBuf {
242 buf := q.head
243 if buf == nil {
244 return nil
245 }
246 q.head = buf.link
247 if q.head == nil {
248 q.tail = nil
249 }
250 buf.link = nil
251 return buf
252 }
253
// empty reports whether the queue contains no buffers.
func (q *traceBufQueue) empty() bool {
	return q.head == nil
}
257
258
// traceBufHeader is the bookkeeping portion of a traceBuf.
type traceBufHeader struct {
	link     *traceBuf // next buffer in a queue/freelist (trace.empty, trace.full)
	lastTime traceTime // timestamp of the last event written to this buffer
	pos      int       // next write offset into traceBuf.arr
	lenPos   int       // offset of the reserved batch-length varint (set by refill, filled by traceBufFlush)
}
265
266
267
268
// traceBuf is a trace batch buffer: the header plus raw event bytes.
// arr is sized so the whole struct occupies exactly
// tracev2.MaxBatchSize bytes. Marked NotInHeap: buffers come from
// sysAlloc (see refill) or the trace.empty freelist, never the GC heap.
type traceBuf struct {
	_ sys.NotInHeap
	traceBufHeader
	arr [tracev2.MaxBatchSize - unsafe.Sizeof(traceBufHeader{})]byte
}
274
275
276
277
278
279
280
281 func (buf *traceBuf) byte(v byte) {
282 buf.arr[buf.pos] = v
283 buf.pos++
284 }
285
286
287
288
289
290
291
292 func (buf *traceBuf) varint(v uint64) {
293 pos := buf.pos
294 arr := buf.arr[pos : pos+traceBytesPerNumber]
295 for i := range arr {
296 if v < 0x80 {
297 pos += i + 1
298 arr[i] = byte(v)
299 break
300 }
301 arr[i] = 0x80 | byte(v)
302 v >>= 7
303 }
304 buf.pos = pos
305 }
306
307
308
309
310
311
312
313
314
315 func (buf *traceBuf) varintReserve() int {
316 p := buf.pos
317 buf.pos += traceBytesPerNumber
318 return p
319 }
320
321
322
323
324
325
326
327 func (buf *traceBuf) stringData(s string) {
328 buf.pos += copy(buf.arr[buf.pos:], s)
329 }
330
331
332
333
334
335 func (buf *traceBuf) available(size int) bool {
336 return len(buf.arr)-buf.pos >= size
337 }
338
339
340
341
342
343
344
345
346
347
348 func (buf *traceBuf) varintAt(pos int, v uint64) {
349 for i := 0; i < traceBytesPerNumber; i++ {
350 if i < traceBytesPerNumber-1 {
351 buf.arr[pos] = 0x80 | byte(v)
352 } else {
353 buf.arr[pos] = byte(v)
354 }
355 v >>= 7
356 pos++
357 }
358 if v != 0 {
359 throw("v could not fit in traceBytesPerNumber")
360 }
361 }
362
363
364
365
366
367
// traceBufFlush finalizes buf's batch and queues it on the full-buffer
// list for generation gen.
//
// trace.lock must be held (callers flush/refill take it on the system
// stack before calling in).
func traceBufFlush(buf *traceBuf, gen uintptr) {
	assertLockHeld(&trace.lock)

	// Fill in the batch length reserved by varintReserve: the payload
	// is everything written after the fixed-width length field itself.
	buf.varintAt(buf.lenPos, uint64(buf.pos-(buf.lenPos+traceBytesPerNumber)))
	trace.full[gen%2].push(buf)

	// Mark that trace work (a full buffer) is available, presumably for
	// the trace reader to pick up — only store when the flag isn't
	// already set, avoiding redundant writes to the shared flag.
	if !trace.workAvailable.Load() {
		trace.workAvailable.Store(true)
	}
}
388
View as plain text