Source file src/runtime/time.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Time-related runtime and pieces of package time.

package runtime

import (
	"internal/abi"
	"internal/runtime/atomic"
	"internal/runtime/sys"
	"unsafe"
)

// time_runtimeNow returns the current wall-clock time as seconds and
// nanoseconds, plus a monotonic reading. Inside a synctest bubble it
// reports the bubble's fake clock (sg.now) instead and omits the
// monotonic reading entirely.
//
//go:linkname time_runtimeNow time.runtimeNow
func time_runtimeNow() (sec int64, nsec int32, mono int64) {
	if sg := getg().syncGroup; sg != nil {
		sec = sg.now / (1000 * 1000 * 1000)
		nsec = int32(sg.now % (1000 * 1000 * 1000))
		// Don't return a monotonic time inside a synctest bubble.
		// If we return a monotonic time based on the fake clock,
		// arithmetic on times created inside/outside bubbles is confusing.
		// If we return a monotonic time based on the real monotonic clock,
		// arithmetic on times created in the same bubble is confusing.
		// Simplest is to omit the monotonic time within a bubble.
		return sec, nsec, 0
	}
	return time_now()
}

// time_runtimeNano returns the synctest bubble's fake nanotime when the
// calling goroutine is in a bubble, and the real monotonic clock otherwise.
//
//go:linkname time_runtimeNano time.runtimeNano
func time_runtimeNano() int64 {
	gp := getg()
	if gp.syncGroup != nil {
		return gp.syncGroup.now
	}
	return nanotime()
}

// time_runtimeIsBubbled reports whether the calling goroutine is inside
// a synctest bubble.
//
//go:linkname time_runtimeIsBubbled time.runtimeIsBubbled
func time_runtimeIsBubbled() bool {
	return getg().syncGroup != nil
}

// A timer is a potentially repeating trigger for calling t.f(t.arg, t.seq).
// Timers are allocated by client code, often as part of other data structures.
// Each P has a heap of pointers to timers that it manages.
//
// A timer is expected to be used by only one client goroutine at a time,
// but there will be concurrent access by the P managing that timer.
// Timer accesses are protected by the lock t.mu, with a snapshot of
// t's state bits published in t.astate to enable certain fast paths to make
// decisions about a timer without acquiring the lock.
type timer struct {
	// mu protects reads and writes to all fields, with exceptions noted below.
	mu mutex

	astate atomic.Uint8 // atomic copy of state bits at last unlock
	state  uint8        // state bits
	isChan bool         // timer has a channel; immutable; can be read without lock
	isFake bool         // timer is using fake time; immutable; can be read without lock

	blocked uint32 // number of goroutines blocked on timer's channel

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(arg, seq, delay) in the timer goroutine, so f must be
	// a well-behaved function and not block.
	//
	// The arg and seq are client-specified opaque arguments passed back to f.
	// When used from netpoll, arg and seq have meanings defined by netpoll
	// and are completely opaque to this code; in that context, seq is a sequence
	// number to recognize and squelch stale function invocations.
	// When used from package time, arg is a channel (for After, NewTicker)
	// or the function to call (for AfterFunc) and seq is unused (0).
	//
	// Package time does not know about seq, but if this is a channel timer (t.isChan == true),
	// this file uses t.seq as a sequence number to recognize and squelch
	// sends that correspond to an earlier (stale) timer configuration,
	// similar to its use in netpoll. In this usage (that is, when t.isChan == true),
	// writes to seq are protected by both t.mu and t.sendLock,
	// so reads are allowed when holding either of the two mutexes.
	//
	// The delay argument is nanotime() - t.when, meaning the delay in ns between
	// when the timer should have gone off and now. Normally that amount is
	// small enough not to matter, but for channel timers that are fed lazily,
	// the delay can be arbitrarily long; package time subtracts it out to make
	// it look like the send happened earlier than it actually did.
	// (No one looked at the channel since then, or the send would have
	// not happened so late, so no one can tell the difference.)
	when   int64
	period int64
	f      func(arg any, seq uintptr, delay int64)
	arg    any
	seq    uintptr

	// If non-nil, the timers containing t.
	ts *timers

	// sendLock protects sends on the timer's channel.
	// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
	sendLock mutex

	// isSending is used to handle races between running a
	// channel timer and stopping or resetting the timer.
	// It is used only for channel timers (t.isChan == true).
	// It is not used for tickers.
	// The value is incremented when about to send a value on the channel,
	// and decremented after sending the value.
	// The stop/reset code uses this to detect whether it
	// stopped the channel send.
	//
	// isSending is incremented only when t.mu is held.
	// isSending is decremented only when t.sendLock is held.
	// isSending is read only when both t.mu and t.sendLock are held.
	isSending atomic.Int32
}

// init initializes a newly allocated timer t.
// Any code that allocates a timer must call t.init before using it.
// The arg and f can be set during init, or they can be nil in init
// and set by a future call to t.modify.
func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any) {
	lockInit(&t.mu, lockRankTimer)
	t.f = f
	t.arg = arg
}

// A timers is a per-P set of timers.
type timers struct {
	// mu protects timers; timers are per-P, but the scheduler can
	// access the timers of another P, so we have to lock.
	mu mutex

	// heap is the set of timers, ordered by heap[i].when.
	// Must hold lock to access.
	heap []timerWhen

	// len is an atomic copy of len(heap).
	len atomic.Uint32

	// zombies is the number of timers in the heap
	// that are marked for removal.
	zombies atomic.Int32

	// raceCtx is the race context used while executing timer functions.
	raceCtx uintptr

	// minWhenHeap is the minimum heap[i].when value (= heap[0].when).
	// The wakeTime method uses minWhenHeap and minWhenModified
	// to determine the next wake time.
	// If minWhenHeap = 0, it means there are no timers in the heap.
	minWhenHeap atomic.Int64

	// minWhenModified is a lower bound on the minimum
	// heap[i].when over timers with the timerModified bit set.
	// If minWhenModified = 0, it means there are no timerModified timers in the heap.
	minWhenModified atomic.Int64

	// syncGroup, if non-nil, is the synctest bubble whose fake clock
	// drives this timer set.
	syncGroup *synctestGroup
}

// A timerWhen is a heap entry: a timer plus a cached copy of its when field,
// so the heap can be ordered without locking each timer.
type timerWhen struct {
	timer *timer
	when  int64
}

// lock locks ts.
func (ts *timers) lock() {
	lock(&ts.mu)
}

// unlock refreshes the atomic copy of the heap length and unlocks ts.
func (ts *timers) unlock() {
	// Update atomic copy of len(ts.heap).
	// We only update at unlock so that the len is always
	// the most recent unlocked length, not an ephemeral length.
	// This matters if we lock ts, delete the only timer from the heap,
	// add it back, and unlock. We want ts.len.Load to return 1 the
	// entire time, never 0. This is important for pidleput deciding
	// whether ts is empty.
	ts.len.Store(uint32(len(ts.heap)))

	unlock(&ts.mu)
}

// Timer state field.
const (
	// timerHeaped is set when the timer is stored in some P's heap.
	timerHeaped uint8 = 1 << iota

	// timerModified is set when t.when has been modified
	// but the heap's heap[i].when entry still needs to be updated.
	// That change waits until the heap in which
	// the timer appears can be locked and rearranged.
	// timerModified is only set when timerHeaped is also set.
	timerModified

	// timerZombie is set when the timer has been stopped
	// but is still present in some P's heap.
	// Only set when timerHeaped is also set.
	// It is possible for timerModified and timerZombie to both
	// be set, meaning that the timer was modified and then stopped.
	// A timer sending to a channel may be placed in timerZombie
	// to take it out of the heap even though the timer is not stopped,
	// as long as nothing is reading from the channel.
	timerZombie
)

// timerDebug enables printing a textual debug trace of all timer operations to stderr.
const timerDebug = false

// trace prints a debug trace line for op if timerDebug is enabled.
func (t *timer) trace(op string) {
	if timerDebug {
		t.trace1(op)
	}
}

// trace1 prints t's state bits (heaped/modified/zombie/chan, "-" when clear),
// the blocked-goroutine count, and op.
func (t *timer) trace1(op string) {
	if !timerDebug {
		return
	}
	bits := [4]string{"h", "m", "z", "c"}
	for i := range 3 {
		if t.state&(1<<i) == 0 {
			bits[i] = "-"
		}
	}
	if !t.isChan {
		bits[3] = "-"
	}
	print("T ", t, " ", bits[0], bits[1], bits[2], bits[3], " b=", t.blocked, " ", op, "\n")
}

// trace prints a debug trace line for op on ts if timerDebug is enabled.
func (ts *timers) trace(op string) {
	if timerDebug {
		println("TS", ts, op)
	}
}

// lock locks the timer, allowing reading or writing any of the timer fields.
func (t *timer) lock() {
	lock(&t.mu)
	t.trace("lock")
}

// unlock updates t.astate and unlocks the timer.
func (t *timer) unlock() {
	t.trace("unlock")
	// Let heap fast paths know whether heap[i].when is accurate.
	// Also let maybeRunChan know whether channel is in heap.
	t.astate.Store(t.state)
	unlock(&t.mu)
}

// hchan returns the channel in t.arg.
// t must be a timer with a channel.
func (t *timer) hchan() *hchan {
	if !t.isChan {
		badTimer()
	}
	// Note: t.arg is a chan time.Time,
	// and runtime cannot refer to that type,
	// so we cannot use a type assertion.
	return (*hchan)(efaceOf(&t.arg).data)
}

// updateHeap updates t as directed by t.state, updating t.state
// and returning a bool indicating whether the state (and ts.heap[0].when) changed.
// The caller must hold t's lock, or the world can be stopped instead.
// The timer set t.ts must be non-nil and locked, t must be t.ts.heap[0], and updateHeap
// takes care of moving t within the timers heap to preserve the heap invariants.
// If ts == nil, then t must not be in a heap (or is in a heap that is
// temporarily not maintaining its invariant, such as during timers.adjust).
func (t *timer) updateHeap() (updated bool) {
	assertWorldStoppedOrLockHeld(&t.mu)
	t.trace("updateHeap")
	ts := t.ts
	if ts == nil || t != ts.heap[0].timer {
		badTimer()
	}
	assertLockHeld(&ts.mu)
	if t.state&timerZombie != 0 {
		// Take timer out of heap.
		t.state &^= timerHeaped | timerZombie | timerModified
		ts.zombies.Add(-1)
		ts.deleteMin()
		return true
	}

	if t.state&timerModified != 0 {
		// Update ts.heap[0].when and move within heap.
		t.state &^= timerModified
		ts.heap[0].when = t.when
		ts.siftDown(0)
		ts.updateMinWhenHeap()
		return true
	}

	return false
}

// maxWhen is the maximum value for timer's when field.
const maxWhen = 1<<63 - 1

// verifyTimers can be set to true to add debugging checks that the
// timer heaps are valid.
const verifyTimers = false

// Package time APIs.
// Godoc uses the comments in package time, not these.

// time.now is implemented in assembly.

// timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
//
//go:linkname timeSleep time.Sleep
func timeSleep(ns int64) {
	if ns <= 0 {
		return
	}

	gp := getg()
	// Reuse the goroutine's cached timer, allocating it on first sleep.
	t := gp.timer
	if t == nil {
		t = new(timer)
		t.init(goroutineReady, gp)
		if gp.syncGroup != nil {
			t.isFake = true
		}
		gp.timer = t
	}
	var now int64
	if sg := gp.syncGroup; sg != nil {
		now = sg.now
	} else {
		now = nanotime()
	}
	when := now + ns
	if when < 0 { // check for overflow.
		when = maxWhen
	}
	gp.sleepWhen = when
	if t.isFake {
		// Call timer.reset in this goroutine, since it's the one in a syncGroup.
		// We don't need to worry about the timer function running before the goroutine
		// is parked, because time won't advance until we park.
		resetForSleep(gp, nil)
		gopark(nil, nil, waitReasonSleep, traceBlockSleep, 1)
	} else {
		gopark(resetForSleep, nil, waitReasonSleep, traceBlockSleep, 1)
	}
}

// resetForSleep is called after the goroutine is parked for timeSleep.
// We can't call timer.reset in timeSleep itself because if this is a short
// sleep and there are many goroutines then the P can wind up running the
// timer function, goroutineReady, before the goroutine has been parked.
func resetForSleep(gp *g, _ unsafe.Pointer) bool {
	gp.timer.reset(gp.sleepWhen, 0)
	return true
}

// A timeTimer is a runtime-allocated time.Timer or time.Ticker
// with the additional runtime state following it.
// The runtime state is inaccessible to package time.
type timeTimer struct {
	c    unsafe.Pointer // <-chan time.Time
	init bool
	timer
}

// newTimer allocates and returns a new time.Timer or time.Ticker (same layout)
// with the given parameters.
//
//go:linkname newTimer time.newTimer
func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer {
	t := new(timeTimer)
	t.timer.init(nil, nil)
	t.trace("new")
	if raceenabled {
		racerelease(unsafe.Pointer(&t.timer))
	}
	if c != nil {
		lockInit(&t.sendLock, lockRankTimerSend)
		t.isChan = true
		c.timer = &t.timer
		if c.dataqsiz == 0 {
			throw("invalid timer channel: no capacity")
		}
	}
	// Timers created inside a synctest bubble use the bubble's fake clock.
	if gr := getg().syncGroup; gr != nil {
		t.isFake = true
	}
	t.modify(when, period, f, arg, 0)
	t.init = true
	return t
}

// stopTimer stops a timer.
// It reports whether t was stopped before being run.
//
//go:linkname stopTimer time.stopTimer
func stopTimer(t *timeTimer) bool {
	if t.isFake && getg().syncGroup == nil {
		panic("stop of synctest timer from outside bubble")
	}
	return t.stop()
}

// resetTimer resets an inactive timer, adding it to the timer heap.
//
// Reports whether the timer was modified before it was run.
//
//go:linkname resetTimer time.resetTimer
func resetTimer(t *timeTimer, when, period int64) bool {
	if raceenabled {
		racerelease(unsafe.Pointer(&t.timer))
	}
	if t.isFake && getg().syncGroup == nil {
		panic("reset of synctest timer from outside bubble")
	}
	return t.reset(when, period)
}

// Go runtime.

// Ready the goroutine arg.
func goroutineReady(arg any, _ uintptr, _ int64) {
	goready(arg.(*g), 0)
}

// addHeap adds t to the timers heap.
// The caller must hold ts.lock or the world must be stopped.
// The caller must also have checked that t belongs in the heap.
// Callers that are not sure can call t.maybeAdd instead,
// but note that maybeAdd has different locking requirements.
func (ts *timers) addHeap(t *timer) {
	assertWorldStoppedOrLockHeld(&ts.mu)
	// Timers rely on the network poller, so make sure the poller
	// has started.
	if netpollInited.Load() == 0 {
		netpollGenericInit()
	}

	if t.ts != nil {
		throw("ts set in timer")
	}
	t.ts = ts
	ts.heap = append(ts.heap, timerWhen{t, t.when})
	ts.siftUp(len(ts.heap) - 1)
	// Only refresh the cached minimum if t became the new heap root.
	if t == ts.heap[0].timer {
		ts.updateMinWhenHeap()
	}
}

// maybeRunAsync checks whether t needs to be triggered and runs it if so.
// The caller is responsible for locking the timer and for checking that we
// are running timers in async mode. If the timer needs to be run,
// maybeRunAsync will unlock and re-lock it.
// The timer is always locked on return.
func (t *timer) maybeRunAsync() {
	assertLockHeld(&t.mu)
	if t.state&timerHeaped == 0 && t.isChan && t.when > 0 {
		// If timer should have triggered already (but nothing looked at it yet),
		// trigger now, so that a receive after the stop sees the "old" value
		// that should be there.
		// (It is possible to have t.blocked > 0 if there is a racing receive
		// in blockTimerChan, but timerHeaped not being set means
		// it hasn't run t.maybeAdd yet; in that case, running the
		// timer ourselves now is fine.)
		if now := nanotime(); t.when <= now {
			systemstack(func() {
				t.unlockAndRun(now) // resets t.when
			})
			t.lock()
		}
	}
}

// stop stops the timer t. It may be on some other P, so we can't
// actually remove it from the timers heap. We can only mark it as stopped.
// It will be removed in due course by the P whose heap it is on.
// Reports whether the timer was stopped before it was run.
func (t *timer) stop() bool {
	async := debug.asynctimerchan.Load() != 0
	// For synchronous channel timers, take sendLock first so no send can
	// race with the stop (lock order: sendLock before t.mu).
	if !async && t.isChan {
		lock(&t.sendLock)
	}

	t.lock()
	t.trace("stop")
	if async {
		t.maybeRunAsync()
	}
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if t.state&timerZombie == 0 {
			t.state |= timerZombie
			t.ts.zombies.Add(1)
		}
	}
	pending := t.when > 0
	t.when = 0

	if !async && t.isChan {
		// Stop any future sends with stale values.
		// See timer.unlockAndRun.
		t.seq++

		// If there is currently a send in progress,
		// incrementing seq is going to prevent that
		// send from actually happening. That means
		// that we should return true: the timer was
		// stopped, even though t.when may be zero.
		if t.period == 0 && t.isSending.Load() > 0 {
			pending = true
		}
	}
	t.unlock()
	if !async && t.isChan {
		unlock(&t.sendLock)
		if timerchandrain(t.hchan()) {
			pending = true
		}
	}

	return pending
}

// deleteMin removes timer 0 from ts.
// ts must be locked.
func (ts *timers) deleteMin() {
	assertLockHeld(&ts.mu)
	t := ts.heap[0].timer
	if t.ts != ts {
		throw("wrong timers")
	}
	t.ts = nil
	last := len(ts.heap) - 1
	if last > 0 {
		ts.heap[0] = ts.heap[last]
	}
	ts.heap[last] = timerWhen{} // clear for the garbage collector
	ts.heap = ts.heap[:last]
	if last > 0 {
		ts.siftDown(0)
	}
	ts.updateMinWhenHeap()
	if last == 0 {
		// If there are no timers, then clearly there are no timerModified timers.
		ts.minWhenModified.Store(0)
	}
}

// modify modifies an existing timer.
// This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset.
// Reports whether the timer was modified before it was run.
// If f == nil, then t.f, t.arg, and t.seq are not modified.
func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool {
	if when <= 0 {
		throw("timer when must be positive")
	}
	if period < 0 {
		throw("timer period must be non-negative")
	}
	async := debug.asynctimerchan.Load() != 0

	// For synchronous channel timers, hold sendLock across the modification
	// so an in-flight send cannot race with the reset.
	if !async && t.isChan {
		lock(&t.sendLock)
	}

	t.lock()
	if async {
		t.maybeRunAsync()
	}
	t.trace("modify")
	oldPeriod := t.period
	t.period = period
	if f != nil {
		t.f = f
		t.arg = arg
		t.seq = seq
	}

	wake := false
	pending := t.when > 0
	t.when = when
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if t.state&timerZombie != 0 {
			// In the heap but marked for removal (by a Stop).
			// Unmark it, since it has been Reset and will be running again.
			t.ts.zombies.Add(-1)
			t.state &^= timerZombie
		}
		// The corresponding heap[i].when is updated later.
		// See comment in type timer above and in timers.adjust below.
		if min := t.ts.minWhenModified.Load(); min == 0 || when < min {
			wake = true
			// Force timerModified bit out to t.astate before updating t.minWhenModified,
			// to synchronize with t.ts.adjust. See comment in adjust.
			t.astate.Store(t.state)
			t.ts.updateMinWhenModified(when)
		}
	}

	add := t.needsAdd()

	if !async && t.isChan {
		// Stop any future sends with stale values.
		// See timer.unlockAndRun.
		t.seq++

		// If there is currently a send in progress,
		// incrementing seq is going to prevent that
		// send from actually happening. That means
		// that we should return true: the timer was
		// stopped, even though t.when may be zero.
		if oldPeriod == 0 && t.isSending.Load() > 0 {
			pending = true
		}
	}
	t.unlock()
	if !async && t.isChan {
		if timerchandrain(t.hchan()) {
			pending = true
		}
		unlock(&t.sendLock)
	}

	if add {
		t.maybeAdd()
	}
	if wake {
		wakeNetPoller(when)
	}

	return pending
}

// needsAdd reports whether t needs to be added to a timers heap.
// t must be locked.
func (t *timer) needsAdd() bool {
	assertLockHeld(&t.mu)
	need := t.state&timerHeaped == 0 && t.when > 0 && (!t.isChan || t.isFake || t.blocked > 0)
	if need {
		t.trace("needsAdd+")
	} else {
		t.trace("needsAdd-")
	}
	return need
}

// maybeAdd adds t to the local timers heap if it needs to be in a heap.
// The caller must not hold t's lock nor any timers heap lock.
// The caller probably just unlocked t, but that lock must be dropped
// in order to acquire a ts.lock, to avoid lock inversions.
// (timers.adjust holds ts.lock while acquiring each t's lock,
// so we cannot hold any t's lock while acquiring ts.lock).
//
// Strictly speaking it *might* be okay to hold t.lock and
// acquire ts.lock at the same time, because we know that
// t is not in any ts.heap, so nothing holding a ts.lock would
// be acquiring the t.lock at the same time, meaning there
// isn't a possible deadlock. But it is easier and safer not to be
// too clever and respect the static ordering.
// (If we don't, we have to change the static lock checking of t and ts.)
//
// Concurrent calls to time.Timer.Reset or blockTimerChan
// may result in concurrent calls to t.maybeAdd,
// so we cannot assume that t is not in a heap on entry to t.maybeAdd.
func (t *timer) maybeAdd() {
	// Note: Not holding any locks on entry to t.maybeAdd,
	// so the current g can be rescheduled to a different M and P
	// at any time, including between the ts := assignment and the
	// call to ts.lock. If a reschedule happened then, we would be
	// adding t to some other P's timers, perhaps even a P that the scheduler
	// has marked as idle with no timers, in which case the timer could
	// go unnoticed until long after t.when.
	// Calling acquirem instead of using getg().m makes sure that
	// we end up locking and inserting into the current P's timers.
	mp := acquirem()
	var ts *timers
	if t.isFake {
		// Fake timers always live in their bubble's timer set,
		// never in a P's heap.
		sg := getg().syncGroup
		if sg == nil {
			throw("invalid timer: fake time but no syncgroup")
		}
		ts = &sg.timers
	} else {
		ts = &mp.p.ptr().timers
	}
	ts.lock()
	ts.cleanHead()
	t.lock()
	t.trace("maybeAdd")
	when := int64(0)
	wake := false
	if t.needsAdd() {
		t.state |= timerHeaped
		when = t.when
		wakeTime := ts.wakeTime()
		wake = wakeTime == 0 || when < wakeTime
		ts.addHeap(t)
	}
	t.unlock()
	ts.unlock()
	releasem(mp)
	if wake {
		wakeNetPoller(when)
	}
}

// reset resets the time when a timer should fire.
// If used for an inactive timer, the timer will become active.
// Reports whether the timer was active and was stopped.
func (t *timer) reset(when, period int64) bool {
	return t.modify(when, period, nil, nil, 0)
}

// cleanHead cleans up the head of the timer queue. This speeds up
// programs that create and delete timers; leaving them in the heap
// slows down heap operations.
// The caller must have locked ts.
func (ts *timers) cleanHead() {
	ts.trace("cleanHead")
	assertLockHeld(&ts.mu)
	gp := getg()
	for {
		if len(ts.heap) == 0 {
			return
		}

		// This loop can theoretically run for a while, and because
		// it is holding timersLock it cannot be preempted.
		// If someone is trying to preempt us, just return.
		// We can clean the timers later.
		if gp.preemptStop {
			return
		}

		// Delete zombies from tail of heap. It requires no heap adjustments at all,
		// and doing so increases the chances that when we swap out a zombie
		// in heap[0] for the tail of the heap, we'll get a non-zombie timer,
		// shortening this loop.
		n := len(ts.heap)
		if t := ts.heap[n-1].timer; t.astate.Load()&timerZombie != 0 {
			t.lock()
			// Re-check under t's lock: astate is only a snapshot.
			if t.state&timerZombie != 0 {
				t.state &^= timerHeaped | timerZombie | timerModified
				t.ts = nil
				ts.zombies.Add(-1)
				ts.heap[n-1] = timerWhen{}
				ts.heap = ts.heap[:n-1]
			}
			t.unlock()
			continue
		}

		t := ts.heap[0].timer
		if t.ts != ts {
			throw("bad ts")
		}

		if t.astate.Load()&(timerModified|timerZombie) == 0 {
			// Fast path: head of timers does not need adjustment.
			return
		}

		t.lock()
		updated := t.updateHeap()
		t.unlock()
		if !updated {
			// Head of timers does not need adjustment.
			return
		}
	}
}

// take moves any timers from src into ts
// and then clears the timer state from src,
// because src is being destroyed.
// The caller must not have locked either timers.
// For now this is only called when the world is stopped.
func (ts *timers) take(src *timers) {
	ts.trace("take")
	assertWorldStopped()
	if len(src.heap) > 0 {
		// The world is stopped, so we ignore the locking of ts and src here.
		// That would introduce a sched < timers lock ordering,
		// which we'd rather avoid in the static ranking.
		for _, tw := range src.heap {
			t := tw.timer
			t.ts = nil
			if t.state&timerZombie != 0 {
				// Stopped timers are simply dropped rather than moved.
				t.state &^= timerHeaped | timerZombie | timerModified
			} else {
				t.state &^= timerModified
				ts.addHeap(t)
			}
		}
		src.heap = nil
		src.zombies.Store(0)
		src.minWhenHeap.Store(0)
		src.minWhenModified.Store(0)
		src.len.Store(0)
		ts.len.Store(uint32(len(ts.heap)))
	}
}

// adjust looks through the timers in ts.heap for
// any timers that have been modified to run earlier, and puts them in
// the correct place in the heap. While looking for those timers,
// it also moves timers that have been modified to run later,
// and removes deleted timers. The caller must have locked ts.
func (ts *timers) adjust(now int64, force bool) {
	ts.trace("adjust")
	assertLockHeld(&ts.mu)
	// If we haven't yet reached the time of the earliest modified
	// timer, don't do anything. This speeds up programs that adjust
	// a lot of timers back and forth if the timers rarely expire.
	// We'll postpone looking through all the adjusted timers until
	// one would actually expire.
	if !force {
		first := ts.minWhenModified.Load()
		if first == 0 || first > now {
			if verifyTimers {
				ts.verify()
			}
			return
		}
	}

	// minWhenModified is a lower bound on the earliest t.when
	// among the timerModified timers. We want to make it more precise:
	// we are going to scan the heap and clean out all the timerModified bits,
	// at which point minWhenModified can be set to 0 (indicating none at all).
	//
	// Other P's can be calling ts.wakeTime concurrently, and we'd like to
	// keep ts.wakeTime returning an accurate value throughout this entire process.
	//
	// Setting minWhenModified = 0 *before* the scan could make wakeTime
	// return an incorrect value: if minWhenModified < minWhenHeap, then clearing
	// it to 0 will make wakeTime return minWhenHeap (too late) until the scan finishes.
	// To avoid that, we want to set minWhenModified to 0 *after* the scan.
	//
	// Setting minWhenModified = 0 *after* the scan could result in missing
	// concurrent timer modifications in other goroutines; those will lock
	// the specific timer, set the timerModified bit, and set t.when.
	// To avoid that, we want to set minWhenModified to 0 *before* the scan.
	//
	// The way out of this dilemma is to preserve wakeTime a different way.
	// wakeTime is min(minWhenHeap, minWhenModified), and minWhenHeap
	// is protected by ts.lock, which we hold, so we can modify it however we like
	// in service of keeping wakeTime accurate.
	//
	// So we can:
	//
	//	1. Set minWhenHeap = min(minWhenHeap, minWhenModified)
	//	2. Set minWhenModified = 0
	//	   (Other goroutines may modify timers and update minWhenModified now.)
	//	3. Scan timers
	//	4. Set minWhenHeap = heap[0].when
	//
	// That order preserves a correct value of wakeTime throughout the entire
	// operation:
	// Step 1 “locks in” an accurate wakeTime even with minWhenModified cleared.
	// Step 2 makes sure concurrent t.when updates are not lost during the scan.
	// Step 3 processes all modified timer values, justifying minWhenModified = 0.
	// Step 4 corrects minWhenHeap to a precise value.
	//
	// The wakeTime method implementation reads minWhenModified *before* minWhenHeap,
	// so that if the minWhenModified is observed to be 0, that means the minWhenHeap that
	// follows will include the information that was zeroed out of it.
	//
	// Originally Step 3 locked every timer, which made sure any timer update that was
	// already in progress during Steps 1+2 completed and was observed by Step 3.
	// All that locking was too expensive, so now we do an atomic load of t.astate to
	// decide whether we need to do a full lock. To make sure that we still observe any
	// timer update already in progress during Steps 1+2, t.modify sets timerModified
	// in t.astate *before* calling t.updateMinWhenModified. That ensures that the
	// overwrite in Step 2 cannot lose an update: if it does overwrite an update, Step 3
	// will see the timerModified and do a full lock.
	ts.minWhenHeap.Store(ts.wakeTime())
	ts.minWhenModified.Store(0)

	changed := false
	for i := 0; i < len(ts.heap); i++ {
		tw := &ts.heap[i]
		t := tw.timer
		if t.ts != ts {
			throw("bad ts")
		}

		if t.astate.Load()&(timerModified|timerZombie) == 0 {
			// Does not need adjustment.
			continue
		}

		t.lock()
		switch {
		case t.state&timerHeaped == 0:
			badTimer()

		case t.state&timerZombie != 0:
			ts.zombies.Add(-1)
			t.state &^= timerHeaped | timerZombie | timerModified
			// Swap the tail into slot i and re-examine slot i next iteration.
			n := len(ts.heap)
			ts.heap[i] = ts.heap[n-1]
			ts.heap[n-1] = timerWhen{}
			ts.heap = ts.heap[:n-1]
			t.ts = nil
			i--
			changed = true

		case t.state&timerModified != 0:
			tw.when = t.when
			t.state &^= timerModified
			changed = true
		}
		t.unlock()
	}

	if changed {
		ts.initHeap()
	}
	ts.updateMinWhenHeap()

	if verifyTimers {
		ts.verify()
	}
}

// wakeTime looks at ts's timers and returns the time when we
// should wake up the netpoller. It returns 0 if there are no timers.
// This function is invoked when dropping a P, so it must run without
// any write barriers.
//
//go:nowritebarrierrec
func (ts *timers) wakeTime() int64 {
	// Note that the order of these two loads matters:
	// adjust updates minWhen to make it safe to clear minNextWhen.
	// We read minWhen after reading minNextWhen so that
	// if we see a cleared minNextWhen, we are guaranteed to see
	// the updated minWhen.
	nextWhen := ts.minWhenModified.Load()
	when := ts.minWhenHeap.Load()
	if when == 0 || (nextWhen != 0 && nextWhen < when) {
		when = nextWhen
	}
	return when
}

// check runs any timers in ts that are ready.
// If now is not 0 it is the current time.
// It returns the passed time or the current time if now was passed as 0,
// and the time when the next timer should run or 0 if there is no next timer,
// and reports whether it ran any timers.
// If the time when the next timer should run is not 0,
// it is always larger than the returned time.
// We pass now in and out to avoid extra calls of nanotime.
//
//go:yeswritebarrierrec
func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) {
	ts.trace("check")
	// If it's not yet time for the first timer, or the first adjusted
	// timer, then there is nothing to do.
	next := ts.wakeTime()
	if next == 0 {
		// No timers to run or adjust.
		return now, 0, false
	}

	if now == 0 {
		now = nanotime()
	}

	// If this is the local P, and there are a lot of deleted timers,
	// clear them out. We only do this for the local P to reduce
	// lock contention on timersLock.
	zombies := ts.zombies.Load()
	if zombies < 0 {
		badTimer()
	}
	force := ts == &getg().m.p.ptr().timers && int(zombies) > int(ts.len.Load())/4

	if now < next && !force {
		// Next timer is not ready to run, and we don't need to clear deleted timers.
		return now, next, false
	}

	ts.lock()
	if len(ts.heap) > 0 {
		ts.adjust(now, false)
		for len(ts.heap) > 0 {
			// Note that runtimer may temporarily unlock ts.
			if tw := ts.run(now); tw != 0 {
				// run returned -1 (heap empty) or the time the next
				// timer should fire; only a positive value is a
				// meaningful poll deadline.
				if tw > 0 {
					pollUntil = tw
				}
				break
			}
			// run returned 0: it ran a timer; keep going.
			ran = true
		}

		// Note: Delaying the forced adjustment until after the ts.run
		// (as opposed to calling ts.adjust(now, force) above)
		// is significantly faster under contention, such as in
		// package time's BenchmarkTimerAdjust10000,
		// though we do not fully understand why.
		force = ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4
		if force {
			ts.adjust(now, true)
		}
	}
	ts.unlock()

	return now, pollUntil, ran
}

// run examines the first timer in ts. If it is ready based on now,
// it runs the timer and removes or updates it.
// Returns 0 if it ran a timer, -1 if there are no more timers, or the time
// when the first timer should run.
// The caller must have locked ts.
// If a timer is run, this will temporarily unlock ts.
//
//go:systemstack
func (ts *timers) run(now int64) int64 {
	ts.trace("run")
	assertLockHeld(&ts.mu)
Redo:
	if len(ts.heap) == 0 {
		return -1
	}
	tw := ts.heap[0]
	t := tw.timer
	if t.ts != ts {
		throw("bad ts")
	}

	if t.astate.Load()&(timerModified|timerZombie) == 0 && tw.when > now {
		// Fast path: not ready to run.
		return tw.when
	}

	t.lock()
	if t.updateHeap() {
		// t needed a heap update, so ts.heap[0] may no longer be t;
		// reexamine the (possibly new) top of the heap.
		t.unlock()
		goto Redo
	}

	if t.state&timerHeaped == 0 || t.state&timerModified != 0 {
		badTimer()
	}

	if t.when > now {
		// Not ready to run.
		t.unlock()
		return t.when
	}

	t.unlockAndRun(now)
	assertLockHeld(&ts.mu) // t is unlocked now, but not ts
	return 0
}

// unlockAndRun unlocks and runs the timer t (which must be locked).
// If t is in a timer set (t.ts != nil), the caller must also have locked the timer set,
// and this call will temporarily unlock the timer set while running the timer function.
// unlockAndRun returns with t unlocked and t.ts (re-)locked.
//
//go:systemstack
func (t *timer) unlockAndRun(now int64) {
	t.trace("unlockAndRun")
	assertLockHeld(&t.mu)
	if t.ts != nil {
		assertLockHeld(&t.ts.mu)
	}
	if raceenabled {
		// Note that we are running on a system stack,
		// so there is no chance of getg().m being reassigned
		// out from under us while this function executes.
		gp := getg()
		var tsLocal *timers
		if t.ts == nil || t.ts.syncGroup == nil {
			tsLocal = &gp.m.p.ptr().timers
		} else {
			tsLocal = &t.ts.syncGroup.timers
		}
		if tsLocal.raceCtx == 0 {
			tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum)
		}
		raceacquirectx(tsLocal.raceCtx, unsafe.Pointer(t))
	}

	if t.state&(timerModified|timerZombie) != 0 {
		badTimer()
	}

	// Snapshot the callback and its arguments while t is still locked;
	// t can be reconfigured concurrently once we unlock below.
	f := t.f
	arg := t.arg
	seq := t.seq
	var next int64
	delay := now - t.when
	if t.period > 0 {
		// Leave in heap but adjust next time to fire.
		next = t.when + t.period*(1+delay/t.period)
		if next < 0 { // check for overflow.
			next = maxWhen
		}
	} else {
		// One-shot timer: it will not fire again.
		next = 0
	}
	ts := t.ts
	t.when = next
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if next == 0 {
			// One-shot timer that was in the heap: mark it for removal.
			t.state |= timerZombie
			t.ts.zombies.Add(1)
		}
		t.updateHeap()
	}

	async := debug.asynctimerchan.Load() != 0
	if !async && t.isChan && t.period == 0 {
		// Tell Stop/Reset that we are sending a value.
		if t.isSending.Add(1) < 0 {
			throw("too many concurrent timer firings")
		}
	}

	t.unlock()

	if raceenabled {
		// Temporarily use the current P's racectx for g0.
		gp := getg()
		if gp.racectx != 0 {
			throw("unexpected racectx")
		}
		if ts == nil || ts.syncGroup == nil {
			gp.racectx = gp.m.p.ptr().timers.raceCtx
		} else {
			gp.racectx = ts.syncGroup.timers.raceCtx
		}
	}

	if ts != nil {
		ts.unlock()
	}

	if ts != nil && ts.syncGroup != nil {
		// Temporarily use the timer's synctest group for the G running this timer.
		gp := getg()
		if gp.syncGroup != nil {
			throw("unexpected syncgroup set")
		}
		gp.syncGroup = ts.syncGroup
		ts.syncGroup.changegstatus(gp, _Gdead, _Grunning)
	}

	if !async && t.isChan {
		// For a timer channel, we want to make sure that no stale sends
		// happen after a t.stop or t.modify, but we cannot hold t.mu
		// during the actual send (which f does) due to lock ordering.
		// It can happen that we are holding t's lock above, we decide
		// it's time to send a time value (by calling f), grab the parameters,
		// unlock above, and then a t.stop or t.modify changes the timer
		// and returns. At that point, the send needs not to happen after all.
		// The way we arrange for it not to happen is that t.stop and t.modify
		// both increment t.seq while holding both t.mu and t.sendLock.
		// We copied the seq value above while holding t.mu.
		// Now we can acquire t.sendLock (which will be held across the send)
		// and double-check that t.seq is still the seq value we saw above.
		// If not, the timer has been updated and we should skip the send.
		// We skip the send by reassigning f to a no-op function.
		//
		// The isSending field tells t.stop or t.modify that we have
		// started to send the value. That lets them correctly return
		// true meaning that no value was sent.
		lock(&t.sendLock)

		if t.period == 0 {
			// We are committed to possibly sending a value
			// based on seq, so no need to keep telling
			// stop/modify that we are sending.
			if t.isSending.Add(-1) < 0 {
				throw("mismatched isSending updates")
			}
		}

		if t.seq != seq {
			f = func(any, uintptr, int64) {}
		}
	}

	f(arg, seq, delay)

	if !async && t.isChan {
		unlock(&t.sendLock)
	}

	if ts != nil && ts.syncGroup != nil {
		gp := getg()
		ts.syncGroup.changegstatus(gp, _Grunning, _Gdead)
		if raceenabled {
			// Establish a happens-before between this timer event and
			// the next synctest.Wait call.
			racereleasemergeg(gp, ts.syncGroup.raceaddr())
		}
		gp.syncGroup = nil
	}

	if ts != nil {
		ts.lock()
	}

	if raceenabled {
		gp := getg()
		gp.racectx = 0
	}
}

// verifyTimerHeap verifies that the timers is in a valid state.
// This is only for debugging, and is only called if verifyTimers is true.
// The caller must have locked ts.
func (ts *timers) verify() {
	assertLockHeld(&ts.mu)
	for i, tw := range ts.heap {
		if i == 0 {
			// First timer has no parent.
			continue
		}

		// The heap is timerHeapN-ary. See siftupTimer and siftdownTimer.
		p := int(uint(i-1) / timerHeapN)
		if tw.when < ts.heap[p].when {
			print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", tw.when, "\n")
			throw("bad timer heap")
		}
	}
	if n := int(ts.len.Load()); len(ts.heap) != n {
		println("timer heap len", len(ts.heap), "!= atomic len", n)
		throw("bad timer heap len")
	}
}

// updateMinWhenHeap sets ts.minWhenHeap to ts.heap[0].when.
// The caller must have locked ts or the world must be stopped.
func (ts *timers) updateMinWhenHeap() {
	assertWorldStoppedOrLockHeld(&ts.mu)
	if len(ts.heap) == 0 {
		ts.minWhenHeap.Store(0)
	} else {
		ts.minWhenHeap.Store(ts.heap[0].when)
	}
}

// updateMinWhenModified updates ts.minWhenModified to be <= when.
// ts need not be (and usually is not) locked.
func (ts *timers) updateMinWhenModified(when int64) {
	// CAS loop: retry until either the stored value is already a
	// nonzero time earlier than when, or our store succeeds.
	for {
		old := ts.minWhenModified.Load()
		if old != 0 && old < when {
			return
		}
		if ts.minWhenModified.CompareAndSwap(old, when) {
			return
		}
	}
}

// timeSleepUntil returns the time when the next timer should fire. Returns
// maxWhen if there are no timers.
// This is only called by sysmon and checkdead.
func timeSleepUntil() int64 {
	next := int64(maxWhen)

	// Prevent allp slice changes. This is like retake.
	lock(&allpLock)
	for _, pp := range allp {
		if pp == nil {
			// This can happen if procresize has grown
			// allp but not yet created new Ps.
			continue
		}

		if w := pp.timers.wakeTime(); w != 0 {
			next = min(next, w)
		}
	}
	unlock(&allpLock)

	return next
}

// timerHeapN is the fan-out of the timer heap:
// each node has up to timerHeapN children.
const timerHeapN = 4

// Heap maintenance algorithms.
// These algorithms check for slice index errors manually.
// Slice index error can happen if the program is using racy
// access to timers.
// We don't want to panic here, because
// it will cause the program to crash with a mysterious
// "panic holding locks" message. Instead, we panic while not
// holding a lock.

// siftUp puts the timer at position i in the right place
// in the heap by moving it up toward the top of the heap.
func (ts *timers) siftUp(i int) {
	heap := ts.heap
	if i >= len(heap) {
		badTimer()
	}
	tw := heap[i]
	when := tw.when
	if when <= 0 {
		badTimer()
	}
	// Move later-firing parents down until tw's insertion point
	// is found; tw itself is stored once, at the end.
	for i > 0 {
		p := int(uint(i-1) / timerHeapN) // parent
		if when >= heap[p].when {
			break
		}
		heap[i] = heap[p]
		i = p
	}
	if heap[i].timer != tw.timer {
		// Store tw only if it actually moved.
		heap[i] = tw
	}
}

// siftDown puts the timer at position i in the right place
// in the heap by moving it down toward the bottom of the heap.
func (ts *timers) siftDown(i int) {
	heap := ts.heap
	n := len(heap)
	if i >= n {
		badTimer()
	}
	if i*timerHeapN+1 >= n {
		// Position i has no children; nothing to do.
		return
	}
	tw := heap[i]
	when := tw.when
	if when <= 0 {
		badTimer()
	}
	for {
		leftChild := i*timerHeapN + 1
		if leftChild >= n {
			break
		}
		// Find the child with the earliest when, but only if it
		// fires earlier than tw would at this position (c stays -1
		// if no child does).
		w := when
		c := -1
		for j, tw := range heap[leftChild:min(leftChild+timerHeapN, n)] {
			if tw.when < w {
				w = tw.when
				c = leftChild + j
			}
		}
		if c < 0 {
			break
		}
		heap[i] = heap[c]
		i = c
	}
	if heap[i].timer != tw.timer {
		// Store tw only if it actually moved.
		heap[i] = tw
	}
}

// initHeap reestablishes the heap order in the slice ts.heap.
// It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations.
func (ts *timers) initHeap() {
	// Last possible element that needs sifting down is parent of last element;
	// last element is len(t)-1; parent of last element is (len(t)-1-1)/timerHeapN.
	if len(ts.heap) <= 1 {
		return
	}
	for i := int(uint(len(ts.heap)-1-1) / timerHeapN); i >= 0; i-- {
		ts.siftDown(i)
	}
}

// badTimer is called if the timer data structures have been corrupted,
// presumably due to racy use by the program. We panic here rather than
// panicking due to invalid slice access while holding locks.
// See issue #25686.
func badTimer() {
	throw("timer data corruption")
}

// Timer channels.

// maybeRunChan checks whether the timer needs to run
// to send a value to its associated channel. If so, it does.
// The timer must not be locked.
func (t *timer) maybeRunChan() {
	if t.isFake {
		// Fake-clock timer: synctest drives it, but first validate
		// that the accessing goroutine is in the right bubble.
		t.lock()
		var timerGroup *synctestGroup
		if t.ts != nil {
			timerGroup = t.ts.syncGroup
		}
		t.unlock()
		sg := getg().syncGroup
		if sg == nil {
			panic(plainError("synctest timer accessed from outside bubble"))
		}
		if timerGroup != nil && sg != timerGroup {
			panic(plainError("timer moved between synctest bubbles"))
		}
		// No need to do anything here.
		// synctest.Run will run the timer when it advances its fake clock.
		return
	}
	if t.astate.Load()&timerHeaped != 0 {
		// If the timer is in the heap, the ordinary timer code
		// is in charge of sending when appropriate.
		return
	}

	t.lock()
	now := nanotime()
	if t.state&timerHeaped != 0 || t.when == 0 || t.when > now {
		t.trace("maybeRunChan-")
		// Timer in the heap, or not running at all, or not triggered.
		t.unlock()
		return
	}
	t.trace("maybeRunChan+")
	systemstack(func() {
		t.unlockAndRun(now)
	})
}

// blockTimerChan is called when a channel op has decided to block on c.
// The caller holds the channel lock for c and possibly other channels.
// blockTimerChan makes sure that c is in a timer heap,
// adding it if needed.
func blockTimerChan(c *hchan) {
	t := c.timer
	if t.isFake {
		// Fake-clock timers are never placed in a P's heap;
		// synctest.Run runs them when it advances its fake clock.
		return
	}
	t.lock()
	t.trace("blockTimerChan")
	if !t.isChan {
		badTimer()
	}

	// Count one more goroutine blocked on t's channel.
	t.blocked++

	// If this is the first enqueue after a recent dequeue,
	// the timer may still be in the heap but marked as a zombie.
	// Unmark it in this case, if the timer is still pending.
	if t.state&timerHeaped != 0 && t.state&timerZombie != 0 && t.when > 0 {
		t.state &^= timerZombie
		t.ts.zombies.Add(-1)
	}

	// t.maybeAdd must be called with t unlocked,
	// because it needs to lock t.ts before t.
	// Then it will do nothing if t.needsAdd(state) is false.
	// Check that now before the unlock,
	// avoiding the extra lock-lock-unlock-unlock
	// inside maybeAdd when t does not need to be added.
	add := t.needsAdd()
	t.unlock()
	if add {
		t.maybeAdd()
	}
}

// unblockTimerChan is called when a channel op that was blocked on c
// is no longer blocked. Every call to blockTimerChan must be paired with
// a call to unblockTimerChan.
// The caller holds the channel lock for c and possibly other channels.
// unblockTimerChan removes c from the timer heap when nothing is
// blocked on it anymore.
func unblockTimerChan(c *hchan) {
	t := c.timer
	if t.isFake {
		// Fake-clock timers are never placed in a P's heap,
		// so there is nothing to undo.
		return
	}
	t.lock()
	t.trace("unblockTimerChan")
	if !t.isChan || t.blocked == 0 {
		// Corrupt state or unbalanced block/unblock calls.
		badTimer()
	}
	t.blocked--
	if t.blocked == 0 && t.state&timerHeaped != 0 && t.state&timerZombie == 0 {
		// Last goroutine that was blocked on this timer.
		// Mark for removal from heap but do not clear t.when,
		// so that we know what time it is still meant to trigger.
		t.state |= timerZombie
		t.ts.zombies.Add(1)
	}
	t.unlock()
}