github.com/ice-blockchain/go/src@v0.0.0-20240403114104-1564d284e521/runtime/time.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Time-related runtime and pieces of package time.

package runtime

import (
	"internal/abi"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// A timer is a potentially repeating trigger for calling t.f(t.arg, t.seq).
// Timers are allocated by client code, often as part of other data structures.
// Each P has a heap of pointers to timers that it manages.
//
// A timer is expected to be used by only one client goroutine at a time,
// but there will be concurrent access by the P managing that timer.
// Timer accesses are protected by the lock t.mu, with a snapshot of
// t's state bits published in t.astate to enable certain fast paths to make
// decisions about a timer without acquiring the lock.
type timer struct {
	// mu protects reads and writes to all fields, with exceptions noted below.
	mu mutex

	astate  atomic.Uint8 // atomic copy of state bits at last unlock
	state   uint8        // state bits
	isChan  bool         // timer has a channel; immutable; can be read without lock
	blocked uint32       // number of goroutines blocked on timer's channel

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(arg, seq, delay) in the timer goroutine, so f must be
	// a well-behaved function and not block.
	//
	// The arg and seq are client-specified opaque arguments passed back to f.
	// When used from netpoll, arg and seq have meanings defined by netpoll
	// and are completely opaque to this code; in that context, seq is a sequence
	// number to recognize and squelch stale function invocations.
	// When used from package time, arg is a channel (for After, NewTicker)
	// or the function to call (for AfterFunc) and seq is unused (0).
	//
	// Package time does not know about seq, but if this is a channel timer (t.isChan == true),
	// this file uses t.seq as a sequence number to recognize and squelch
	// sends that correspond to an earlier (stale) timer configuration,
	// similar to its use in netpoll. In this usage (that is, when t.isChan == true),
	// writes to seq are protected by both t.mu and t.sendLock,
	// so reads are allowed when holding either of the two mutexes.
	//
	// The delay argument is nanotime() - t.when, meaning the delay in ns between
	// when the timer should have gone off and now. Normally that amount is
	// small enough not to matter, but for channel timers that are fed lazily,
	// the delay can be arbitrarily long; package time subtracts it out to make
	// it look like the send happened earlier than it actually did.
	// (No one looked at the channel since then, or the send would have
	// not happened so late, so no one can tell the difference.)
	when   int64
	period int64
	f      func(arg any, seq uintptr, delay int64)
	arg    any
	seq    uintptr

	// If non-nil, the timers containing t.
	ts *timers

	// whenHeap is a (perhaps outdated) copy of t.when for use
	// ordering t within t.ts.heap.
	// When t is in a heap but t.whenHeap is outdated,
	// the timerModified state bit is set.
	// The actual update t.whenHeap = t.when must be
	// delayed until the heap can be reordered at the same time
	// (meaning t's lock must be held for whenHeap,
	// and t.ts's lock must be held for the heap reordering).
	// Since writes to whenHeap are protected by two locks (t.mu and t.ts.mu),
	// it is permitted to read whenHeap when holding either one.
	whenHeap int64

	// sendLock protects sends on the timer's channel.
	// Not used for async (pre-Go 1.23) behavior when debug.asynctimerchan.Load() != 0.
	sendLock mutex
}

// init initializes a newly allocated timer t.
// Any code that allocates a timer must call t.init before using it.
// The arg and f can be set during init, or they can be nil in init
// and set by a future call to t.modify.
func (t *timer) init(f func(arg any, seq uintptr, delay int64), arg any) {
	lockInit(&t.mu, lockRankTimer)
	t.f = f
	t.arg = arg
}

// A timers is a per-P set of timers.
type timers struct {
	// mu protects timers; timers are per-P, but the scheduler can
	// access the timers of another P, so we have to lock.
	mu mutex

	// heap is the set of timers, ordered by t.whenHeap.
	// Must hold lock to access.
	heap []*timer

	// len is an atomic copy of len(heap).
	len atomic.Uint32

	// zombies is the number of timers in the heap
	// that are marked for removal.
	zombies atomic.Int32

	// raceCtx is the race context used while executing timer functions.
	raceCtx uintptr

	// minWhenHeap is the minimum heap[i].whenHeap value (= heap[0].whenHeap).
	// The wakeTime method uses minWhenHeap and minWhenModified
	// to determine the next wake time.
	// If minWhenHeap = 0, it means there are no timers in the heap.
	minWhenHeap atomic.Int64

	// minWhenModified is a lower bound on the minimum
	// heap[i].when over timers with the timerModified bit set.
	// If minWhenModified = 0, it means there are no timerModified timers in the heap.
	minWhenModified atomic.Int64
}

func (ts *timers) lock() {
	lock(&ts.mu)
}

func (ts *timers) unlock() {
	// Update atomic copy of len(ts.heap).
	// We only update at unlock so that the len is always
	// the most recent unlocked length, not an ephemeral length.
	// This matters if we lock ts, delete the only timer from the heap,
	// add it back, and unlock. We want ts.len.Load to return 1 the
	// entire time, never 0. This is important for pidleput deciding
	// whether ts is empty.
	ts.len.Store(uint32(len(ts.heap)))

	unlock(&ts.mu)
}

// Timer state field.
const (
	// timerHeaped is set when the timer is stored in some P's heap.
	timerHeaped uint8 = 1 << iota

	// timerModified is set when t.when has been modified but
	// t.whenHeap still needs to be updated as well.
	// The change to t.whenHeap waits until the heap in which
	// the timer appears can be locked and rearranged.
	// timerModified is only set when timerHeaped is also set.
	timerModified

	// timerZombie is set when the timer has been stopped
	// but is still present in some P's heap.
	// Only set when timerHeaped is also set.
	// It is possible for timerModified and timerZombie to both
	// be set, meaning that the timer was modified and then stopped.
	// A timer sending to a channel may be placed in timerZombie
	// to take it out of the heap even though the timer is not stopped,
	// as long as nothing is reading from the channel.
	timerZombie
)

// timerDebug enables printing a textual debug trace of all timer operations to stderr.
const timerDebug = false

func (t *timer) trace(op string) {
	if timerDebug {
		t.trace1(op)
	}
}

func (t *timer) trace1(op string) {
	if !timerDebug {
		return
	}
	bits := [4]string{"h", "m", "z", "c"}
	for i := range 3 {
		if t.state&(1<<i) == 0 {
			bits[i] = "-"
		}
	}
	if !t.isChan {
		bits[3] = "-"
	}
	print("T ", t, " ", bits[0], bits[1], bits[2], bits[3], " b=", t.blocked, " ", op, "\n")
}

func (ts *timers) trace(op string) {
	if timerDebug {
		println("TS", ts, op)
	}
}

// lock locks the timer, allowing reading or writing any of the timer fields.
func (t *timer) lock() {
	lock(&t.mu)
	t.trace("lock")
}

// unlock updates t.astate and unlocks the timer.
func (t *timer) unlock() {
	t.trace("unlock")
	// Let heap fast paths know whether t.whenHeap is accurate.
	// Also let maybeRunChan know whether channel is in heap.
	t.astate.Store(t.state)
	unlock(&t.mu)
}

// hchan returns the channel in t.arg.
// t must be a timer with a channel.
func (t *timer) hchan() *hchan {
	if !t.isChan {
		badTimer()
	}
	// Note: t.arg is a chan time.Time,
	// and runtime cannot refer to that type,
	// so we cannot use a type assertion.
	return (*hchan)(efaceOf(&t.arg).data)
}

// updateHeap updates t.whenHeap as directed by t.state, updating t.state
// and returning a bool indicating whether the state (and t.whenHeap) changed.
// The caller must hold t's lock, or the world can be stopped instead.
// If ts != nil, then ts must be locked, t must be ts.heap[0], and updateHeap
// takes care of moving t within the timers heap to preserve the heap invariants.
// If ts == nil, then t must not be in a heap (or is in a heap that is
// temporarily not maintaining its invariant, such as during timers.adjust).
func (t *timer) updateHeap(ts *timers) (updated bool) {
	assertWorldStoppedOrLockHeld(&t.mu)
	t.trace("updateHeap")
	if ts != nil {
		if t.ts != ts || t != ts.heap[0] {
			badTimer()
		}
		assertLockHeld(&ts.mu)
	}
	if t.state&timerZombie != 0 {
		// Take timer out of heap, applying final t.whenHeap update first.
		t.state &^= timerHeaped | timerZombie
		if t.state&timerModified != 0 {
			t.state &^= timerModified
			t.whenHeap = t.when
		}
		if ts != nil {
			ts.zombies.Add(-1)
			ts.deleteMin()
		}
		return true
	}

	if t.state&timerModified != 0 {
		// Apply t.whenHeap update and move within heap.
		t.state &^= timerModified
		t.whenHeap = t.when
		// Move t to the right position.
		if ts != nil {
			ts.siftDown(0)
			ts.updateMinWhenHeap()
		}
		return true
	}

	return false
}

// maxWhen is the maximum value for timer's when field.
const maxWhen = 1<<63 - 1

// verifyTimers can be set to true to add debugging checks that the
// timer heaps are valid.
const verifyTimers = false

// Package time APIs.
// Godoc uses the comments in package time, not these.

// time.now is implemented in assembly.
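
// Illustrative sketch (not part of the runtime): the linknamed entry points
// below are reached through ordinary package time calls. Assuming package
// time's wrappers are thin, user code such as
//
//	package main
//
//	import "time"
//
//	func main() {
//		time.Sleep(10 * time.Millisecond) // -> timeSleep via //go:linkname
//		tm := time.NewTimer(time.Second)  // -> newTimer with a channel-backed timer
//		<-tm.C
//	}
//
// ends up in timeSleep and newTimer in this file. Only the time.Sleep and
// time.newTimer linknames are stated below; the exact call chain inside
// package time is an assumption here.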

// timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
//
//go:linkname timeSleep time.Sleep
func timeSleep(ns int64) {
	if ns <= 0 {
		return
	}

	gp := getg()
	t := gp.timer
	if t == nil {
		t = new(timer)
		t.init(goroutineReady, gp)
		gp.timer = t
	}
	when := nanotime() + ns
	if when < 0 { // check for overflow.
		when = maxWhen
	}
	gp.sleepWhen = when
	gopark(resetForSleep, nil, waitReasonSleep, traceBlockSleep, 1)
}

// resetForSleep is called after the goroutine is parked for timeSleep.
// We can't call timer.reset in timeSleep itself because if this is a short
// sleep and there are many goroutines then the P can wind up running the
// timer function, goroutineReady, before the goroutine has been parked.
func resetForSleep(gp *g, _ unsafe.Pointer) bool {
	gp.timer.reset(gp.sleepWhen, 0)
	return true
}

// A timeTimer is a runtime-allocated time.Timer or time.Ticker
// with the additional runtime state following it.
// The runtime state is inaccessible to package time.
type timeTimer struct {
	c    unsafe.Pointer // <-chan time.Time
	init bool
	timer
}

// newTimer allocates and returns a new time.Timer or time.Ticker (same layout)
// with the given parameters.
//
//go:linkname newTimer time.newTimer
func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, c *hchan) *timeTimer {
	t := new(timeTimer)
	t.timer.init(nil, nil)
	t.trace("new")
	if raceenabled {
		racerelease(unsafe.Pointer(&t.timer))
	}
	if c != nil {
		lockInit(&t.sendLock, lockRankTimerSend)
		t.isChan = true
		c.timer = &t.timer
		if c.dataqsiz == 0 {
			throw("invalid timer channel: no capacity")
		}
	}
	t.modify(when, period, f, arg, 0)
	t.init = true
	return t
}

// stopTimer stops a timer.
// It reports whether t was stopped before being run.
//
//go:linkname stopTimer time.stopTimer
func stopTimer(t *timeTimer) bool {
	return t.stop()
}

// resetTimer resets an inactive timer, adding it to the timer heap.
//
// Reports whether the timer was modified before it was run.
//
//go:linkname resetTimer time.resetTimer
func resetTimer(t *timeTimer, when, period int64) bool {
	if raceenabled {
		racerelease(unsafe.Pointer(&t.timer))
	}
	return t.reset(when, period)
}

// Go runtime.

// Ready the goroutine arg.
func goroutineReady(arg any, _ uintptr, _ int64) {
	goready(arg.(*g), 0)
}

// addHeap adds t to the timers heap.
// The caller must hold ts.lock or the world must be stopped.
// The caller must also have checked that t belongs in the heap.
// Callers that are not sure can call t.maybeAdd instead,
// but note that maybeAdd has different locking requirements.
func (ts *timers) addHeap(t *timer) {
	assertWorldStoppedOrLockHeld(&ts.mu)
	// Timers rely on the network poller, so make sure the poller
	// has started.
	if netpollInited.Load() == 0 {
		netpollGenericInit()
	}

	if t.ts != nil {
		throw("ts set in timer")
	}
	t.ts = ts
	t.whenHeap = t.when
	ts.heap = append(ts.heap, t)
	ts.siftUp(len(ts.heap) - 1)
	if t == ts.heap[0] {
		ts.updateMinWhenHeap()
	}
}
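
// Illustrative sketch (assumption about the package time side, not runtime
// code): the (when, period) pair used throughout this file is an absolute
// nanotime deadline plus an optional repeat interval. Roughly, for a duration
// of d nanoseconds:
//
//	when := nanotime() + d
//	// one-shot timer (time.NewTimer-style):    newTimer(when, 0, f, arg, c)
//	// repeating ticker (time.NewTicker-style): newTimer(when, d, f, arg, c)
//
// where f, arg, and c stand for whatever callback, argument, and channel the
// caller supplies. period == 0 means "fire once"; period > 0 means "refire
// every period nanoseconds", matching the t.period handling in unlockAndRun.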

// maybeRunAsync checks whether t needs to be triggered and runs it if so.
// The caller is responsible for locking the timer and for checking that we
// are running timers in async mode. If the timer needs to be run,
// maybeRunAsync will unlock and re-lock it.
// The timer is always locked on return.
func (t *timer) maybeRunAsync() {
	assertLockHeld(&t.mu)
	if t.state&timerHeaped == 0 && t.isChan && t.when > 0 {
		// If timer should have triggered already (but nothing looked at it yet),
		// trigger now, so that a receive after the stop sees the "old" value
		// that should be there.
		// (It is possible to have t.blocked > 0 if there is a racing receive
		// in blockTimerChan, but timerHeaped not being set means
		// it hasn't run t.maybeAdd yet; in that case, running the
		// timer ourselves now is fine.)
		if now := nanotime(); t.when <= now {
			systemstack(func() {
				t.unlockAndRun(now) // resets t.when
			})
			t.lock()
		}
	}
}

// stop stops the timer t. It may be on some other P, so we can't
// actually remove it from the timers heap. We can only mark it as stopped.
// It will be removed in due course by the P whose heap it is on.
// Reports whether the timer was stopped before it was run.
func (t *timer) stop() bool {
	async := debug.asynctimerchan.Load() != 0
	if !async && t.isChan {
		lock(&t.sendLock)
	}

	t.lock()
	t.trace("stop")
	if async {
		t.maybeRunAsync()
	}
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if t.state&timerZombie == 0 {
			t.state |= timerZombie
			t.ts.zombies.Add(1)
		}
	}
	pending := t.when > 0
	t.when = 0

	if !async && t.isChan {
		// Stop any future sends with stale values.
		// See timer.unlockAndRun.
		t.seq++
	}
	t.unlock()
	if !async && t.isChan {
		unlock(&t.sendLock)
		if timerchandrain(t.hchan()) {
			pending = true
		}
	}

	return pending
}

// deleteMin removes timer 0 from ts.
// ts must be locked.
func (ts *timers) deleteMin() {
	assertLockHeld(&ts.mu)
	t := ts.heap[0]
	if t.ts != ts {
		throw("wrong timers")
	}
	t.ts = nil
	last := len(ts.heap) - 1
	if last > 0 {
		ts.heap[0] = ts.heap[last]
	}
	ts.heap[last] = nil
	ts.heap = ts.heap[:last]
	if last > 0 {
		ts.siftDown(0)
	}
	ts.updateMinWhenHeap()
	if last == 0 {
		// If there are no timers, then clearly there are no timerModified timers.
		ts.minWhenModified.Store(0)
	}
}

// modify modifies an existing timer.
// This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset.
// Reports whether the timer was modified before it was run.
// If f == nil, then t.f, t.arg, and t.seq are not modified.
func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay int64), arg any, seq uintptr) bool {
	if when <= 0 {
		throw("timer when must be positive")
	}
	if period < 0 {
		throw("timer period must be non-negative")
	}
	async := debug.asynctimerchan.Load() != 0

	if !async && t.isChan {
		lock(&t.sendLock)
	}

	t.lock()
	if async {
		t.maybeRunAsync()
	}
	t.trace("modify")
	t.period = period
	if f != nil {
		t.f = f
		t.arg = arg
		t.seq = seq
	}

	wake := false
	pending := t.when > 0
	t.when = when
	if t.state&timerHeaped != 0 {
		t.state |= timerModified
		if t.state&timerZombie != 0 {
			// In the heap but marked for removal (by a Stop).
			// Unmark it, since it has been Reset and will be running again.
			t.ts.zombies.Add(-1)
			t.state &^= timerZombie
		}
		// Cannot modify t.whenHeap until t.ts is locked.
		// See comment in type timer above and in timers.adjust below.
		if when < t.whenHeap {
			wake = true
			t.ts.updateMinWhenModified(when)
		}
	}

	add := t.needsAdd()

	if !async && t.isChan {
		// Stop any future sends with stale values.
		// See timer.unlockAndRun.
		t.seq++
	}
	t.unlock()
	if !async && t.isChan {
		if timerchandrain(t.hchan()) {
			pending = true
		}
		unlock(&t.sendLock)
	}

	if add {
		t.maybeAdd()
	}
	if wake {
		wakeNetPoller(when)
	}

	return pending
}

// needsAdd reports whether t needs to be added to a timers heap.
// t must be locked.
func (t *timer) needsAdd() bool {
	assertLockHeld(&t.mu)
	need := t.state&timerHeaped == 0 && t.when > 0 && (!t.isChan || t.blocked > 0)
	if need {
		t.trace("needsAdd+")
	} else {
		t.trace("needsAdd-")
	}
	return need
}

// maybeAdd adds t to the local timers heap if it needs to be in a heap.
// The caller must not hold t's lock nor any timers heap lock.
// The caller probably just unlocked t, but that lock must be dropped
// in order to acquire a ts.lock, to avoid lock inversions.
// (timers.adjust holds ts.lock while acquiring each t's lock,
// so we cannot hold any t's lock while acquiring ts.lock).
//
// Strictly speaking it *might* be okay to hold t.lock and
// acquire ts.lock at the same time, because we know that
// t is not in any ts.heap, so nothing holding a ts.lock would
// be acquiring the t.lock at the same time, meaning there
// isn't a possible deadlock. But it is easier and safer not to be
// too clever and respect the static ordering.
// (If we don't, we have to change the static lock checking of t and ts.)
//
// Concurrent calls to time.Timer.Reset or blockTimerChan
// may result in concurrent calls to t.maybeAdd,
// so we cannot assume that t is not in a heap on entry to t.maybeAdd.
func (t *timer) maybeAdd() {
	// Note: Not holding any locks on entry to t.maybeAdd,
	// so the current g can be rescheduled to a different M and P
	// at any time, including between the ts := assignment and the
	// call to ts.lock. If a reschedule happened then, we would be
	// adding t to some other P's timers, perhaps even a P that the scheduler
	// has marked as idle with no timers, in which case the timer could
	// go unnoticed until long after t.when.
	// Calling acquirem instead of using getg().m makes sure that
	// we end up locking and inserting into the current P's timers.
	mp := acquirem()
	ts := &mp.p.ptr().timers
	ts.lock()
	ts.cleanHead()
	t.lock()
	t.trace("maybeAdd")
	when := int64(0)
	if t.needsAdd() {
		t.state |= timerHeaped
		when = t.when
		ts.addHeap(t)
	}
	t.unlock()
	ts.unlock()
	releasem(mp)
	if when > 0 {
		wakeNetPoller(when)
	}
}

// reset resets the time when a timer should fire.
// If used for an inactive timer, the timer will become active.
// Reports whether the timer was active and was stopped.
func (t *timer) reset(when, period int64) bool {
	return t.modify(when, period, nil, nil, 0)
}
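
// Illustrative sketch (not runtime code): the lock ordering maybeAdd must
// respect is ts.mu before t.mu, so code that holds t.mu and wants to touch a
// heap follows a drop-and-reacquire pattern:
//
//	t.unlock() // drop t.mu first
//	ts.lock()  // then take the heap lock
//	t.lock()   // then t.mu again, re-checking any state that may have changed
//	// ... heap manipulation ...
//	t.unlock()
//	ts.unlock()
//
// which is what maybeAdd above does, with acquirem pinning the M so the heap
// chosen is the current P's.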

// cleanHead cleans up the head of the timer queue. This speeds up
// programs that create and delete timers; leaving them in the heap
// slows down heap operations.
// The caller must have locked ts.
func (ts *timers) cleanHead() {
	ts.trace("cleanHead")
	assertLockHeld(&ts.mu)
	gp := getg()
	for {
		if len(ts.heap) == 0 {
			return
		}

		// This loop can theoretically run for a while, and because
		// it is holding timersLock it cannot be preempted.
		// If someone is trying to preempt us, just return.
		// We can clean the timers later.
		if gp.preemptStop {
			return
		}

		t := ts.heap[0]
		if t.ts != ts {
			throw("bad ts")
		}

		if t.astate.Load()&(timerModified|timerZombie) == 0 {
			// Fast path: head of timers does not need adjustment.
			return
		}

		t.lock()
		updated := t.updateHeap(ts)
		t.unlock()
		if !updated {
			// Head of timers does not need adjustment.
			return
		}
	}
}

// take moves any timers from src into ts
// and then clears the timer state from src,
// because src is being destroyed.
// The caller must not have locked either timers.
// For now this is only called when the world is stopped.
func (ts *timers) take(src *timers) {
	ts.trace("take")
	assertWorldStopped()
	if len(src.heap) > 0 {
		// The world is stopped, so we ignore the locking of ts and src here.
		// That would introduce a sched < timers lock ordering,
		// which we'd rather avoid in the static ranking.
		ts.move(src.heap)
		src.heap = nil
		src.zombies.Store(0)
		src.minWhenHeap.Store(0)
		src.minWhenModified.Store(0)
		src.len.Store(0)
		ts.len.Store(uint32(len(ts.heap)))
	}
}

// move moves a slice of timers into ts. The slice has been taken
// from a different P.
// The world must be stopped so that ts is safe to modify.
func (ts *timers) move(timers []*timer) {
	assertWorldStopped()
	for _, t := range timers {
		t.ts = nil
		t.updateHeap(nil)
		if t.state&timerHeaped != 0 {
			ts.addHeap(t)
		}
	}
}

// adjust looks through the timers in ts.heap for
// any timers that have been modified to run earlier, and puts them in
// the correct place in the heap. While looking for those timers,
// it also moves timers that have been modified to run later,
// and removes deleted timers. The caller must have locked ts.
func (ts *timers) adjust(now int64, force bool) {
	ts.trace("adjust")
	assertLockHeld(&ts.mu)
	// If we haven't yet reached the time of the earliest modified
	// timer, don't do anything. This speeds up programs that adjust
	// a lot of timers back and forth if the timers rarely expire.
	// We'll postpone looking through all the adjusted timers until
	// one would actually expire.
	if !force {
		first := ts.minWhenModified.Load()
		if first == 0 || first > now {
			if verifyTimers {
				ts.verify()
			}
			return
		}
	}

	// minWhenModified is a lower bound on the earliest t.when
	// among the timerModified timers. We want to make it more precise:
	// we are going to scan the heap and clean out all the timerModified bits,
	// at which point minWhenModified can be set to 0 (indicating none at all).
	//
	// Other P's can be calling ts.wakeTime concurrently, and we'd like to
	// keep ts.wakeTime returning an accurate value throughout this entire process.
	//
	// Setting minWhenModified = 0 *before* the scan could make wakeTime
	// return an incorrect value: if minWhenModified < minWhenHeap, then clearing
	// it to 0 will make wakeTime return minWhenHeap (too late) until the scan finishes.
	// To avoid that, we want to set minWhenModified to 0 *after* the scan.
	//
	// Setting minWhenModified = 0 *after* the scan could result in missing
	// concurrent timer modifications in other goroutines; those will lock
	// the specific timer, set the timerModified bit, and set t.when.
	// To avoid that, we want to set minWhenModified to 0 *before* the scan.
	//
	// The way out of this dilemma is to preserve wakeTime a different way.
	// wakeTime is min(minWhenHeap, minWhenModified), and minWhenHeap
	// is protected by ts.lock, which we hold, so we can modify it however we like
	// in service of keeping wakeTime accurate.
	//
	// So we can:
	//
	//	1. Set minWhenHeap = min(minWhenHeap, minWhenModified)
	//	2. Set minWhenModified = 0
	//	   (Other goroutines may modify timers and update minWhenModified now.)
	//	3. Scan timers
	//	4. Set minWhenHeap = heap[0].whenHeap
	//
	// That order preserves a correct value of wakeTime throughout the entire
	// operation:
	// Step 1 “locks in” an accurate wakeTime even with minWhenModified cleared.
	// Step 2 makes sure concurrent t.when updates are not lost during the scan.
	// Step 3 processes all modified timer values, justifying minWhenModified = 0.
	// Step 4 corrects minWhenHeap to a precise value.
	//
	// The wakeTime method implementation reads minWhenModified *before* minWhenHeap,
	// so that if the minWhenModified is observed to be 0, that means the minWhenHeap that
	// follows will include the information that was zeroed out of it.
	ts.minWhenHeap.Store(ts.wakeTime())
	ts.minWhenModified.Store(0)

	changed := false
	for i := 0; i < len(ts.heap); i++ {
		t := ts.heap[i]
		if t.ts != ts {
			throw("bad ts")
		}

		if t.astate.Load()&(timerModified|timerZombie) == 0 {
			// Does not need adjustment.
			continue
		}

		t.lock()
		if t.state&timerHeaped == 0 {
			badTimer()
		}
		if t.state&timerZombie != 0 {
			ts.zombies.Add(-1) // updateHeap will return updated=true and we will delete t
		}
		if t.updateHeap(nil) {
			changed = true
			if t.state&timerHeaped == 0 {
				n := len(ts.heap)
				ts.heap[i] = ts.heap[n-1]
				ts.heap[n-1] = nil
				ts.heap = ts.heap[:n-1]
				t.ts = nil
				i--
			}
		}
		t.unlock()
	}

	if changed {
		ts.initHeap()
	}
	ts.updateMinWhenHeap()

	if verifyTimers {
		ts.verify()
	}
}

// wakeTime looks at ts's timers and returns the time when we
// should wake up the netpoller. It returns 0 if there are no timers.
// This function is invoked when dropping a P, so it must run without
// any write barriers.
//
//go:nowritebarrierrec
func (ts *timers) wakeTime() int64 {
	// Note that the order of these two loads matters:
	// adjust updates minWhenHeap to make it safe to clear minWhenModified.
	// We read minWhenHeap after reading minWhenModified so that
	// if we see a cleared minWhenModified, we are guaranteed to see
	// the updated minWhenHeap.
	nextWhen := ts.minWhenModified.Load()
	when := ts.minWhenHeap.Load()
	if when == 0 || (nextWhen != 0 && nextWhen < when) {
		when = nextWhen
	}
	return when
}
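
// Worked example (illustrative only): suppose minWhenHeap = 100 and
// minWhenModified = 40 when adjust starts. Step 1 stores min(100, 40) = 40
// into minWhenHeap; step 2 clears minWhenModified to 0. A concurrent
// wakeTime now loads minWhenModified (0) and then minWhenHeap (40) and still
// reports 40, so the wake-up deadline is never observed later than it really
// is while the scan in step 3 runs. Step 4 then restores minWhenHeap to the
// true heap[0].whenHeap.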

// check runs any timers in ts that are ready.
// If now is not 0 it is the current time.
// It returns the passed time or the current time if now was passed as 0,
// and the time when the next timer should run or 0 if there is no next timer,
// and reports whether it ran any timers.
// If the time when the next timer should run is not 0,
// it is always larger than the returned time.
// We pass now in and out to avoid extra calls of nanotime.
//
//go:yeswritebarrierrec
func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) {
	ts.trace("check")
	// If it's not yet time for the first timer, or the first adjusted
	// timer, then there is nothing to do.
	next := ts.wakeTime()
	if next == 0 {
		// No timers to run or adjust.
		return now, 0, false
	}

	if now == 0 {
		now = nanotime()
	}

	// If this is the local P, and there are a lot of deleted timers,
	// clear them out. We only do this for the local P to reduce
	// lock contention on timersLock.
	zombies := ts.zombies.Load()
	if zombies < 0 {
		badTimer()
	}
	force := ts == &getg().m.p.ptr().timers && int(zombies) > int(ts.len.Load())/4

	if now < next && !force {
		// Next timer is not ready to run, and we don't need to clear deleted timers.
		return now, next, false
	}

	ts.lock()
	if len(ts.heap) > 0 {
		ts.adjust(now, force)
		for len(ts.heap) > 0 {
			// Note that ts.run may temporarily unlock ts.
			if tw := ts.run(now); tw != 0 {
				if tw > 0 {
					pollUntil = tw
				}
				break
			}
			ran = true
		}
	}
	ts.unlock()

	return now, pollUntil, ran
}

// run examines the first timer in ts. If it is ready based on now,
// it runs the timer and removes or updates it.
// Returns 0 if it ran a timer, -1 if there are no more timers, or the time
// when the first timer should run.
// The caller must have locked ts.
// If a timer is run, this will temporarily unlock ts.
//
//go:systemstack
func (ts *timers) run(now int64) int64 {
	ts.trace("run")
	assertLockHeld(&ts.mu)
Redo:
	if len(ts.heap) == 0 {
		return -1
	}
	t := ts.heap[0]
	if t.ts != ts {
		throw("bad ts")
	}

	if t.astate.Load()&(timerModified|timerZombie) == 0 && t.whenHeap > now {
		// Fast path: not ready to run.
		// The access of t.whenHeap is protected by the caller holding
		// ts.lock, even though t itself is unlocked.
		return t.whenHeap
	}

	t.lock()
	if t.updateHeap(ts) {
		t.unlock()
		goto Redo
	}

	if t.state&timerHeaped == 0 || t.state&timerModified != 0 {
		badTimer()
	}

	if t.when > now {
		// Not ready to run.
		t.unlock()
		return t.when
	}

	t.unlockAndRun(now)
	assertLockHeld(&ts.mu) // t is unlocked now, but not ts
	return 0
}
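
// Worked example (illustrative only): in check above, forced cleanup of
// deleted timers kicks in on the local P when zombies outnumber a quarter of
// the heap. With ts.len = 40 and ts.zombies = 11, 11 > 40/4 = 10, so check
// calls adjust(now, true) even if no timer is ready yet, pruning the zombies.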

// unlockAndRun unlocks and runs the timer t (which must be locked).
// If t is in a timer set (t.ts != nil), the caller must also have locked the timer set,
// and this call will temporarily unlock the timer set while running the timer function.
// unlockAndRun returns with t unlocked and t.ts (re-)locked.
//
//go:systemstack
func (t *timer) unlockAndRun(now int64) {
	t.trace("unlockAndRun")
	assertLockHeld(&t.mu)
	if t.ts != nil {
		assertLockHeld(&t.ts.mu)
	}
	if raceenabled {
		// Note that we are running on a system stack,
		// so there is no chance of getg().m being reassigned
		// out from under us while this function executes.
		tsLocal := &getg().m.p.ptr().timers
		if tsLocal.raceCtx == 0 {
			tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum)
		}
		raceacquirectx(tsLocal.raceCtx, unsafe.Pointer(t))
	}

	if t.state&(timerModified|timerZombie) != 0 {
		badTimer()
	}

	f := t.f
	arg := t.arg
	seq := t.seq
	var next int64
	delay := now - t.when
	if t.period > 0 {
		// Leave in heap but adjust next time to fire.
		next = t.when + t.period*(1+delay/t.period)
		if next < 0 { // check for overflow.
			next = maxWhen
		}
	} else {
		next = 0
	}
	if t.state&timerHeaped != 0 {
		t.when = next
		t.state |= timerModified
		if next == 0 {
			t.state |= timerZombie
			t.ts.zombies.Add(1)
		}
	} else {
		t.when = next
	}
	ts := t.ts
	t.updateHeap(ts)
	t.unlock()

	if raceenabled {
		// Temporarily use the current P's racectx for g0.
		gp := getg()
		if gp.racectx != 0 {
			throw("unexpected racectx")
		}
		gp.racectx = gp.m.p.ptr().timers.raceCtx
	}

	if ts != nil {
		ts.unlock()
	}

	async := debug.asynctimerchan.Load() != 0
	if !async && t.isChan {
		// For a timer channel, we want to make sure that no stale sends
		// happen after a t.stop or t.modify, but we cannot hold t.mu
		// during the actual send (which f does) due to lock ordering.
		// It can happen that we are holding t's lock above, we decide
		// it's time to send a time value (by calling f), grab the parameters,
		// unlock above, and then a t.stop or t.modify changes the timer
		// and returns. At that point, the send should not happen after all.
		// The way we arrange for it not to happen is that t.stop and t.modify
		// both increment t.seq while holding both t.mu and t.sendLock.
		// We copied the seq value above while holding t.mu.
		// Now we can acquire t.sendLock (which will be held across the send)
		// and double-check that t.seq is still the seq value we saw above.
		// If not, the timer has been updated and we should skip the send.
		// We skip the send by reassigning f to a no-op function.
		lock(&t.sendLock)
		if t.seq != seq {
			f = func(any, uintptr, int64) {}
		}
	}

	f(arg, seq, delay)

	if !async && t.isChan {
		unlock(&t.sendLock)
	}

	if ts != nil {
		ts.lock()
	}

	if raceenabled {
		gp := getg()
		gp.racectx = 0
	}
}
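
// Illustrative sequence (not runtime code): the seq check above prevents
// stale sends on a timer channel. Suppose unlockAndRun copies seq == 5 and
// drops t.mu, and a concurrent Reset then acquires t.sendLock and t.mu,
// bumps t.seq to 6, and returns before the send happens. When unlockAndRun
// later acquires t.sendLock it sees t.seq (6) != 5, so it swaps f for a
// no-op and the out-of-date time value is never delivered on the channel.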

// verify verifies that the timers heap is in a valid state.
// This is only for debugging, and is only called if verifyTimers is true.
// The caller must have locked ts.
func (ts *timers) verify() {
	assertLockHeld(&ts.mu)
	for i, t := range ts.heap {
		if i == 0 {
			// First timer has no parent.
			continue
		}

		// The heap is 4-ary. See siftUp and siftDown.
		p := (i - 1) / 4
		if t.whenHeap < ts.heap[p].whenHeap {
			print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].whenHeap, ", ", i, ": ", t.whenHeap, "\n")
			throw("bad timer heap")
		}
	}
	if n := int(ts.len.Load()); len(ts.heap) != n {
		println("timer heap len", len(ts.heap), "!= atomic len", n)
		throw("bad timer heap len")
	}
}

// updateMinWhenHeap sets ts.minWhenHeap to ts.heap[0].whenHeap.
// The caller must have locked ts or the world must be stopped.
func (ts *timers) updateMinWhenHeap() {
	assertWorldStoppedOrLockHeld(&ts.mu)
	if len(ts.heap) == 0 {
		ts.minWhenHeap.Store(0)
	} else {
		ts.minWhenHeap.Store(ts.heap[0].whenHeap)
	}
}

// updateMinWhenModified updates ts.minWhenModified to be <= when.
// ts need not be (and usually is not) locked.
func (ts *timers) updateMinWhenModified(when int64) {
	for {
		old := ts.minWhenModified.Load()
		if old != 0 && old < when {
			return
		}
		if ts.minWhenModified.CompareAndSwap(old, when) {
			return
		}
	}
}

// timeSleepUntil returns the time when the next timer should fire. Returns
// maxWhen if there are no timers.
// This is only called by sysmon and checkdead.
func timeSleepUntil() int64 {
	next := int64(maxWhen)

	// Prevent allp slice changes. This is like retake.
	lock(&allpLock)
	for _, pp := range allp {
		if pp == nil {
			// This can happen if procresize has grown
			// allp but not yet created new Ps.
			continue
		}

		if w := pp.timers.wakeTime(); w != 0 {
			next = min(next, w)
		}
	}
	unlock(&allpLock)

	return next
}

// Heap maintenance algorithms.
// These algorithms check for slice index errors manually.
// Slice index error can happen if the program is using racy
// access to timers. We don't want to panic here, because
// it will cause the program to crash with a mysterious
// "panic holding locks" message. Instead, we panic while not
// holding a lock.

// siftUp puts the timer at position i in the right place
// in the heap by moving it up toward the top of the heap.
func (ts *timers) siftUp(i int) {
	t := ts.heap
	if i >= len(t) {
		badTimer()
	}
	when := t[i].whenHeap
	if when <= 0 {
		badTimer()
	}
	tmp := t[i]
	for i > 0 {
		p := (i - 1) / 4 // parent
		if when >= t[p].whenHeap {
			break
		}
		t[i] = t[p]
		i = p
	}
	if tmp != t[i] {
		t[i] = tmp
	}
}
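
// Illustrative note (not runtime code): the heap here is 4-ary, so the index
// arithmetic used by verify, siftUp, and siftDown is
//
//	parent(i)   = (i - 1) / 4
//	children(i) = 4*i + 1, ..., 4*i + 4
//
// For example, node 3's children are nodes 13 through 16, and
// (13-1)/4 == 3 recovers the parent; the c := i*4 + 1 and c3 := c + 2 lines
// in siftDown below walk those four children in two pairs.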

// siftDown puts the timer at position i in the right place
// in the heap by moving it down toward the bottom of the heap.
func (ts *timers) siftDown(i int) {
	t := ts.heap
	n := len(t)
	if i >= n {
		badTimer()
	}
	when := t[i].whenHeap
	if when <= 0 {
		badTimer()
	}
	tmp := t[i]
	for {
		c := i*4 + 1 // left child
		c3 := c + 2  // mid child
		if c >= n {
			break
		}
		w := t[c].whenHeap
		if c+1 < n && t[c+1].whenHeap < w {
			w = t[c+1].whenHeap
			c++
		}
		if c3 < n {
			w3 := t[c3].whenHeap
			if c3+1 < n && t[c3+1].whenHeap < w3 {
				w3 = t[c3+1].whenHeap
				c3++
			}
			if w3 < w {
				w = w3
				c = c3
			}
		}
		if w >= when {
			break
		}
		t[i] = t[c]
		i = c
	}
	if tmp != t[i] {
		t[i] = tmp
	}
}

// initHeap reestablishes the heap order in the slice ts.heap.
// It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations.
func (ts *timers) initHeap() {
	// Last possible element that needs sifting down is parent of last element;
	// last element is len(t)-1; parent of last element is (len(t)-1-1)/4.
	if len(ts.heap) <= 1 {
		return
	}
	for i := (len(ts.heap) - 1 - 1) / 4; i >= 0; i-- {
		ts.siftDown(i)
	}
}

// badTimer is called if the timer data structures have been corrupted,
// presumably due to racy use by the program. We panic here rather than
// panicking due to invalid slice access while holding locks.
// See issue #25686.
func badTimer() {
	throw("timer data corruption")
}

// Timer channels.

// maybeRunChan checks whether the timer needs to run
// to send a value to its associated channel. If so, it does.
// The timer must not be locked.
func (t *timer) maybeRunChan() {
	if t.astate.Load()&timerHeaped != 0 {
		// If the timer is in the heap, the ordinary timer code
		// is in charge of sending when appropriate.
		return
	}

	t.lock()
	now := nanotime()
	if t.state&timerHeaped != 0 || t.when == 0 || t.when > now {
		t.trace("maybeRunChan-")
		// Timer in the heap, or not running at all, or not triggered.
		t.unlock()
		return
	}
	t.trace("maybeRunChan+")
	systemstack(func() {
		t.unlockAndRun(now)
	})
}

// blockTimerChan is called when a channel op has decided to block on c.
// The caller holds the channel lock for c and possibly other channels.
// blockTimerChan makes sure that c's timer is in a timer heap,
// adding it if needed.
func blockTimerChan(c *hchan) {
	t := c.timer
	t.lock()
	t.trace("blockTimerChan")
	if !t.isChan {
		badTimer()
	}

	t.blocked++

	// If this is the first enqueue after a recent dequeue,
	// the timer may still be in the heap but marked as a zombie.
	// Unmark it in this case, if the timer is still pending.
	if t.state&timerHeaped != 0 && t.state&timerZombie != 0 && t.when > 0 {
		t.state &^= timerZombie
		t.ts.zombies.Add(-1)
	}

	// t.maybeAdd must be called with t unlocked,
	// because it needs to lock t.ts before t.
	// Then it will do nothing if t.needsAdd() is false.
	// Check that now before the unlock,
	// avoiding the extra lock-lock-unlock-unlock
	// inside maybeAdd when t does not need to be added.
	add := t.needsAdd()
	t.unlock()
	if add {
		t.maybeAdd()
	}
}

// unblockTimerChan is called when a channel op that was blocked on c
// is no longer blocked. Every call to blockTimerChan must be paired with
// a call to unblockTimerChan.
// The caller holds the channel lock for c and possibly other channels.
// unblockTimerChan removes c from the timer heap when nothing is
// blocked on it anymore.
func unblockTimerChan(c *hchan) {
	t := c.timer
	t.lock()
	t.trace("unblockTimerChan")
	if !t.isChan || t.blocked == 0 {
		badTimer()
	}
	t.blocked--
	if t.blocked == 0 && t.state&timerHeaped != 0 && t.state&timerZombie == 0 {
		// Last goroutine that was blocked on this timer.
		// Mark for removal from heap but do not clear t.when,
		// so that we know what time it is still meant to trigger.
		t.state |= timerZombie
		t.ts.zombies.Add(1)
	}
	t.unlock()
}
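
// Illustrative sketch (not part of the runtime): from the user's side, the
// blocked/zombie accounting above corresponds to ordinary channel-timer use,
// for example (ctx is assumed to be some context.Context; names are
// illustrative):
//
//	tm := time.NewTimer(time.Second)
//	defer tm.Stop()
//	select {
//	case <-tm.C: // blockTimerChan: blocked++, timer added to a heap if needed
//	case <-ctx.Done(): // unblockTimerChan on the way out: blocked--, timer may become a zombie
//	}
//
// Nothing here removes the timer from its heap eagerly; the owning P discards
// zombie timers later, during cleanHead, adjust, or run.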