github.com/ltltlt/go-source-code@v0.0.0-20190830023027-95be009773aa/runtime/time.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Time-related runtime and pieces of package time.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
// For GOOS=nacl, package syscall knows the layout of this structure.
// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
type timer struct {
	tb *timersBucket // the bucket the timer lives in
	i  int           // heap index

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(arg, now) in the timer goroutine, so f must be
	// a well-behaved function and not block.
	when   int64
	period int64
	f      func(interface{}, uintptr)
	arg    interface{}
	seq    uintptr
}

// timersLen is the length of timers array.
//
// Ideally, this would be set to GOMAXPROCS, but that would require
// dynamic reallocation.
//
// The current value is a compromise between memory usage and performance
// that should cover the majority of GOMAXPROCS values used in the wild.
const timersLen = 64

// timers contains "per-P" timer heaps.
//
// Timers are queued into timersBucket associated with the current P,
// so each P may work with its own timers independently of other P instances.
//
// Each timersBucket may be associated with multiple P
// if GOMAXPROCS > timersLen.
var timers [timersLen]struct {
	timersBucket

	// The padding should eliminate false sharing
	// between timersBucket values.
	pad [sys.CacheLineSize - unsafe.Sizeof(timersBucket{})%sys.CacheLineSize]byte
}

func (t *timer) assignBucket() *timersBucket {
	id := uint8(getg().m.p.ptr().id) % timersLen
	t.tb = &timers[id].timersBucket
	return t.tb
}

//go:notinheap
type timersBucket struct {
	lock         mutex
	gp           *g
	created      bool
	sleeping     bool
	rescheduling bool
	sleepUntil   int64
	waitnote     note
	t            []*timer
}

// nacl fake time support - time in nanoseconds since 1970
var faketime int64

// Package time APIs.
// Godoc uses the comments in package time, not these.

// time.now is implemented in assembly.

// timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
//go:linkname timeSleep time.Sleep
func timeSleep(ns int64) {
	if ns <= 0 {
		return
	}

	gp := getg()
	t := gp.timer
	if t == nil {
		t = new(timer)
		gp.timer = t
	}
	*t = timer{}
	t.when = nanotime() + ns
	t.f = goroutineReady
	t.arg = gp
	tb := t.assignBucket()
	lock(&tb.lock)
	tb.addtimerLocked(t)
	goparkunlock(&tb.lock, "sleep", traceEvGoSleep, 2)
}

// startTimer adds t to the timer heap.
//go:linkname startTimer time.startTimer
func startTimer(t *timer) {
	if raceenabled {
		racerelease(unsafe.Pointer(t))
	}
	addtimer(t)
}

// stopTimer removes t from the timer heap if it is there.
// It returns true if t was removed, false if t wasn't even there.
//go:linkname stopTimer time.stopTimer
func stopTimer(t *timer) bool {
	return deltimer(t)
}
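
// A minimal sketch of how the linkname'd entry points above are reached from
// user code, assuming the standard library wiring of this era (the concrete
// call sites live in ../time/sleep.go and are not reproduced here):
//
//	time.Sleep(d)          // bound to timeSleep above; parks the calling goroutine
//	tm := time.NewTimer(d) // calls startTimer on the Timer's embedded runtimeTimer
//	tm.Stop()              // calls stopTimer, i.e. deltimer below
//
// runtimeTimer in package time must keep the same layout as the timer struct
// declared at the top of this file.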

// Go runtime.

// Ready the goroutine arg.
func goroutineReady(arg interface{}, seq uintptr) {
	goready(arg.(*g), 0)
}

func addtimer(t *timer) {
	tb := t.assignBucket()
	lock(&tb.lock)
	tb.addtimerLocked(t)
	unlock(&tb.lock)
}

// Add a timer to the heap and start or kick timerproc if the new timer is
// earlier than any of the others.
// Timers are locked.
func (tb *timersBucket) addtimerLocked(t *timer) {
	// when must never be negative; otherwise timerproc will overflow
	// during its delta calculation and never expire other runtime timers.
	if t.when < 0 {
		t.when = 1<<63 - 1
	}
	t.i = len(tb.t)
	tb.t = append(tb.t, t)
	siftupTimer(tb.t, t.i)
	if t.i == 0 {
		// siftup moved to top: new earliest deadline.
		if tb.sleeping {
			tb.sleeping = false
			notewakeup(&tb.waitnote)
		}
		if tb.rescheduling {
			tb.rescheduling = false
			goready(tb.gp, 0)
		}
	}
	if !tb.created {
		tb.created = true
		go timerproc(tb)
	}
}

// Delete timer t from the heap.
// No need to update timerproc: if it wakes up early, no big deal.
func deltimer(t *timer) bool {
	if t.tb == nil {
		// t.tb can be nil if the user created a timer
		// directly, without invoking startTimer, e.g.
		//    time.Ticker{C: c}
		// In this case, return early without any deletion.
		// See Issue 21874.
		return false
	}

	tb := t.tb

	lock(&tb.lock)
	// t may not be registered anymore and may have
	// a bogus i (typically 0, if generated by Go).
	// Verify it before proceeding.
	i := t.i
	last := len(tb.t) - 1
	if i < 0 || i > last || tb.t[i] != t {
		unlock(&tb.lock)
		return false
	}
	if i != last {
		tb.t[i] = tb.t[last]
		tb.t[i].i = i
	}
	tb.t[last] = nil
	tb.t = tb.t[:last]
	if i != last {
		// The timer moved into slot i may be earlier than i's parent or
		// later than i's children, so restore the heap in both directions.
		siftupTimer(tb.t, i)
		siftdownTimer(tb.t, i)
	}
	unlock(&tb.lock)
	return true
}
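
// An illustrative note on the periodic-timer arithmetic in timerproc below
// (the concrete durations are made up for the example): when the earliest
// timer is overdue, delta = t.when - now is <= 0, and a periodic timer is
// rescheduled with
//
//	t.when += t.period * (1 + -delta/t.period)
//
// If t.when was now - 250ms and t.period is 100ms, then -delta/t.period is
// the integer 2, so t.when advances by 3 periods to now + 50ms, the first
// period boundary strictly after now. Missed ticks are skipped rather than
// delivered in a burst.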

// Timerproc runs the time-driven events.
// It sleeps until the next event in the tb heap.
// If addtimer inserts a new earlier event, it wakes timerproc early.
func timerproc(tb *timersBucket) {
	tb.gp = getg()
	for {
		lock(&tb.lock)
		tb.sleeping = false
		now := nanotime()
		delta := int64(-1)
		for {
			if len(tb.t) == 0 {
				delta = -1
				break
			}
			t := tb.t[0]
			delta = t.when - now
			if delta > 0 {
				break
			}
			if t.period > 0 {
				// leave in heap but adjust next time to fire
				t.when += t.period * (1 + -delta/t.period)
				siftdownTimer(tb.t, 0)
			} else {
				// remove from heap
				last := len(tb.t) - 1
				if last > 0 {
					tb.t[0] = tb.t[last]
					tb.t[0].i = 0
				}
				tb.t[last] = nil
				tb.t = tb.t[:last]
				if last > 0 {
					siftdownTimer(tb.t, 0)
				}
				t.i = -1 // mark as removed
			}
			f := t.f
			arg := t.arg
			seq := t.seq
			unlock(&tb.lock)
			if raceenabled {
				raceacquire(unsafe.Pointer(t))
			}
			f(arg, seq)
			lock(&tb.lock)
		}
		if delta < 0 || faketime > 0 {
			// No timers left - put goroutine to sleep.
			tb.rescheduling = true
			goparkunlock(&tb.lock, "timer goroutine (idle)", traceEvGoBlock, 1)
			continue
		}
		// At least one timer pending. Sleep until then.
		tb.sleeping = true
		tb.sleepUntil = now + delta
		noteclear(&tb.waitnote)
		unlock(&tb.lock)
		notetsleepg(&tb.waitnote, delta)
	}
}

func timejump() *g {
	if faketime == 0 {
		return nil
	}

	for i := range timers {
		lock(&timers[i].lock)
	}
	gp := timejumpLocked()
	for i := range timers {
		unlock(&timers[i].lock)
	}

	return gp
}

func timejumpLocked() *g {
	// Determine a timer bucket with minimum when.
	var minT *timer
	for i := range timers {
		tb := &timers[i]
		if !tb.created || len(tb.t) == 0 {
			continue
		}
		t := tb.t[0]
		if minT == nil || t.when < minT.when {
			minT = t
		}
	}
	if minT == nil || minT.when <= faketime {
		return nil
	}

	faketime = minT.when
	tb := minT.tb
	if !tb.rescheduling {
		return nil
	}
	tb.rescheduling = false
	return tb.gp
}

func timeSleepUntil() int64 {
	next := int64(1<<63 - 1)

	// Determine minimum sleepUntil across all the timer buckets.
	//
	// The function cannot return a precise answer,
	// as another timer may pop in as soon as timers have been unlocked.
	// So lock the timers one by one instead of all at once.
	for i := range timers {
		tb := &timers[i]

		lock(&tb.lock)
		if tb.sleeping && tb.sleepUntil < next {
			next = tb.sleepUntil
		}
		unlock(&tb.lock)
	}

	return next
}

// Heap maintenance algorithms.
// The timer heap is 4-ary: the parent of index i is (i-1)/4 and its
// children are indexes 4*i+1 through 4*i+4.

func siftupTimer(t []*timer, i int) {
	when := t[i].when
	tmp := t[i]
	for i > 0 {
		p := (i - 1) / 4 // parent
		if when >= t[p].when {
			break
		}
		t[i] = t[p]
		t[i].i = i
		i = p
	}
	if tmp != t[i] {
		t[i] = tmp
		t[i].i = i
	}
}

func siftdownTimer(t []*timer, i int) {
	n := len(t)
	when := t[i].when
	tmp := t[i]
	for {
		c := i*4 + 1 // left child
		c3 := c + 2  // mid child
		if c >= n {
			break
		}
		w := t[c].when
		if c+1 < n && t[c+1].when < w {
			w = t[c+1].when
			c++
		}
		if c3 < n {
			w3 := t[c3].when
			if c3+1 < n && t[c3+1].when < w3 {
				w3 = t[c3+1].when
				c3++
			}
			if w3 < w {
				w = w3
				c = c3
			}
		}
		if w >= when {
			break
		}
		t[i] = t[c]
		t[i].i = i
		i = c
	}
	if tmp != t[i] {
		t[i] = tmp
		t[i].i = i
	}
}

// Entry points for net, time to call nanotime.

//go:linkname poll_runtimeNano internal/poll.runtimeNano
func poll_runtimeNano() int64 {
	return nanotime()
}

//go:linkname time_runtimeNano time.runtimeNano
func time_runtimeNano() int64 {
	return nanotime()
}

// Monotonic times are reported as offsets from startNano.
// We initialize startNano to nanotime() - 1 so that on systems where
// monotonic time resolution is fairly low (e.g. Windows 2008
// which appears to have a default resolution of 15ms),
// we avoid ever reporting a nanotime of 0.
// (Callers may want to use 0 as "time not set".)
var startNano int64 = nanotime() - 1
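
// A worked example of the 4-ary heap indexing used by siftupTimer and
// siftdownTimer above (the timer count is hypothetical): with seven timers at
// t[0] through t[6], t[0] is the root, t[1] through t[4] are its children,
// and t[5] and t[6] are the first two children of t[1]. Sifting up from
// i = 6 compares against the parent (6-1)/4 = 1 and then against (1-1)/4 = 0;
// sifting down from the root picks the earliest of its children t[1]..t[4]
// and keeps swapping toward the leaves while that child fires earlier than
// the value being sifted.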