github.com/comwrg/go/src@v0.0.0-20220319063731-c238d0440370/time/sleep_test.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package time_test

import (
	"errors"
	"fmt"
	"math/rand"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	. "time"
)

// The Go runtime uses different Windows timers for time.Now and sleeping.
// These can tick at different frequencies and can arrive out of sync.
// The effect can be seen, for example, as time.Sleep(100ms) actually being
// shorter than 100ms when measured as the difference between time.Now before
// and after the time.Sleep call. This was observed on Windows XP SP3
// (windows/386). windowsInaccuracy is used to ignore such errors.
const windowsInaccuracy = 17 * Millisecond

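// adjustedMin is an illustrative sketch (a hypothetical helper, not part of
// the original file) showing how the tests below apply this adjustment: it
// returns the minimum elapsed time a test should accept for a nominal
// sleep of d.
func adjustedMin(d Duration) Duration {
	if runtime.GOOS == "windows" {
		return d - windowsInaccuracy // allow for the Windows timer skew described above
	}
	return d
}
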
func TestSleep(t *testing.T) {
	const delay = 100 * Millisecond
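	// Interrupt is a test hook from this package's internal test files; it
	// interrupts the process partway through the sleep. Sleep must still
	// last the full requested duration despite the interruption.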
	go func() {
		Sleep(delay / 2)
		Interrupt()
	}()
	start := Now()
	Sleep(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	duration := Now().Sub(start)
	if duration < delayadj {
		t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
	}
}

// Test the basic function calling behavior. Correct queueing
// behavior is tested elsewhere, since After and AfterFunc share
// the same code.
func TestAfterFunc(t *testing.T) {
	i := 10
	c := make(chan bool)
	var f func()
	f = func() {
		i--
		if i >= 0 {
			AfterFunc(0, f)
			Sleep(1 * Second)
		} else {
			c <- true
		}
	}

	AfterFunc(0, f)
	<-c
}

func TestAfterStress(t *testing.T) {
	stop := uint32(0)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
			// Yield so that the OS can wake up the timer thread,
			// so that it can generate channel sends for the main goroutine,
			// which will eventually set stop = 1 for us.
			Sleep(Nanosecond)
		}
	}()
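	// NewTicker(1) uses a 1ns period, the smallest interval the API
	// accepts, so ticks arrive as fast as the runtime can deliver them.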
	ticker := NewTicker(1)
	for i := 0; i < 100; i++ {
		<-ticker.C
	}
	ticker.Stop()
	atomic.StoreUint32(&stop, 1)
}

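// benchmark runs bench in parallel after seeding each P with a large batch
// of inactive timers, so the benchmarked operation runs against realistically
// populated timer heaps.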
func benchmark(b *testing.B, bench func(n int)) {

	// Create an equal number of garbage timers on each P before starting
	// the benchmark.
	var wg sync.WaitGroup
	garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
	for i := range garbageAll {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			garbage := make([]*Timer, 1<<15)
			for j := range garbage {
				garbage[j] = AfterFunc(Hour, nil)
			}
			garbageAll[i] = garbage
		}(i)
	}
	wg.Wait()

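	// Time only the benchmarked operation; each parallel worker runs the
	// body in batches of 1000 operations.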
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			bench(1000)
		}
	})
	b.StopTimer()

	for _, garbage := range garbageAll {
		for _, t := range garbage {
			t.Stop()
		}
	}
}

func BenchmarkAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		c := make(chan bool)
		var f func()
		f = func() {
			n--
			if n >= 0 {
				AfterFunc(0, f)
			} else {
				c <- true
			}
		}

		AfterFunc(0, f)
		<-c
	})
}

func BenchmarkAfter(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			<-After(1)
		}
	})
}

func BenchmarkStop(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			NewTimer(1 * Second).Stop()
		}
	})
}

func BenchmarkSimultaneousAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			AfterFunc(0, wg.Done)
		}
		wg.Wait()
	})
}

func BenchmarkStartStop(b *testing.B) {
	benchmark(b, func(n int) {
		timers := make([]*Timer, n)
		for i := 0; i < n; i++ {
			timers[i] = AfterFunc(Hour, nil)
		}

		for i := 0; i < n; i++ {
			timers[i].Stop()
		}
	})
}

func BenchmarkReset(b *testing.B) {
	benchmark(b, func(n int) {
		t := NewTimer(Hour)
		for i := 0; i < n; i++ {
			t.Reset(Hour)
		}
		t.Stop()
	})
}

func BenchmarkSleep(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			go func() {
				Sleep(Nanosecond)
				wg.Done()
			}()
		}
		wg.Wait()
	})
}

func TestAfter(t *testing.T) {
	const delay = 100 * Millisecond
	start := Now()
	end := <-After(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	if duration := Now().Sub(start); duration < delayadj {
		t.Fatalf("After(%s) slept for only %d ns", delay, duration)
	}
	if min := start.Add(delayadj); end.Before(min) {
		t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
	}
}

func TestAfterTick(t *testing.T) {
	const Count = 10
	Delta := 100 * Millisecond
	if testing.Short() {
		Delta = 10 * Millisecond
	}
	t0 := Now()
	for i := 0; i < Count; i++ {
		<-After(Delta)
	}
	t1 := Now()
	d := t1.Sub(t0)
	target := Delta * Count
	if d < target*9/10 {
		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
	}
	if !testing.Short() && d > target*30/10 {
		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
	}
}

func TestAfterStop(t *testing.T) {
	// We want to test that we stop a timer before it runs.
	// We also want to test that it didn't run after a longer timer.
	// Since we don't want the test to run for too long, we don't
	// want to use lengthy times. That makes the test inherently flaky.
	// So only report an error if it fails five times in a row.

	var errs []string
	logErrs := func() {
		for _, e := range errs {
			t.Log(e)
		}
	}

	for i := 0; i < 5; i++ {
		AfterFunc(100*Millisecond, func() {})
		t0 := NewTimer(50 * Millisecond)
		c1 := make(chan bool, 1)
		t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
		c2 := After(200 * Millisecond)
		if !t0.Stop() {
			errs = append(errs, "failed to stop event 0")
			continue
		}
		if !t1.Stop() {
			errs = append(errs, "failed to stop event 1")
			continue
		}
		<-c2
		select {
		case <-t0.C:
			errs = append(errs, "event 0 was not stopped")
			continue
		case <-c1:
			errs = append(errs, "event 1 was not stopped")
			continue
		default:
		}
		if t1.Stop() {
			errs = append(errs, "Stop returned true twice")
			continue
		}

		// Test passed, so all done.
		if len(errs) > 0 {
			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
			logErrs()
		}

		return
	}

	t.Errorf("saw %d errors", len(errs))
	logErrs()
}

func TestAfterQueuing(t *testing.T) {
	// This test flakes out on some systems,
	// so we'll try it a few times before declaring it a failure.
	const attempts = 5
	err := errors.New("!=nil")
	for i := 0; i < attempts && err != nil; i++ {
		delta := Duration(20+i*50) * Millisecond
		if err = testAfterQueuing(delta); err != nil {
			t.Logf("attempt %v failed: %v", i, err)
		}
	}
	if err != nil {
		t.Fatal(err)
	}
}

var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}

type afterResult struct {
	slot int
	t    Time
}

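// await forwards the fire time received on ac, tagged with its slot number,
// to result.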
func await(slot int, result chan<- afterResult, ac <-chan Time) {
	result <- afterResult{slot, <-ac}
}

func testAfterQueuing(delta Duration) error {
	// Make the result channel buffered because we don't want
	// to depend on channel queueing semantics that might
	// change in the future.
	result := make(chan afterResult, len(slots))

	t0 := Now()
	for _, slot := range slots {
		go await(slot, result, After(Duration(slot)*delta))
	}
	var order []int
	var times []Time
	for range slots {
		r := <-result
		order = append(order, r.slot)
		times = append(times, r.t)
	}
	for i := range order {
		if i > 0 && order[i] < order[i-1] {
			return fmt.Errorf("After calls returned out of order: %v", order)
		}
	}
	for i, t := range times {
		dt := t.Sub(t0)
		target := Duration(order[i]) * delta
		if dt < target-delta/2 || dt > target+delta*10 {
			return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
		}
	}
	return nil
}

func TestTimerStopStress(t *testing.T) {
	if testing.Short() {
		return
	}
	for i := 0; i < 100; i++ {
		go func(i int) {
			timer := AfterFunc(2*Second, func() {
				t.Errorf("timer %d was not stopped", i)
			})
			Sleep(1 * Second)
			timer.Stop()
		}(i)
	}
	Sleep(3 * Second)
}

func TestSleepZeroDeadlock(t *testing.T) {
	// Sleep(0) used to hang; the sequence of events was as follows.
	// Sleep(0) sets G's status to Gwaiting, but then immediately returns, leaving that status in place.
	// Then the goroutine calls e.g. new and falls down into the scheduler due to pending GC.
	// After the GC, nobody wakes up the goroutine from the Gwaiting status.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	c := make(chan bool)
	go func() {
		for i := 0; i < 100; i++ {
			runtime.GC()
		}
		c <- true
	}()
	for i := 0; i < 100; i++ {
		Sleep(0)
		tmp := make(chan bool, 1)
		tmp <- true
		<-tmp
	}
	<-c
}

func testReset(d Duration) error {
	t0 := NewTimer(2 * d)
	Sleep(d)
	if !t0.Reset(3 * d) {
		return errors.New("resetting unfired timer returned false")
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
		return errors.New("timer fired early")
	default:
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
	default:
		return errors.New("reset timer did not fire")
	}

	if t0.Reset(50 * Millisecond) {
		return errors.New("resetting expired timer returned true")
	}
	return nil
}
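
// The test above deliberately sleeps past expiry before touching t0.C. In
// ordinary code, the Reset idiom documented on Timer.Reset is to stop and
// drain first; the following is a minimal sketch (a hypothetical helper, not
// part of the original tests) of that idiom, assuming the timer's channel
// has not already been received from:
func resetSafely(t *Timer, d Duration) {
	if !t.Stop() {
		<-t.C // the timer already fired; drain the value it left behind
	}
	t.Reset(d)
}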

func TestReset(t *testing.T) {
	// We try to run this test with increasingly larger multiples
	// until one works, so that slow, loaded hardware isn't as flaky,
	// but without slowing down fast machines unnecessarily.
	const unit = 25 * Millisecond
	tries := []Duration{
		1 * unit,
		3 * unit,
		7 * unit,
		15 * unit,
	}
	var err error
	for _, d := range tries {
		err = testReset(d)
		if err == nil {
			t.Logf("passed using duration %v", d)
			return
		}
	}
	t.Error(err)
}

// Test that sleeping (via Sleep or Timer) for an interval so large it
// overflows does not result in a short sleep duration. Nor does it interfere
// with execution of other timers. If it does, timers in this or subsequent
// tests may not fire.
func TestOverflowSleep(t *testing.T) {
	const big = Duration(int64(1<<63 - 1))

	go func() {
		Sleep(big)
		// On failure, this may return after the test has completed, so
		// we need to panic instead.
		panic("big sleep returned")
	}()

	select {
	case <-After(big):
		t.Fatalf("big timeout fired")
	case <-After(25 * Millisecond):
		// OK
	}

	const neg = Duration(-1 << 63)
	Sleep(neg) // Returns immediately.
	select {
	case <-After(neg):
		// OK
	case <-After(1 * Second):
		t.Fatalf("negative timeout didn't fire")
	}
}

// Test that a panic while deleting a timer does not leave
// the timers mutex held, deadlocking a ticker.Stop in a defer.
func TestIssue5745(t *testing.T) {
	ticker := NewTicker(Hour)
	defer func() {
		// would deadlock here before the fix due to
		// lock taken before the segfault.
		ticker.Stop()

		if r := recover(); r == nil {
			t.Error("Expected panic, but none happened.")
		}
	}()

	// cause a panic due to a segfault
	var timer *Timer
	timer.Stop()
	t.Error("Should be unreachable.")
}

func TestOverflowPeriodRuntimeTimer(t *testing.T) {
	// This may hang forever if timers are broken. See comment near
	// the end of CheckRuntimeTimerOverflow in internal_test.go.
	CheckRuntimeTimerPeriodOverflow()
}

func checkZeroPanicString(t *testing.T) {
	e := recover()
	s, _ := e.(string)
	if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
		t.Errorf("panic = %v; want substring %q", e, want)
	}
}

func TestZeroTimerResetPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Reset(1)
}

func TestZeroTimerStopPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Stop()
}

// Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
func TestZeroTimer(t *testing.T) {
	if testing.Short() {
		t.Skip("-short")
	}

	for i := 0; i < 1000000; i++ {
		s := Now()
		ti := NewTimer(0)
		<-ti.C
		if diff := Since(s); diff > 2*Second {
			t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
		}
	}
}

// Test that rapidly moving a timer earlier doesn't cause it to get dropped.
// Issue 47329.
func TestTimerModifiedEarlier(t *testing.T) {
	past := Until(Unix(0, 0))
	count := 1000
	fail := 0
	for i := 0; i < count; i++ {
		timer := NewTimer(Hour)
		for j := 0; j < 10; j++ {
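			// Standard Reset idiom: stop the timer and, if it has
			// already fired, drain the stale value so Reset starts clean.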
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(past)
		}

		deadline := NewTimer(10 * Second)
		defer deadline.Stop()
		now := Now()
		select {
		case <-timer.C:
			if since := Since(now); since > 8*Second {
				t.Errorf("timer took too long (%v)", since)
				fail++
			}
		case <-deadline.C:
			t.Error("deadline expired")
		}
	}

	if fail > 0 {
		t.Errorf("%d failures", fail)
	}
}

// Test that rapidly moving timers earlier and later doesn't cause
// some of the sleep times to be lost.
// Issue 47762.
func TestAdjustTimers(t *testing.T) {
	var rnd = rand.New(rand.NewSource(Now().UnixNano()))

	timers := make([]*Timer, 100)
	states := make([]int, len(timers))
	indices := rnd.Perm(len(timers))

	for len(indices) != 0 {
		var ii = rnd.Intn(len(indices))
		var i = indices[ii]

		var timer = timers[i]
		var state = states[i]
		states[i]++

		switch state {
		case 0:
			timers[i] = NewTimer(0)
		case 1:
			<-timer.C // Timer is now idle.

		// Reset to various long durations, which we'll cancel.
		case 2:
			if timer.Reset(1 * Minute) {
				panic("shouldn't be active (1)")
			}
		case 4:
			if timer.Reset(3 * Minute) {
				panic("shouldn't be active (3)")
			}
		case 6:
			if timer.Reset(2 * Minute) {
				panic("shouldn't be active (2)")
			}

		// Stop and drain a long-duration timer.
		case 3, 5, 7:
			if !timer.Stop() {
				t.Logf("timer %d state %d Stop returned false", i, state)
				<-timer.C
			}

		// Start a short-duration timer we expect to select without blocking.
		case 8:
			if timer.Reset(0) {
				t.Fatal("timer.Reset returned true")
			}
		case 9:
			now := Now()
			<-timer.C
			dur := Since(now)
			if dur > 750*Millisecond {
				t.Errorf("timer %d took %v to complete", i, dur)
			}

		// Timer is done. Swap with tail and remove.
		case 10:
			indices[ii] = indices[len(indices)-1]
			indices = indices[:len(indices)-1]
		}
	}
}

// Benchmark timer latency when the thread that creates the timer is busy with
// other work and the timers must be serviced by other threads.
// https://golang.org/issue/38860
func BenchmarkParallelTimerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	// allocate memory now to avoid GC interference later.
	timerCount := gmp - 1
	stats := make([]struct {
		sum   float64
		max   Duration
		count int64
		_     [5]int64 // cache line padding
	}, timerCount)

	// Ensure the time to start new threads to service timers will not pollute
	// the results.
	warmupScheduler(gmp)

	// Note that, other than the AfterFunc calls it is measuring, this
	// benchmark avoids using any other timers. In particular, the main
	// goroutine uses doWork to spin for some durations because, up through
	// Go 1.15, if all threads were idle sysmon could leave deep sleep when
	// we wake.

	// Ensure sysmon is in deep sleep.
	doWork(30 * Millisecond)

	b.ResetTimer()

	const delay = Millisecond
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < b.N; i++ {
		wg.Add(timerCount)
		atomic.StoreInt32(&count, 0)
		for j := 0; j < timerCount; j++ {
			j := j
			expectedWakeup := Now().Add(delay)
			AfterFunc(delay, func() {
				late := Since(expectedWakeup)
				if late < 0 {
					late = 0
				}
				stats[j].count++
				stats[j].sum += float64(late.Nanoseconds())
				if late > stats[j].max {
					stats[j].max = late
				}
				atomic.AddInt32(&count, 1)
				for atomic.LoadInt32(&count) < int32(timerCount) {
					// spin until all timers fired
				}
				wg.Done()
			})
		}

		for atomic.LoadInt32(&count) < int32(timerCount) {
			// spin until all timers fired
		}
		wg.Wait()

		// Spin for a bit to let the other scheduler threads go idle before the
		// next round.
		doWork(Millisecond)
	}
	var total float64
	var samples float64
	max := Duration(0)
	for _, s := range stats {
		if s.max > max {
			max = s.max
		}
		total += s.sum
		samples += float64(s.count)
	}
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(total/samples, "avg-late-ns")
	b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
}

// Benchmark timer latency with staggered wakeup times and varying CPU bound
// workloads. https://golang.org/issue/38860
func BenchmarkStaggeredTickerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	const delay = 3 * Millisecond

	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
				tickerCount := gmp * tickersPerP
				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
					// allocate memory now to avoid GC interference later.
					stats := make([]struct {
						sum   float64
						max   Duration
						count int64
						_     [5]int64 // cache line padding
					}, tickerCount)

					// Ensure the time to start new threads to service timers
					// will not pollute the results.
					warmupScheduler(gmp)

					b.ResetTimer()

					var wg sync.WaitGroup
					wg.Add(tickerCount)
					for j := 0; j < tickerCount; j++ {
						j := j
						doWork(delay / Duration(gmp))
						expectedWakeup := Now().Add(delay)
						ticker := NewTicker(delay)
						go func(c int, ticker *Ticker, firstWake Time) {
							defer ticker.Stop()

							for ; c > 0; c-- {
								<-ticker.C
								late := Since(expectedWakeup)
								if late < 0 {
									late = 0
								}
								stats[j].count++
								stats[j].sum += float64(late.Nanoseconds())
								if late > stats[j].max {
									stats[j].max = late
								}
								expectedWakeup = expectedWakeup.Add(delay)
								doWork(dur)
							}
							wg.Done()
						}(b.N, ticker, expectedWakeup)
					}
					wg.Wait()

					var total float64
					var samples float64
					max := Duration(0)
					for _, s := range stats {
						if s.max > max {
							max = s.max
						}
						total += s.sum
						samples += float64(s.count)
					}
					b.ReportMetric(0, "ns/op")
					b.ReportMetric(total/samples, "avg-late-ns")
					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
				})
			}
		})
	}
}

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < targetThreadCount; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt32(&count, 1)
			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
				// spin until all threads started
			}

			// spin a bit more to ensure they are all running on separate CPUs.
			doWork(Millisecond)
			wg.Done()
		}()
	}
	wg.Wait()
}

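// doWork spins until dur has elapsed, burning CPU without sleeping, so it
// creates no timers of its own.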
func doWork(dur Duration) {
	start := Now()
	for Since(start) < dur {
	}
}