gitlab.com/SkynetLabs/skyd@v1.6.9/skymodules/distributiontracker_test.go

     1  package skymodules
     2  
     3  import (
     4  	"math"
     5  	"testing"
     6  	"time"
     7  
     8  	"gitlab.com/NebulousLabs/fastrand"
     9  )
    10  
    11  // TestDistributionTracker is a collection of unit tests that verify the
    12  // functionality of the distribution tracker.
    13  func TestDistributionTracker(t *testing.T) {
    14  	if testing.Short() {
    15  		t.SkipNow()
    16  	}
    17  	t.Parallel()
    18  
    19  	t.Run("Bucketing", testDistributionBucketing)
    20  	t.Run("ChanceAfter", testDistributionChanceAfter)
    21  	t.Run("ChancesAfter", testDistributionChancesAfter)
    22  	t.Run("ChanceAfterShift", testDistributionChanceAfterShift)
    23  	t.Run("Clone", testDistributionClone)
    24  	t.Run("Decay", testDistributionDecay)
    25  	t.Run("DecayedLifetime", testDistributionDecayedLifetime)
    26  	t.Run("ExpectedDuration", testDistributionExpectedDuration)
    27  	t.Run("ExpectedDurationWithShift", testDistributionExpectedDurationWithShift)
    28  	t.Run("FullTestLong", testDistributionTrackerFullTestLong)
    29  	t.Run("Helpers", testDistributionHelpers)
    30  	t.Run("MergeWith", testDistributionMergeWith)
    31  	t.Run("Shift", testDistributionShift)
    32  }
    33  
    34  // testDistributionBucketing will check that the distribution is placing timings
    35  // into the right buckets and then reporting the right timings in the pstats.
    36  func testDistributionBucketing(t *testing.T) {
    37  	t.Parallel()
    38  
    39  	// Use a long half life so the distribution doesn't decay every time we
    40  	// add a data point.
    41  	d := NewDistribution(time.Minute * 100)
    42  
    43  	// Get a distribution with no data collected.
    44  	if d.PStat(0.55) != DistributionDurationForBucketIndex(DistributionTrackerTotalBuckets-1) {
    45  		t.Error("expecting a distribution with no data to return the max possible value")
    46  	}
    47  
    48  	// Try adding a single datapoint to each bucket, by adding it at the right
    49  	// millisecond offset.
    50  	var i int
    51  	total := time.Millisecond
    52  	for i < 64 {
    53  		d.AddDataPoint(total)
    54  		if d.timings[i] != 1 {
    55  			t.Error("bad:", i)
    56  		}
    57  
    58  		total += 4 * time.Millisecond
    59  		i++
    60  
    61  		pstat := d.PStat(0.99999999999)
    62  		if pstat != total-time.Millisecond {
    63  			t.Error("bad", i, pstat, total)
    64  		}
    65  		pstat = d.PStat(0.00000001)
    66  		if pstat != time.Millisecond*4 {
    67  			t.Error("bad", i, pstat, total)
    68  		}
    69  		pstat = d.PStat(0.5)
    70  		if pstat != DistributionDurationForBucketIndex((i+1)/2) {
    71  			t.Error("bad", i, pstat, total)
    72  		}
    73  	}
    74  	for i < 64+48 {
    75  		d.AddDataPoint(total)
    76  		if d.timings[i] != 1 {
    77  			t.Error("bad:", i)
    78  		}
    79  
    80  		total += 16 * time.Millisecond
    81  		i++
    82  
    83  		pstat := d.PStat(0.99999999999)
    84  		if pstat != total-time.Millisecond {
    85  			t.Error("bad", i, pstat, total)
    86  		}
    87  		pstat = d.PStat(0.00000001)
    88  		if pstat != time.Millisecond*4 {
    89  			t.Error("bad", i, pstat, total)
    90  		}
    91  		pstat = d.PStat(0.5)
    92  		if pstat != DistributionDurationForBucketIndex((i+1)/2) {
    93  			t.Error("bad", i, pstat, total)
    94  		}
    95  	}
    96  	for i < 64+48*2 {
    97  		d.AddDataPoint(total)
    98  		if d.timings[i] != 1 {
    99  			t.Error("bad:", i)
   100  		}
   101  
   102  		total += 64 * time.Millisecond
   103  		i++
   104  
   105  		pstat := d.PStat(0.99999999999)
   106  		if pstat != total-time.Millisecond {
   107  			t.Error("bad", i, pstat, total)
   108  		}
   109  		pstat = d.PStat(0.00000001)
   110  		if pstat != time.Millisecond*4 {
   111  			t.Error("bad", i, pstat, total)
   112  		}
   113  		pstat = d.PStat(0.5)
   114  		if pstat != DistributionDurationForBucketIndex((i+1)/2) {
   115  			t.Error("bad", i, pstat, total)
   116  		}
   117  	}
   118  	for i < 64+48*3 {
   119  		d.AddDataPoint(total)
   120  		if d.timings[i] != 1 {
   121  			t.Error("bad:", i)
   122  		}
   123  
   124  		total += 256 * time.Millisecond
   125  		i++
   126  
   127  		pstat := d.PStat(0.99999999999)
   128  		if pstat != total-time.Millisecond {
   129  			t.Error("bad", i, pstat, total)
   130  		}
   131  		pstat = d.PStat(0.00000001)
   132  		if pstat != time.Millisecond*4 {
   133  			t.Error("bad", i, pstat, total)
   134  		}
   135  		pstat = d.PStat(0.5)
   136  		if pstat != DistributionDurationForBucketIndex((i+1)/2) {
   137  			t.Error("bad", i, pstat, total)
   138  		}
   139  	}
   140  	for i < 64+48*4 {
   141  		d.AddDataPoint(total)
   142  		if d.timings[i] != 1 {
   143  			t.Error("bad:", i)
   144  		}
   145  
   146  		total += 1024 * time.Millisecond
   147  		i++
   148  
   149  		pstat := d.PStat(0.99999999999)
   150  		if pstat != total-time.Millisecond {
   151  			t.Error("bad", i, pstat, total)
   152  		}
   153  		pstat = d.PStat(0.00000001)
   154  		if pstat != time.Millisecond*4 {
   155  			t.Error("bad", i, pstat, total)
   156  		}
   157  		pstat = d.PStat(0.5)
   158  		if pstat != DistributionDurationForBucketIndex((i+1)/2) {
   159  			t.Error("bad", i, pstat, total)
   160  		}
   161  	}
   162  	for i < 64+48*5 {
   163  		d.AddDataPoint(total)
   164  		if d.timings[i] != 1 {
   165  			t.Error("bad:", i)
   166  		}
   167  
   168  		total += 4096 * time.Millisecond
   169  		i++
   170  
   171  		pstat := d.PStat(0.99999999999)
   172  		if pstat != total-time.Millisecond {
   173  			t.Error("bad", i, pstat, total)
   174  		}
   175  		pstat = d.PStat(0.00000001)
   176  		if pstat != time.Millisecond*4 {
   177  			t.Error("bad", i, pstat, total)
   178  		}
   179  		pstat = d.PStat(0.5)
   180  		if pstat != DistributionDurationForBucketIndex((i+1)/2) {
   181  			t.Error("bad", i, pstat, total)
   182  		}
   183  	}
   184  	for i < 64+48*6 {
   185  		d.AddDataPoint(total)
   186  		if d.timings[i] != 1 {
   187  			t.Error("bad:", i)
   188  		}
   189  
   190  		total += 16384 * time.Millisecond
   191  		i++
   192  
   193  		pstat := d.PStat(0.99999999999)
   194  		if pstat != total-time.Millisecond {
   195  			t.Error("bad", i, pstat, total)
   196  		}
   197  		pstat = d.PStat(0.00000001)
   198  		if pstat != time.Millisecond*4 {
   199  			t.Error("bad", i, pstat, total)
   200  		}
   201  		pstat = d.PStat(0.5)
   202  		if pstat != DistributionDurationForBucketIndex((i+1)/2) {
   203  			t.Error("bad", i, pstat, total)
   204  		}
   205  	}
   206  	for i < 64+48*7-1 {
   207  		d.AddDataPoint(total)
   208  		if d.timings[i] != 1 {
   209  			t.Error("bad:", i)
   210  		}
   211  
   212  		total += 65536 * time.Millisecond
   213  		i++
   214  
   215  		pstat := d.PStat(0.99999999999)
   216  		if pstat != total-time.Millisecond {
   217  			t.Error("bad", i, pstat, total)
   218  		}
   219  		pstat = d.PStat(0.00000001)
   220  		if pstat != time.Millisecond*4 {
   221  			t.Error("bad", i, pstat, total)
   222  		}
   223  		pstat = d.PStat(0.5)
   224  		if pstat != DistributionDurationForBucketIndex((i+1)/2) {
   225  			t.Error("bad", i, pstat, total)
   226  		}
   227  	}
   228  
   229  	// Test off the end of the bucket.
   230  	expectedPStat := total - time.Millisecond
   231  	total += 1e9 * time.Millisecond
   232  	d.AddDataPoint(total)
   233  	pstat := d.PStat(0.99999999999)
   234  	if pstat != expectedPStat {
   235  		t.Error("bad")
   236  	}
   237  	pstat = d.PStat(0.00000001)
   238  	if pstat != distributionTrackerInitialStepSize {
   239  		t.Error("bad", i, pstat, total)
   240  	}
   241  	pstat = d.PStat(0.5)
   242  	if pstat != DistributionDurationForBucketIndex(DistributionTrackerTotalBuckets/2) {
   243  		t.Error("bad", pstat, DistributionDurationForBucketIndex(DistributionTrackerTotalBuckets/2))
   244  	}
   245  }
   246  
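// bucketLayoutSketch is an illustrative helper, not part of the original test
// suite, that re-derives the bucket layout the loops above walk through: 64
// buckets of 4ms, followed by seven groups of 48 buckets whose width
// quadruples per group (16ms, 64ms, ..., 65536ms), for 400 buckets in total.
// It returns the lower boundary of every bucket, which should mirror what
// DistributionDurationForBucketIndex reports for each index.
func bucketLayoutSketch() []time.Duration {
	boundaries := make([]time.Duration, 0, DistributionTrackerTotalBuckets)
	step := 4 * time.Millisecond
	var current time.Duration
	for i := 0; i < DistributionTrackerTotalBuckets; i++ {
		boundaries = append(boundaries, current)
		current += step
		// The first 64 buckets use the initial step size, every group of 48
		// buckets after that quadruples the step.
		if i >= 63 && (i-63)%48 == 0 {
			step *= 4
		}
	}
	return boundaries
}
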
   247  // testDistributionChanceAfter will test the `ChanceAfter` method on the
   248  // distribution tracker.
   249  func testDistributionChanceAfter(t *testing.T) {
   250  	t.Parallel()
   251  
   252  	d := NewDistribution(time.Minute * 100)
   253  	ms := time.Millisecond
   254  
   255  	// verify the chance is zero if we don't have any datapoints
   256  	chance := d.ChanceAfter(time.Duration(0))
   257  	if chance != 0 {
   258  		t.Fatal("bad")
   259  	}
   260  	chance = d.ChanceAfter(time.Second)
   261  	if chance != 0 {
   262  		t.Fatal("bad")
   263  	}
   264  
   265  	// add some datapoints below 100ms
   266  	for i := 0; i < 100; i++ {
   267  		d.AddDataPoint(time.Duration(fastrand.Intn(100)) * ms)
   268  	}
   269  
   270  	// verify we have a 100% chance of coming in after 100ms
   271  	chance = d.ChanceAfter(100 * ms)
   272  	if chance != 1 {
   273  		t.Fatal("bad")
   274  	}
   275  
   276  	// verify the chance is somewhere between 0 and 1 for random durations
   277  	for i := 0; i < 100; i++ {
   278  		randomDur := time.Duration(fastrand.Intn(100)) * ms
   279  		chance = d.ChanceAfter(randomDur)
   280  		if !(chance >= 0 && chance <= 1) {
   281  			t.Fatal("bad", chance, randomDur)
   282  		}
   283  	}
   284  
   285  	// verify the chance increases if the duration increases
   286  	prev := float64(0)
   287  	for i := 0; i < 100; i += 10 {
   288  		chance = d.ChanceAfter(time.Duration(i) * ms)
   289  		if chance < prev {
   290  			t.Fatal("bad", chance, prev)
   291  		}
   292  		prev = chance
   293  	}
   294  
   295  	// verify the chance is deterministic
   296  	randomDur := time.Duration(fastrand.Intn(100)) * ms
   297  	if d.ChanceAfter(randomDur) != d.ChanceAfter(randomDur) {
   298  		t.Fatal("bad")
   299  	}
   300  
   301  	// reset the distribution and add a datapoint in every bucket
   302  	d = NewDistribution(time.Minute * 100)
   303  	for i := 0; i < DistributionTrackerTotalBuckets; i++ {
   304  		d.AddDataPoint(DistributionDurationForBucketIndex(i))
   305  	}
   306  
   307  	// assert the chance at every bucket equals the sum of all data points in
   308  	// buckets up until then divided by the total amount of data points
   309  	for i := 0; i < DistributionTrackerTotalBuckets; i++ {
   310  		if d.ChanceAfter(DistributionDurationForBucketIndex(i)) != float64(i)/d.DataPoints() {
   311  			t.Fatal("bad", i)
   312  		}
   313  	}
   314  }
   315  
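// chanceAfterSketch is an illustrative re-derivation, not part of the original
// test suite, of the behaviour the ChanceAfter assertions above rely on: sum
// the datapoints of every bucket that ends before the given duration, add the
// linearly interpolated fraction of the bucket the duration falls into, and
// divide by the total number of datapoints. The real implementation may differ
// in its details.
func chanceAfterSketch(d *Distribution, dur time.Duration) float64 {
	if d.DataPoints() == 0 {
		return 0
	}
	index, fraction := indexForDuration(dur)
	var below float64
	for i := 0; i < index; i++ {
		below += d.timings[i]
	}
	below += fraction * d.timings[index]
	return below / d.DataPoints()
}
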
   316  // testDistributionChancesAfter will test the `ChancesAfter` method on the
   317  // distribution tracker.
   318  func testDistributionChancesAfter(t *testing.T) {
   319  	t.Parallel()
   320  
   321  	// add a datapoint to every bucket
   322  	d := NewDistribution(time.Minute * 100)
   323  	for i := 0; i < DistributionTrackerTotalBuckets; i++ {
   324  		d.AddDataPoint(DistributionDurationForBucketIndex(i))
   325  	}
   326  
   327  	// verify chances after equals chance after duration and corresponding index
   328  	chances := d.ChancesAfter()
   329  	for i := 0; i < DistributionTrackerTotalBuckets; i++ {
   330  		if d.ChanceAfter(DistributionDurationForBucketIndex(i)) != chances[i] {
   331  			t.Fatal("bad")
   332  		}
   333  	}
   334  
   335  	// since we add a single datapoint in every bucket, the chance increase per
   336  	// bucket is the same for every bucket, and it's equal to 1 divided by the
   337  	// total number of buckets
   338  	//
   339  	// we can use this to assert the chances after, we assert that the current
   340  	// value is always equal to the previous chance plus the amount of chance
   341  	// that every bucket adds
   342  	//
   343  	// NOTE: we use 1e-9 as an equality threshold to cope with floating point
   344  	// errors
   345  	chancePerBucket := float64(1) / DistributionTrackerTotalBuckets
   346  	for i := 1; i < DistributionTrackerTotalBuckets; i++ {
   347  		if math.Abs(chances[i]-chances[i-1]-chancePerBucket) > 1e-9 {
   348  			t.Fatal("bad")
   349  		}
   350  	}
   351  }
   352  
   353  // testDistributionChanceAfterShift will test the `ChanceAfter` method on the
   354  // distribution tracker after a `Shift` has been applied to the distribution.
   355  func testDistributionChanceAfterShift(t *testing.T) {
   356  	t.Parallel()
   357  
   358  	d := NewDistribution(time.Minute * 100)
   359  	for i := 0; i < 100; i++ {
   360  		d.AddDataPoint(time.Duration(i) * time.Millisecond)
   361  	}
   362  
   363  	if d.ChanceAfter(90*time.Millisecond) != .9 {
   364  		t.Fatal("bad")
   365  	}
   366  
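	// Shifting by 80ms zeroes out the buckets below 80ms. With 100 uniformly
	// spread datapoints that leaves the 20 points in [80ms, 100ms), and
	// ChanceAfter(90ms) counts the 8 points below 88ms plus half of the
	// 4-point bucket covering [88ms, 92ms): 10 out of 20 points, or 0.5.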
   367  	d.Shift(80 * time.Millisecond)
   368  	chanceAfter := d.ChanceAfter(90 * time.Millisecond)
   369  	if chanceAfter != 0.5 {
   370  		t.Fatal("bad", chanceAfter)
   371  	}
   372  	if d.total != 20 {
   373  		t.Error("bad", d.total, 20)
   374  	}
   375  	var denom float64
   376  	for i, timing := range d.timings {
   377  		denom += timing * float64(DistributionDurationForBucketIndex(i))
   378  	}
   379  	if d.expectedDurationNumerator != denom {
   380  		t.Error("bad", d.expectedDurationNumerator, denom)
   381  	}
   382  
   383  	d.Shift(100 * time.Millisecond)
   384  	chanceAfter = d.ChanceAfter(100 * time.Millisecond)
   385  	if chanceAfter != 0 {
   386  		t.Fatal("bad", chanceAfter)
   387  	}
   388  	if d.total != 0 {
   389  		t.Error("bad", d.total, 0)
   390  	}
   391  	denom = 0
   392  	for i, timing := range d.timings {
   393  		denom += timing * float64(DistributionDurationForBucketIndex(i))
   394  	}
   395  	if d.expectedDurationNumerator != denom {
   396  		t.Error("bad", d.expectedDurationNumerator, denom)
   397  	}
   398  }
   399  
   400  // testDistributionClone will test the `Clone` method on the distribution
   401  // tracker.
   402  func testDistributionClone(t *testing.T) {
   403  	t.Parallel()
   404  
   405  	d := NewDistribution(time.Minute * 100)
   406  
   407  	// add 1000 random data points
   408  	ms := time.Millisecond
   409  	for i := 0; i < 1000; i++ {
   410  		d.AddDataPoint(time.Duration(fastrand.Intn(100)) * ms)
   411  	}
   412  
   413  	// clone the distributions
   414  	c := d.Clone()
   415  
   416  	// assert the distribution's properties were copied over
   417  	if c.staticHalfLife != d.staticHalfLife {
   418  		t.Fatal("bad")
   419  	}
   420  	if c.decayedLifetime != d.decayedLifetime {
   421  		t.Fatal("bad")
   422  	}
   423  	if c.lastDecay != d.lastDecay {
   424  		t.Fatal("bad")
   425  	}
   426  	if c.total != d.total {
   427  		t.Fatal("bad")
   428  	}
   429  	if c.expectedDurationNumerator != d.expectedDurationNumerator {
   430  		t.Fatal("bad")
   431  	}
   432  
   433  	// assert the datapoints and percentiles are identical
   434  	if c.DataPoints() != d.DataPoints() {
   435  		t.Fatal("bad")
   436  	}
   437  	if c.PStat(.9) != d.PStat(.9) {
   438  		t.Fatal("bad")
   439  	}
   440  
   441  	// add more datapoints to the original distribution
   442  	for i := 0; i < 1000; i++ {
   443  		d.AddDataPoint(time.Duration(fastrand.Intn(100)) * ms)
   444  	}
   445  
   446  	// assert the original distribution diverged from the clone
   447  	if c.DataPoints() == d.DataPoints() {
   448  		t.Fatal("bad")
   449  	}
   450  }
   451  
   452  // testDistributionDecay will test that the distribution is being decayed
   453  // correctly when enough time has passed.
   454  func testDistributionDecay(t *testing.T) {
   455  	t.Parallel()
   456  
   457  	// Create a distribution with a half life of 100 minutes, which means a
   458  	// decay operation should trigger every minute.
   459  	d := NewDistribution(time.Minute * 100)
   460  	totalPoints := func() float64 {
   461  		var total float64
   462  		for i := 0; i < len(d.timings); i++ {
   463  			total += d.timings[i]
   464  		}
   465  		return float64(total)
   466  	}
   467  
   468  	// Add 500 data points.
   469  	for i := 0; i < 500; i++ {
   470  		// Use different buckets.
   471  		if i%6 == 0 {
   472  			d.AddDataPoint(time.Millisecond)
   473  		} else {
   474  			d.AddDataPoint(time.Millisecond * 100)
   475  		}
   476  	}
   477  	// We accept a range of values to compensate for the limited precision of
   478  	// floating points.
   479  	if totalPoints() < 499 || totalPoints() > 501 {
   480  		t.Error("bad", totalPoints())
   481  	}
   482  
   483  	// Simulate exactly the half life of time passing.
   484  	d.lastDecay = time.Now().Add(-100 * time.Minute)
   485  	d.AddDataPoint(time.Millisecond)
   486  	// We accept a range of values to compensate for the limited precision of
   487  	// floating points.
   488  	if totalPoints() < 250 || totalPoints() > 252 {
   489  		t.Error("bad", totalPoints())
   490  	}
   491  
   492  	// Simulate exactly one quarter of the half life passing twice.
   493  	d.lastDecay = time.Now().Add(-50 * time.Minute)
   494  	d.AddDataPoint(time.Millisecond)
   495  	d.lastDecay = time.Now().Add(-50 * time.Minute)
   496  	d.AddDataPoint(time.Millisecond)
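	// Starting from roughly 251 points, each quarter-half-life decay
	// multiplies the total by 2^(-1/2) and the subsequent AddDataPoint adds
	// one point: 251*0.707+1 is roughly 178.5 and 178.5*0.707+1 is roughly
	// 127, which is why the window below is centered on 127.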
   497  	// We accept a range of values to compensate for the limited precision of
   498  	// floating points.
   499  	if totalPoints() < 126 || totalPoints() > 128 {
   500  		t.Error("bad", totalPoints())
   501  	}
   502  }
   503  
   504  // testDistributionDecayedLifetime checks that the total counted decayed
   505  // lifetime of the distribution is being tracked correctly.
   506  func testDistributionDecayedLifetime(t *testing.T) {
   507  	t.Parallel()
   508  
   509  	// Create a distribution with a half life of 300 minutes, which means a
   510  	// decay operation should trigger every three minutes.
   511  	d := NewDistribution(time.Minute * 300)
   512  	totalPoints := func() float64 {
   513  		var total float64
   514  		for i := 0; i < len(d.timings); i++ {
   515  			total += d.timings[i]
   516  		}
   517  		return float64(total)
   518  	}
   519  
   520  	// Do 10k steps, each step advancing one minute. Every third step should
   521  	// trigger a decay. Add 1 data point each step.
   522  	for i := 0; i < 10e3; i++ {
   523  		d.lastDecay = d.lastDecay.Add(-1 * time.Minute)
   524  		d.AddDataPoint(time.Millisecond)
   525  	}
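	// One data point was added per simulated minute, and the point total and
	// the decayed lifetime decay by the same factor, so the ratio should
	// settle at roughly 60 points per hour.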
   526  	pointsPerHour := totalPoints() / (float64(d.decayedLifetime) / float64(time.Hour))
   527  	// We accept a range of values to compensate for the limited precision of
   528  	// floating points.
   529  	if pointsPerHour < 55 || pointsPerHour > 65 {
   530  		t.Error("bad", pointsPerHour)
   531  	}
   532  }
   533  
   534  // testDistributionExpectedDurationWithShift is the unit test for
   535  // ExpectedDurationWithShift. It uses Shift and ExpectedDuration, which are
   536  // covered by their own unit tests, and compares their combined result to
   537  // ExpectedDurationWithShift's.
   538  func testDistributionExpectedDurationWithShift(t *testing.T) {
   539  	d := NewDistribution(time.Minute * 100)
   540  
   541  	// Add 100 datapoints to every bucket.
   542  	for i := range d.timings {
   543  		for j := 0; j < 100; j++ {
   544  			d.AddDataPoint(DistributionDurationForBucketIndex(i))
   545  		}
   546  	}
   547  
   548  	// Do 100 shifts and compares between ExpectedDuration and
   549  	// ExpectedDurationWithShift.
   550  	for i := 0; i < 100; i++ {
   551  		shift := 10 * time.Millisecond * time.Duration(i)
   552  
   553  		// Compute the expected duration once by using ExpectedDuration
   554  		// on a copy.
   555  		c := d.Clone()
   556  		c.Shift(shift)
   557  		ed1 := c.ExpectedDuration()
   558  
   559  		// And another time with ExpectedDurationWithShift.
   560  		ed2 := d.ExpectedDurationWithShift(shift)
   561  
   562  		// The results should match.
   563  		if ed1 != ed2 {
   564  			t.Fatal("expected duration mismatch", ed1, ed2)
   565  		}
   566  	}
   567  }
   568  
   569  // testDistributionExpectedDuration will test that the distribution correctly
   570  // returns the expected duration based upon all data points in the distribution.
   571  func testDistributionExpectedDuration(t *testing.T) {
   572  	t.Parallel()
   573  
   574  	d := NewDistribution(time.Minute * 100)
   575  	ms := time.Millisecond
   576  
   577  	// check whether we default to the worst case if we have 0 data points
   578  	expected := d.ExpectedDuration()
   579  	if expected != DistributionDurationForBucketIndex(len(d.timings)-1) {
   580  		t.Error("bad")
   581  	}
   582  
   583  	// add a first data point
   584  	duration := 8 * ms
   585  	d.AddDataPoint(duration)
   586  	expected = d.ExpectedDuration()
   587  	if expected != duration {
   588  		t.Error("bad")
   589  	}
   590  
   591  	// now add 1000 datapoints, between 1-50ms
   592  	for i := 0; i < 1000; i++ {
   593  		d.AddDataPoint(time.Duration(fastrand.Uint64n(50)+1) * ms)
   594  	}
   595  	expected = d.ExpectedDuration()
   596  	if expected < 22*ms || expected > 28*ms {
   597  		t.Error("bad")
   598  	}
   599  
   600  	// add 1000 more datapoints, between 50 and 150ms
   601  	for i := 0; i < 1000; i++ {
   602  		d.AddDataPoint(time.Duration(fastrand.Uint64n(100)+50) * ms)
   603  	}
   604  
   605  	// assert the expected duration increased
   606  	expected = d.ExpectedDuration()
   607  	if expected < 50*ms || expected > 75*ms {
   608  		t.Error("bad")
   609  	}
   610  
   611  	// reset the distribution
   612  	d = NewDistribution(time.Minute * 100)
   613  
   614  	// add one datapoint to every bucket and keep track of the total duration
   615  	var totalDuration int64
   616  	for i := 0; i < DistributionTrackerTotalBuckets; i++ {
   617  		point := DistributionDurationForBucketIndex(i)
   618  		totalDuration += point.Nanoseconds()
   619  		d.AddDataPoint(point)
   620  	}
   621  
   622  	// the expected duration is the sum of the duration of all buckets
   623  	// multiplied by the % chance a datapoint is in that bucket, because we
   624  	// added exactly one datapoint to every bucket, the pct chance will be
   625  	// the same across all buckets, namely 1/DistributionTrackerTotalBuckets
   626  	pctChance := float64(1) / float64(DistributionTrackerTotalBuckets)
   627  	expected = time.Duration(pctChance * float64(totalDuration))
   628  	if d.ExpectedDuration() != expected {
   629  		t.Error("bad", d.ExpectedDuration(), expected)
   630  	}
   631  
   632  	// add 100 datapoints to the first and last bucket
   633  	firstBucketDur := DistributionDurationForBucketIndex(0)
   634  	lastBucketDur := DistributionDurationForBucketIndex(DistributionTrackerTotalBuckets - 1)
   635  	for i := 0; i < 100; i++ {
   636  		d.AddDataPoint(firstBucketDur)
   637  		d.AddDataPoint(lastBucketDur)
   638  	}
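
	// The distribution now holds 600 datapoints: 101 in the first bucket, 101
	// in the last bucket and 1 in each of the remaining 398 buckets, which is
	// where the 101 numerators below come from.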
   639  
   640  	// now calculate the expected duration for the first and last bucket
   641  	// separately and sum it with the expected duration of the other buckets;
   642  	// to do so, first subtract both buckets' durations from the total duration
   643  	totalDuration -= firstBucketDur.Nanoseconds()
   644  	totalDuration -= lastBucketDur.Nanoseconds()
   645  
   646  	pctChanceFirst := float64(101) / float64(d.DataPoints())
   647  	pctChanceLast := float64(101) / float64(d.DataPoints())
   648  	pctChanceOther := float64(1) / float64(d.DataPoints())
   649  
   650  	expectedFirst := pctChanceFirst * float64(firstBucketDur)
   651  	expectedLast := pctChanceLast * float64(lastBucketDur)
   652  	expectedOthers := pctChanceOther * float64(totalDuration)
   653  
   654  	expected = time.Duration(expectedFirst + expectedOthers + expectedLast)
   655  	if d.ExpectedDuration() != expected {
   656  		t.Error("bad", d.ExpectedDuration(), expected)
   657  	}
   658  }
   659  
   660  // testDistributionTrackerFullTestLong attempts to use a distribution tracker in
   661  // full, including using actual sleeps instead of artificial clock manipulation.
   662  func testDistributionTrackerFullTestLong(t *testing.T) {
   663  	if testing.Short() {
   664  		t.SkipNow()
   665  	}
   666  	t.Parallel()
   667  
   668  	// Get the standard distributions but then fix their half lives.
   669  	dt := NewDistributionTrackerStandard()
   670  	dt.distributions[0].staticHalfLife = 5 * time.Second
   671  	dt.distributions[1].staticHalfLife = 20 * time.Second
   672  	dt.distributions[2].staticHalfLife = time.Hour
   673  
   674  	// Add 5000 data points to the first bucket.
   675  	for i := 0; i < 5000; i++ {
   676  		dt.AddDataPoint(DistributionDurationForBucketIndex(0))
   677  	}
   678  	// Add 4000 data points to the third bucket.
   679  	for i := 0; i < 4000; i++ {
   680  		dt.AddDataPoint(DistributionDurationForBucketIndex(2))
   681  	}
   682  	// Add 900 data points to the 10th bucket.
   683  	for i := 0; i < 900; i++ {
   684  		dt.AddDataPoint(DistributionDurationForBucketIndex(9))
   685  	}
   686  	// Add 90 data points to the 20th bucket.
   687  	for i := 0; i < 90; i++ {
   688  		dt.AddDataPoint(DistributionDurationForBucketIndex(19))
   689  	}
   690  	// Add 9 data points to the 30th bucket.
   691  	for i := 0; i < 9; i++ {
   692  		dt.AddDataPoint(DistributionDurationForBucketIndex(29))
   693  	}
   694  	// Add 1 data point to the 40th bucket.
   695  	dt.AddDataPoint(DistributionDurationForBucketIndex(39))
   696  
   697  	// Check how the distributions seem.
   698  	percentiles := dt.Percentiles()
   699  
   700  	// Each should be less than the next, and equal across all distributions.
   701  	if percentiles[0][0] >= percentiles[0][1] || percentiles[0][1] >= percentiles[0][2] || percentiles[0][2] >= percentiles[0][3] || percentiles[0][3] >= percentiles[0][4] {
   702  		t.Log(percentiles)
   703  		t.Error("bad")
   704  	}
   705  	if percentiles[0][0] != percentiles[1][0] || percentiles[1][0] != percentiles[2][0] {
   706  		t.Log(percentiles)
   707  		t.Error("bad")
   708  	}
   709  	if percentiles[0][1] != percentiles[1][1] || percentiles[1][1] != percentiles[2][1] {
   710  		t.Log(percentiles)
   711  		t.Error("bad")
   712  	}
   713  	if percentiles[0][2] != percentiles[1][2] || percentiles[1][2] != percentiles[2][2] {
   714  		t.Log(percentiles)
   715  		t.Error("bad")
   716  	}
   717  	if percentiles[0][3] != percentiles[1][3] || percentiles[1][3] != percentiles[2][3] {
   718  		t.Log(percentiles)
   719  		t.Error("bad")
   720  	}
   721  	if percentiles[0][4] != percentiles[1][4] || percentiles[1][4] != percentiles[2][4] {
   722  		t.Log(percentiles)
   723  		t.Error("bad")
   724  	}
   725  
   726  	// Have 20 seconds elapse, and add 5000 more data points to the first bucket.
   727  	// This should skew the first distribution (5 second half life) but not the
   728  	// other two.
   729  	time.Sleep(time.Second * 20)
   730  	for i := 0; i < 5000; i++ {
   731  		dt.AddDataPoint(DistributionDurationForBucketIndex(0))
   732  	}
   733  	percentiles = dt.Percentiles()
   734  	if percentiles[0][0] != percentiles[0][1] {
   735  		t.Log(percentiles)
   736  		t.Error("bad")
   737  	}
   738  	if percentiles[0][1] >= percentiles[1][1] {
   739  		t.Log(percentiles)
   740  		t.Error("bad")
   741  	}
   742  	if percentiles[1][1] != percentiles[2][1] {
   743  		t.Log(percentiles)
   744  		t.Error("bad")
   745  	}
   746  
   747  	// Add 30,000 more entries; this should shift the 20 second half life
   748  	// distribution but not the 1 hour one.
   749  	for i := 0; i < 30000; i++ {
   750  		dt.AddDataPoint(DistributionDurationForBucketIndex(0))
   751  	}
   752  	percentiles = dt.Percentiles()
   753  	if percentiles[0][1] != percentiles[0][2] {
   754  		t.Log(percentiles)
   755  		t.Error("bad")
   756  	}
   757  	if percentiles[1][0] != percentiles[1][1] {
   758  		t.Log(percentiles)
   759  		t.Error("bad")
   760  	}
   761  	if percentiles[1][1] != percentiles[2][0] {
   762  		t.Log(percentiles)
   763  		t.Error("bad")
   764  	}
   765  	if percentiles[2][0] >= percentiles[2][1] {
   766  		t.Log(percentiles)
   767  		t.Error("bad")
   768  	}
   769  }
   770  
   771  // testDistributionMergeWith verifies the 'MergeWith' method on the
   772  // distribution.
   773  func testDistributionMergeWith(t *testing.T) {
   774  	t.Parallel()
   775  
   776  	// create a new distribution
   777  	d := NewDistribution(time.Minute * 100)
   778  	ms := time.Millisecond
   779  
   780  	// add some random data points
   781  	for i := 0; i < 1000; i++ {
   782  		d.AddDataPoint(time.Duration(fastrand.Intn(i+1)) * ms)
   783  	}
   784  
   785  	// get the chance at a random duration that's expected to be non zero
   786  	randDur := time.Duration(fastrand.Intn(500)+250) * ms
   787  	chance := d.ChanceAfter(randDur)
   788  	if chance == 0 {
   789  		t.Error("bad")
   790  	}
   791  
   792  	// create another distribution and merge it in with a weight of 1; since it
   793  	// has no datapoints, the merged distribution won't change
   794  	other := NewDistribution(time.Minute * 100)
   795  	d.MergeWith(other, 1)
   796  	if d.ChanceAfter(randDur) != chance {
   797  		t.Fatal("unexpected")
   798  	}
   799  
   800  	// get the expected duration and verify it's lower after merging with a
   801  	// distribution that has more datapoints on the lower end
   802  	expectedDur := d.ExpectedDuration()
   803  	for i := 0; i < 1000; i++ {
   804  		other.AddDataPoint(time.Duration(fastrand.Intn(i/4+1)) * ms)
   805  	}
   806  	d.MergeWith(other, 1)
   807  	if d.ExpectedDuration() >= expectedDur {
   808  		t.Fatal("unexpected")
   809  	}
   810  
   811  	// create a clean distribution and merge it with 3 distributions using a
   812  	// weight of 33% every time, the 3 distributions have datapoints below 1s,
   813  	// between 1s and 2s, and between 2s and 3s respectively
   814  	clean := NewDistribution(time.Minute * 100)
   815  	d1s := NewDistribution(time.Minute * 100)
   816  	for i := 0; i < 1000; i++ {
   817  		d1s.AddDataPoint(time.Duration(fastrand.Intn(1000)) * ms)
   818  	}
   819  	clean.MergeWith(d1s, .33)
   820  	d2s := NewDistribution(time.Minute * 100)
   821  	for i := 0; i < 1000; i++ {
   822  		d2s.AddDataPoint(time.Duration(fastrand.Intn(1000)+1000) * ms)
   823  	}
   824  	clean.MergeWith(d2s, .33)
   825  	d3s := NewDistribution(time.Minute * 100)
   826  	for i := 0; i < 1000; i++ {
   827  		d3s.AddDataPoint(time.Duration(fastrand.Intn(1000)+2000) * ms)
   828  	}
   829  	clean.MergeWith(d3s, .33)
   830  
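	// every merge contributed 1000 datapoints at a weight of .33, i.e. roughly
	// 330 weighted points per one second band, so about a third of the merged
	// distribution sits below 1s, two thirds below 2s and nearly all of it
	// below 3s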
   831  	// assert the chance after 1,2 and 3s is more or less equal to 33%, 66%, 99%
   832  	chanceAfter1s := clean.ChanceAfter(time.Second)
   833  	if chanceAfter1s < 0.31 || chanceAfter1s > 0.35 {
   834  		t.Fatal("unexpected", chanceAfter1s)
   835  	}
   836  	chanceAfter2s := clean.ChanceAfter(2 * time.Second)
   837  	if chanceAfter2s < 0.64 || chanceAfter2s > 0.68 {
   838  		t.Fatal("unexpected", chanceAfter2s)
   839  	}
   840  	chanceAfter3s := clean.ChanceAfter(3 * time.Second)
   841  	if chanceAfter3s < 0.97 {
   842  		t.Fatal("unexpected", chanceAfter3s)
   843  	}
   844  
   845  	// create two distributions
   846  	d1 := NewDistribution(time.Minute * 100)
   847  	d2 := NewDistribution(time.Minute * 100)
   848  
   849  	// add a datapoint in every bucket
   850  	for i := 0; i < DistributionTrackerTotalBuckets; i++ {
   851  		d1.AddDataPoint(DistributionDurationForBucketIndex(i))
   852  		d2.AddDataPoint(DistributionDurationForBucketIndex(i))
   853  	}
   854  	if d1.DataPoints() != DistributionTrackerTotalBuckets {
   855  		t.Fatal("unexpected")
   856  	}
   857  	if d2.DataPoints() != DistributionTrackerTotalBuckets {
   858  		t.Fatal("unexpected")
   859  	}
   860  	if d1.ExpectedDuration() != d2.ExpectedDuration() {
   861  		t.Fatal("unexpected")
   862  	}
   863  	oldExpectedDuration := d1.ExpectedDuration()
   864  
   865  	// merge the two distributions using a weight of .5
   866  	d1.MergeWith(d2, .5)
   867  	if d1.DataPoints() != 1.5*DistributionTrackerTotalBuckets {
   868  		t.Fatal("unexpected")
   869  	}
   870  	if d2.DataPoints() != DistributionTrackerTotalBuckets {
   871  		t.Fatal("unexpected")
   872  	}
   873  
   874  	// assert the expected duration has not changed; the expected duration
   875  	// relies on the pct chance a datapoint is in some bucket, and seeing as the
   876  	// datapoints are still evenly distributed, that chance has not changed
   877  	if d1.ExpectedDuration() != oldExpectedDuration {
   878  		t.Fatal("unexpected")
   879  	}
   880  
   881  	// create a third and fourth distribution with datapoints in the lower and
   882  	// upper half of the buckets
   883  	d3 := NewDistribution(time.Minute * 100)
   884  	d4 := NewDistribution(time.Minute * 100)
   885  	var totalDurFirstHalf int64
   886  	var totalDurSecondHalf int64
   887  	for i := 0; i < DistributionTrackerTotalBuckets; i++ {
   888  		point := DistributionDurationForBucketIndex(i)
   889  		if i < DistributionTrackerTotalBuckets/2 {
   890  			d3.AddDataPoint(point)
   891  			totalDurFirstHalf += point.Nanoseconds()
   892  		} else {
   893  			d4.AddDataPoint(DistributionDurationForBucketIndex(i))
   894  			totalDurSecondHalf += point.Nanoseconds()
   895  		}
   896  	}
   897  
   898  	// merge the third distribution using a weight of 1, manually calculate the
   899  	// expected duration and compare it with the actual value
   900  	d1.MergeWith(d3, 1)
   901  	if d1.ExpectedDuration() >= oldExpectedDuration {
   902  		t.Fatal("unexpected")
   903  	}
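	// d1 started with 1 datapoint per bucket, merging d2 at weight .5 added .5
	// per bucket, and merging d3 at weight 1 added 1 to every bucket in the
	// lower half, so the lower half now holds 2.5 weighted points per bucket
	// and the upper half 1.5, for 800 datapoints in total.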
   904  	pctChanceFirst := 2.5 / d1.DataPoints()
   905  	pctChanceSecond := 1.5 / d1.DataPoints()
   906  	if d1.ExpectedDuration() != time.Duration(pctChanceFirst*float64(totalDurFirstHalf)+pctChanceSecond*float64(totalDurSecondHalf)) {
   907  		t.Fatal("unexpected")
   908  	}
   909  
   910  	// merge the fourth distribution, again using a weight of 1, the expected
   911  	// duration should be back to normal because we're evenly distributed again
   912  	d1.MergeWith(d4, 1)
   913  	if d1.ExpectedDuration() != oldExpectedDuration {
   914  		t.Fatal("unexpected")
   915  	}
   916  	pctChanceFirst = 2.5 / d1.DataPoints()
   917  	pctChanceSecond = 2.5 / d1.DataPoints()
   918  	if d1.ExpectedDuration() != time.Duration(pctChanceFirst*float64(totalDurFirstHalf)+pctChanceSecond*float64(totalDurSecondHalf)) {
   919  		t.Fatal("unexpected")
   920  	}
   921  }
   922  
   923  // testDistributionShift verifies the 'Shift' method on the distribution.
   924  func testDistributionShift(t *testing.T) {
   925  	t.Parallel()
   926  
   927  	// create a new distribution
   928  	d := NewDistribution(time.Minute * 100)
   929  	ms := time.Millisecond
   930  
   931  	// add some datapoints below 896ms (896 perfectly aligns with a bucket)
   932  	for i := 0; i < 1000; i++ {
   933  		d.AddDataPoint(time.Duration(fastrand.Uint64n(896)) * ms)
   934  	}
   935  
   936  	// check the chance is 1
   937  	chance := d.ChanceAfter(896 * ms)
   938  	if chance != 1 {
   939  		t.Fatal("bad")
   940  	}
   941  
   942  	// calculate the chance after 576ms, expect it to hover around 576/896 (~64%)
   943  	chance = d.ChanceAfter(576 * ms)
   944  	if !(chance > .55 && chance < .75) {
   945  		t.Fatal("bad")
   946  	}
   947  
   948  	// shift the distribution by 0ms - it should have no effect
   949  	d.Shift(time.Duration(0))
   950  	chanceAfterShift := d.ChanceAfter(576 * ms)
   951  	if chanceAfterShift != chance {
   952  		t.Fatal("bad")
   953  	}
   954  
   955  	// shift the distribution by 576ms, this is perfectly aligned with a bucket
   956  	// so there'll be no fractionalised value that's being smeared over all
   957  	// buckets preceding it
   958  	d.Shift(time.Duration(576) * ms)
   959  	chanceAfterShift = d.ChanceAfter(576 * ms)
   960  	if chanceAfterShift != 0 {
   961  		t.Fatal("bad")
   962  	}
   963  
   964  	// verify the chance after 896ms is still one
   965  	chance = d.ChanceAfter(896 * ms)
   966  	if chance != 1 {
   967  		t.Fatal("bad")
   968  	}
   969  
   970  	// get the chance at a random duration between 576 and 896ms, and verify
   971  	// that shifting the distribution by less than 576ms is essentially a no-op
   972  	randDur := time.Duration(fastrand.Intn(896-576)+576) * ms
   973  	chance = d.ChanceAfter(randDur)
   974  	for i := 0; i < 550; i += 50 {
   975  		d.Shift(time.Duration(i) * ms)
   976  		if d.ChanceAfter(randDur) != chance {
   977  			t.Fatal("bad")
   978  		}
   979  	}
   980  
   981  	// verify initial buckets are empty
   982  	if d.ChanceAfter(16*ms) != 0 {
   983  		t.Fatal("bad")
   984  	}
   985  
   986  	// shift until we hit a bucket that got fractionalised and smeared across
   987  	// all buckets preceding it, we start at 804ms and go up with steps of 8ms
   988  	// to ensure we shift at a duration that induces a fractionalised shift
   989  	for i := 804; i < 896; i += 8 {
   990  		_, fraction := indexForDuration(time.Duration(i) * ms)
   991  		if fraction == 0 {
   992  			continue
   993  		}
   994  
   995  		d.Shift(time.Duration(i) * ms)
   996  		if d.ChanceAfter(16*ms) > 0 {
   997  			break
   998  		}
   999  	}
  1000  
  1001  	// verify the shift fractionalised a bucket and smeared the remainder over
  1002  	// all buckets preceding the one at which we shifted.
  1003  	if d.ChanceAfter(16*ms) == 0 {
  1004  		t.Fatal("bad")
  1005  	}
  1006  
  1007  	// reset the new distribution
  1008  	d = NewDistribution(time.Minute * 100)
  1009  
  1010  	// add one datapoint in every bucket
  1011  	for i := 0; i < DistributionTrackerTotalBuckets; i++ {
  1012  		d.AddDataPoint(DistributionDurationForBucketIndex(i))
  1013  	}
  1014  
  1015  	// shift it by 100 buckets and assert the first 100 buckets are empty, there
  1016  	// was no smear because the shift aligned perfectly with a bucket boundary
  1017  	d.Shift(DistributionDurationForBucketIndex(100))
  1018  	for i := 0; i < 100; i++ {
  1019  		if d.ChanceAfter(DistributionDurationForBucketIndex(i)) != 0 {
  1020  			t.Fatal("bad")
  1021  		}
  1022  	}
  1023  
  1024  	// shift it again but now make sure we shift at a fraction of a bucket so we
  1025  	// should see a remainder value smeared out across all preceding buckets
  1026  	shiftAt := DistributionDurationForBucketIndex(200) + (256/2)*ms
  1027  
  1028  	// quickly assert that we're shifting at the exact point we want to shift,
  1029  	// namely at bucket index 200 and we want to make sure we're at exactly 50%
  1030  	// of that bucket, which is a 256ms bucket.
  1031  	index, fraction := indexForDuration(shiftAt)
  1032  	if index != 200 || fraction != .5 {
  1033  		t.Fatal("bad")
  1034  	}
  1035  
  1036  	// perform the shift
  1037  	d.Shift(shiftAt)
  1038  
  1039  	// we expect to see a smear of (1/2)/200 = 1/400 because we are smearing
  1040  	// half of the original bucket's value over the 200 buckets before it
  1041  	smear := float64(1) / float64(400)
  1042  
  1043  	// every bucket up to index 200 now holds the same smeared value, so the
  1044  	// chance grows by the same amount per bucket; as a sanity check, verify
  1045  	// that 4ms maps to bucket index 1, the granularity at which the chance
  1046  	// increases in the loop below
  1047  	index, fraction = indexForDuration(4 * ms)
  1048  	if index != 1 || fraction != 0 {
  1049  		t.Fatal("bad")
  1050  	}
  1051  
  1052  	// compare the expected chance with the actual chance, allow for some
  1053  	// floating point precision errors up until 1e-9
  1054  	for i := 1; i < 200; i++ {
  1055  		chance = smear * float64(i) / d.DataPoints()
  1056  		if math.Abs(chance-d.ChanceAfter(DistributionDurationForBucketIndex(i))) > 1e-9 {
  1057  			t.Fatal("bad", i, chance, d.ChanceAfter(DistributionDurationForBucketIndex(i)))
  1058  		}
  1059  	}
  1060  }
  1061  
  1062  // testDistributionHelpers probes the `indexForDuration` helper function.
  1063  func testDistributionHelpers(t *testing.T) {
  1064  	t.Parallel()
  1065  
  1066  	ms := time.Millisecond
  1067  
  1068  	// verify some duration values up until the "initial buckets";
  1069  	// there are 64 initial buckets using a 4ms step size
  1070  	index, fraction := indexForDuration(0)
  1071  	if index != 0 || fraction != 0 {
  1072  		t.Error("bad")
  1073  	}
  1074  	index, fraction = indexForDuration(16 * ms)
  1075  	if index != 4 || fraction != 0 {
  1076  		t.Error("bad")
  1077  	}
  1078  	index, fraction = indexForDuration(65 * ms)
  1079  	if index != 16 || fraction != 0.25 {
  1080  		t.Error("bad")
  1081  	}
  1082  	index, fraction = indexForDuration(255 * ms)
  1083  	if index != 63 || fraction != 0.75 {
  1084  		t.Error("bad")
  1085  	}
  1086  
  1087  	// verify some durations where the stepsize is 16ms
  1088  
  1089  	// 64x4ms buckets + 22x16ms buckets = 608ms mark
  1090  	// meaning we are 12ms into the next 16ms bucket which is 75%
  1091  	index, fraction = indexForDuration(620 * ms)
  1092  	if index != 86 || fraction != 0.75 {
  1093  		t.Error("bad")
  1094  	}
  1095  
  1096  	// 64x4ms buckets + 40x16ms buckets = 896ms mark
  1097  	// meaning we are 0ms into the next bucket
  1098  	index, fraction = indexForDuration(896 * ms)
  1099  	if index != 104 || fraction != 0 {
  1100  		t.Error("bad")
  1101  	}
  1102  
  1103  	// verify some durations where the stepsize is 64ms
  1104  
  1105  	// 64x4ms buckets + 48x16ms buckets + 15x64ms buckets = 1984ms mark
  1106  	// meaning we are 16ms into the next bucket which is 25%
  1107  	index, fraction = indexForDuration(2000 * ms)
  1108  	if index != 127 || fraction != 0.25 {
  1109  		t.Error("bad")
  1110  	}
  1111  
  1112  	// verify upper bound
  1113  	index, fraction = indexForDuration(time.Hour + 10*time.Minute)
  1114  	if index != 399 || fraction != 1 {
  1115  		t.Error("bad", index, fraction)
  1116  	}
  1117  }