go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/common/sync/dispatcher/buffer/buffer_test.go

// Copyright 2019 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package buffer

import (
	"context"
	"math/rand"
	"strings"
	"testing"
	"time"

	"go.chromium.org/luci/common/clock"
	"go.chromium.org/luci/common/clock/testclock"
	"go.chromium.org/luci/common/retry"

	. "github.com/smartystreets/goconvey/convey"
	. "go.chromium.org/luci/common/testing/assertions"
)

func TestBuffer(t *testing.T) {
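	// must asserts that err is nil and passes the *Batch through, so AddNoBlock
	// results can be checked inline inside So assertions.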
	must := func(b *Batch, err error) *Batch {
		So(err, ShouldBeNil)
		return b
	}
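	// addNoBlockZero adds an item with a declared size of 0 (size tracking
	// unused); addNoBlockStr uses the string's length as its size.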
	addNoBlockZero := func(b *Buffer, now time.Time, item any) (*Batch, error) {
		return b.AddNoBlock(now, item, 0)
	}
	addNoBlockStr := func(b *Buffer, now time.Time, item string) (*Batch, error) {
		return b.AddNoBlock(now, item, len(item))
	}

	Convey(`Buffer`, t, func() {
		Convey(`construction`, func() {
			Convey(`success`, func() {
				b, err := NewBuffer(nil)
				So(err, ShouldBeNil)

				So(b.Stats(), ShouldResemble, Stats{})
				So(b.NextSendTime(), ShouldBeZeroValue)
				So(b.CanAddItem(), ShouldBeTrue)
				So(b.LeaseOne(time.Time{}), ShouldBeNil)
			})
			Convey(`fail`, func() {
				_, err := NewBuffer(&Options{BatchItemsMax: -100})
				So(err, ShouldErrLike, "normalizing buffer.Options")
			})
		})

		Convey(`usage`, func() {
			start := testclock.TestRecentTimeUTC
			ctx, tclock := testclock.UseTime(context.Background(), start)

			Convey(`common behavior`, func() {
				b, err := NewBuffer(&Options{
					MaxLeases:     2,
					BatchItemsMax: 20,
				})
				So(err, ShouldBeNil)
				nextSendTimeOffset := b.opts.BatchAgeMax

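				// Adding BatchItemsMax items cuts the current batch by count; the cut
				// batch can then be leased, ACK'd or NACK'd, subject to MaxLeases.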
				Convey(`batch cut by count`, func() {
					for i := 0; i < int(b.opts.BatchItemsMax); i++ {
						So(b.unleased.Len(), ShouldEqual, 0)
						if i > 0 {
							// The next send time should be when the current batch will be
							// forcibly cut due to BatchAgeMax.
							So(b.NextSendTime(), ShouldResemble, start.Add(nextSendTimeOffset))
						}
						So(must(addNoBlockZero(b, clock.Now(ctx), i)), ShouldBeNil)
					}

					So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 20})
					So(b.Stats().Total(), ShouldResemble, 20)
					So(b.Stats().Empty(), ShouldResemble, false)
					// The next send time should be when the current batch is available to
					// send. Since this is a test and time hasn't advanced, it's reset
					// back to the start time.
					So(b.NextSendTime(), ShouldEqual, start)
					So(b.CanAddItem(), ShouldBeTrue)
					So(b.unleased.Len(), ShouldEqual, 1)
					So(b.currentBatch, ShouldBeNil)

					batch := b.LeaseOne(clock.Now(ctx))
					So(b.LeaseOne(clock.Now(ctx)), ShouldBeNil)

					So(b.stats, ShouldResemble, Stats{LeasedItemCount: 20})
					So(batch.Data, ShouldHaveLength, b.opts.BatchItemsMax)
					for i := range batch.Data {
						So(batch.Data[i].Item, ShouldEqual, i)
					}

					Convey(`ACK`, func() {
						b.ACK(batch)

						So(b.stats, ShouldResemble, Stats{})
						So(b.Stats().Total(), ShouldResemble, 0)
						So(b.Stats().Empty(), ShouldResemble, true)

						Convey(`double ACK panic`, func() {
							So(func() { b.ACK(batch) }, ShouldPanicLike, "unknown *Batch")
							So(b.stats, ShouldResemble, Stats{})
						})
					})

					Convey(`Partial NACK`, func() {
						batch.Data = batch.Data[:10] // pretend we processed some Data

						b.NACK(ctx, nil, batch)

						So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 10})
						So(b.unleased.Len(), ShouldEqual, 1)

						Convey(`Adding Data does nothing`, func() {
							// no batch yet; the one we NACK'd is sleeping
							So(b.LeaseOne(clock.Now(ctx)), ShouldBeNil)
							tclock.Set(b.NextSendTime())
							newBatch := b.LeaseOne(clock.Now(ctx))
							So(newBatch, ShouldNotBeNil)

							newBatch.Data = append(newBatch.Data, BatchItem{}, BatchItem{}, BatchItem{})

							b.NACK(ctx, nil, newBatch)

							So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 10})
							So(b.unleased.Len(), ShouldEqual, 1)
						})
					})

					Convey(`Full NACK`, func() {
						b.NACK(ctx, nil, batch)

						So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 20})
						So(b.unleased.Len(), ShouldEqual, 1)

						Convey(`double NACK panic`, func() {
							So(func() { b.NACK(ctx, nil, batch) }, ShouldPanicLike, "unknown *Batch")
							So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 20})
							So(b.unleased.Len(), ShouldEqual, 1)
						})
					})

					Convey(`Max leases limits LeaseOne`, func() {
						now := clock.Now(ctx)
						So(must(addNoBlockZero(b, now, 10)), ShouldBeNil)
						b.Flush(now)
						So(must(addNoBlockZero(b, now, 20)), ShouldBeNil)
						b.Flush(now)

						So(b.LeaseOne(clock.Now(ctx)), ShouldNotBeNil)

						// There's something to send
						So(b.NextSendTime(), ShouldNotBeZeroValue)

						// But lease limit of 2 is hit
						So(b.LeaseOne(clock.Now(ctx)), ShouldBeNil)
					})
				})

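				// A batch that never reaches BatchItemsMax is still cut (and becomes
				// leasable) once BatchAgeMax has elapsed, picking up everything added
				// in the meantime.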
				Convey(`batch cut by time`, func() {
					So(must(addNoBlockZero(b, clock.Now(ctx), "bobbie")), ShouldBeNil)

					// Should be equal to the BatchAgeMax timeout of the first batch.
					nextSend := b.NextSendTime()
					So(nextSend, ShouldResemble, start.Add(nextSendTimeOffset))
					tclock.Set(nextSend)

					So(must(addNoBlockZero(b, clock.Now(ctx), "charlie")), ShouldBeNil)
					So(must(addNoBlockZero(b, clock.Now(ctx), "dakota")), ShouldBeNil)

					// We haven't leased one yet, so NextSendTime should stay the same.
					nextSend = b.NextSendTime()
					So(nextSend, ShouldResemble, start.Add(nextSendTimeOffset))

					// Eventually time passes, and we can lease the batch.
					tclock.Set(nextSend)
					batch := b.LeaseOne(clock.Now(ctx))
					So(batch, ShouldNotBeNil)

					// that batch included everything
					So(b.NextSendTime(), ShouldBeZeroValue)

					So(batch.Data, ShouldHaveLength, 3)
					So(batch.Data[0].Item, ShouldResemble, "bobbie")
					So(batch.Data[1].Item, ShouldResemble, "charlie")
					So(batch.Data[2].Item, ShouldResemble, "dakota")
				})

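				// Flush cuts the current batch immediately, without waiting for the
				// count or age limits; flushing again with nothing pending is a noop.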
				Convey(`batch cut by flush`, func() {
					So(must(addNoBlockZero(b, clock.Now(ctx), "bobbie")), ShouldBeNil)
					So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1})

					So(b.LeaseOne(clock.Now(ctx)), ShouldBeNil)

					b.Flush(start)
					So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1})
					So(b.currentBatch, ShouldBeNil)
					So(b.unleased.data, ShouldHaveLength, 1)

					Convey(`double flush is noop`, func() {
						b.Flush(start)
						So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1})
						So(b.currentBatch, ShouldBeNil)
						So(b.unleased.data, ShouldHaveLength, 1)
					})

					batch := b.LeaseOne(clock.Now(ctx))
					So(batch, ShouldNotBeNil)
					So(b.stats, ShouldResemble, Stats{LeasedItemCount: 1})

					b.ACK(batch)

					So(b.stats.Empty(), ShouldBeTrue)
				})
			})

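			// With retry.Limited{Retries: 1}, the first NACK re-queues the batch and
			// the second NACK exhausts the retry budget, dropping it entirely.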
			Convey(`retry limit eventually drops batch`, func() {
				b, err := NewBuffer(&Options{
					BatchItemsMax: 1,
					Retry: func() retry.Iterator {
						return &retry.Limited{Retries: 1}
					},
				})
				So(err, ShouldBeNil)

				So(must(addNoBlockZero(b, clock.Now(ctx), 1)), ShouldBeNil)

				b.NACK(ctx, nil, b.LeaseOne(clock.Now(ctx)))
				So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1})
				b.NACK(ctx, nil, b.LeaseOne(clock.Now(ctx)))
				// only one retry was allowed, so now it's gone.
				So(b.stats, ShouldResemble, Stats{})
			})

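			// With FIFO enabled, a single lease at a time and unlimited retries,
			// items come out in insertion order even when batches are randomly NACK'd.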
			Convey(`in-order delivery`, func() {
				b, err := NewBuffer(&Options{
					MaxLeases:     1,
					BatchItemsMax: 1,
					FullBehavior:  InfiniteGrowth{},
					FIFO:          true,
					Retry: func() retry.Iterator {
						return &retry.Limited{Retries: -1}
					},
				})
				So(err, ShouldBeNil)

				expect := make([]int, 20)
				for i := 0; i < 20; i++ {
					expect[i] = i
					So(must(addNoBlockZero(b, clock.Now(ctx), i)), ShouldBeNil)
					tclock.Add(time.Millisecond)
				}

				out := make([]int, 0, 20)

				// ensure a reasonably random distribution below.
				rand.Seed(time.Now().UnixNano())

				for !b.Stats().Empty() {
					batch := b.LeaseOne(clock.Now(ctx))
					tclock.Add(time.Millisecond)

					if rand.Intn(2) == 0 {
						out = append(out, batch.Data[0].Item.(int))
						b.ACK(batch)
					} else {
						b.NACK(ctx, nil, batch)
					}
				}

				So(out, ShouldResemble, expect)
			})

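			// With BatchItemsMax disabled (-1), batches are cut by accumulated item
			// size (BatchSizeMax) while BlockNewItems{MaxSize: 150} bounds the buffer.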
			Convey(`batch size`, func() {
				b, err := NewBuffer(&Options{
					FullBehavior:  &BlockNewItems{MaxSize: 150},
					BatchItemsMax: -1,
					BatchSizeMax:  100,
				})
				So(err, ShouldBeNil)

				t0 := tclock.Now()

				Convey(`cuts previous batch when adding another item`, func() {
					must(addNoBlockStr(b, t0, "hello, this is a string which is 47 bytes long."))
					must(addNoBlockStr(b, t0, "hello, this is a string which is 47 bytes long."))

					// Cuts previous batch when adding a new item
					must(addNoBlockStr(b, t0, "hello, this is a string which is 47 bytes long."))

					Convey(`and cannot exceed FullBehavior`, func() {
						must(addNoBlockStr(b, t0, "this is a kind of long string")) // buffer is now past capacity.

						_, err := addNoBlockStr(b, t0, "boom time")
						So(err, ShouldErrLike, ErrBufferFull)
					})

					Convey(`we should see only two items when leasing a batch`, func() {
						// BlockNewItems accounts for addition of too-large item.
						batch := b.LeaseOne(t0)
						So(batch, ShouldNotBeNil)
						So(batch.Data, ShouldHaveLength, 2)
						b.ACK(batch)
					})
				})

				Convey(`cuts batch if it exactly equals size limit`, func() {
					// Batch is cut if it equals BatchSizeMax after insertion.
					must(addNoBlockStr(b, t0, "hello, this is a string which is 47 bytes long."))
					must(addNoBlockStr(b, t0, "hello, this is a longer string which is 53 bytes long"))

					batch := b.LeaseOne(t0)
					So(batch, ShouldNotBeNil)
					So(batch.Data, ShouldHaveLength, 2)
					So(batch.Data[1].Item, ShouldContainSubstring, "longer")
					b.ACK(batch)
				})

				Convey(`too-large items error`, func() {
					_, err := addNoBlockStr(b, t0, strings.Repeat("this is 21 chars long", 5))
					So(err, ShouldErrLike, ErrItemTooLarge)
				})

				Convey(`too-small items error`, func() {
					// Note: it's up to the users to ensure they don't put zero-size
					// items in here. If they really wanted 'empty' items in here, they
					// should still assign them some arbitrary non-zero size (like 1).
					_, err := addNoBlockStr(b, t0, "")
					So(err, ShouldErrLike, ErrItemTooSmall)
				})

				Convey(`exactly-BatchSizeMax items do a double flush`, func() {
					must(addNoBlockStr(b, t0, "short stuff"))

					_, err := b.AddNoBlock(t0, "I'm lazy and lying about the size.", b.opts.BatchSizeMax)
					So(err, ShouldBeNil)
					batch := b.LeaseOne(t0)
					So(batch, ShouldNotBeNil)
					So(batch.Data, ShouldHaveLength, 1)
					So(batch.Data[0].Item, ShouldContainSubstring, "short stuff")
					b.ACK(batch)

					batch = b.LeaseOne(t0)
					So(batch, ShouldNotBeNil)
					So(batch.Data, ShouldHaveLength, 1)
					So(batch.Data[0].Item, ShouldContainSubstring, "lazy and lying")
					b.ACK(batch)
				})

				Convey(`NACK`, func() {
					Convey(`adds size back`, func() {
						must(addNoBlockStr(b, t0, "stuff"))

						b.Flush(t0)
						batch := b.LeaseOne(t0)
						So(b.Stats(), ShouldResemble, Stats{LeasedItemCount: 1, LeasedItemSize: 5})

						// can shrink the batch, too
						batch.Data[0].Size = 1
						b.NACK(ctx, nil, batch)
						So(b.Stats(), ShouldResemble, Stats{UnleasedItemCount: 1, UnleasedItemSize: 1})
					})
				})
			})

			Convey(`full buffer behavior`, func() {

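				// DropOldestBatch evicts the oldest live batch (even a leased one) to
				// make room for newly added items.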
				Convey(`DropOldestBatch`, func() {
					b, err := NewBuffer(&Options{
						FullBehavior:  &DropOldestBatch{MaxLiveItems: 1},
						BatchItemsMax: 1,
					})
					So(err, ShouldBeNil)

					So(must(addNoBlockZero(b, clock.Now(ctx), 0)), ShouldBeNil)
					So(must(addNoBlockZero(b, clock.Now(ctx), 1)), ShouldNotBeNil) // drops 0

					So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1}) // full!
					So(b.CanAddItem(), ShouldBeTrue)

					Convey(`via new data`, func() {
						So(must(addNoBlockZero(b, clock.Now(ctx), 100)), ShouldNotBeNil) // drops 1
						So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1})         // still full

						tclock.Set(b.NextSendTime())

						batch := b.LeaseOne(clock.Now(ctx))
						So(batch, ShouldNotBeNil)
						So(batch.Data, ShouldHaveLength, 1)
						So(batch.Data[0].Item, ShouldEqual, 100)
						So(batch.id, ShouldEqual, 3)
					})

					Convey(`via NACK`, func() {
						leased := b.LeaseOne(clock.Now(ctx))
						So(leased, ShouldNotBeNil)
						So(b.stats, ShouldResemble, Stats{LeasedItemCount: 1})

						dropped := must(addNoBlockZero(b, clock.Now(ctx), 100))
						So(dropped, ShouldResemble, leased)
						So(b.stats, ShouldResemble, Stats{
							UnleasedItemCount:      1,
							DroppedLeasedItemCount: 1,
						})

						dropped = must(addNoBlockZero(b, clock.Now(ctx), 200))
						So(dropped.Data[0].Item, ShouldResemble, 100)
						So(b.stats, ShouldResemble, Stats{
							UnleasedItemCount:      1,
							DroppedLeasedItemCount: 1,
						})

						b.NACK(ctx, nil, leased)

						So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1})
					})
				})

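				// The drop policy also applies to the batch currently being assembled:
				// the second add drops the batch holding the first item even though it
				// was never explicitly cut.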
				Convey(`DropOldestBatch dropping items from the current batch`, func() {
					b, err := NewBuffer(&Options{
						FullBehavior:  &DropOldestBatch{MaxLiveItems: 1},
						BatchItemsMax: -1,
					})
					So(err, ShouldBeNil)

					So(must(addNoBlockZero(b, clock.Now(ctx), 0)), ShouldBeNil)
					So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1})
					So(must(addNoBlockZero(b, clock.Now(ctx), 1)), ShouldNotBeNil)
					So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1})
				})

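				// BlockNewItems{MaxItems: 21} makes AddNoBlock fail with ErrBufferFull
				// once 21 items are live, until ACKs (or a shrinking NACK) free space.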
				Convey(`BlockNewItems`, func() {
					b, err := NewBuffer(&Options{
						FullBehavior:  &BlockNewItems{MaxItems: 21},
						BatchItemsMax: 20,
					})
					So(err, ShouldBeNil)

					for i := 0; i < int(b.opts.BatchItemsMax)+1; i++ {
						So(must(addNoBlockZero(b, clock.Now(ctx), i)), ShouldBeNil)
					}

					So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 21})
					So(b.CanAddItem(), ShouldBeFalse)

					Convey(`Adding more errors`, func() {
						_, err := addNoBlockZero(b, clock.Now(ctx), 100)
						So(err, ShouldErrLike, ErrBufferFull)
					})

					Convey(`Leasing a batch still rejects Adds`, func() {
						batch := b.LeaseOne(clock.Now(ctx))
						So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1, LeasedItemCount: 20})

						So(b.CanAddItem(), ShouldBeFalse)
						_, err := addNoBlockZero(b, clock.Now(ctx), 100)
						So(err, ShouldErrLike, ErrBufferFull)

						Convey(`ACK`, func() {
							b.ACK(batch)
							So(b.CanAddItem(), ShouldBeTrue)
							So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 1})
							So(must(addNoBlockZero(b, clock.Now(ctx), 100)), ShouldBeNil)
							So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 2})
						})

						Convey(`partial NACK`, func() {
							batch.Data = batch.Data[:10]
							b.NACK(ctx, nil, batch)

							So(b.CanAddItem(), ShouldBeTrue)
							So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 11})

							for i := 0; i < 10; i++ {
								So(must(addNoBlockZero(b, clock.Now(ctx), 100+i)), ShouldBeNil)
							}

							So(b.CanAddItem(), ShouldBeFalse)
							So(b.LeaseOne(clock.Now(ctx)), ShouldBeNil) // no batch cut yet
							tclock.Set(b.NextSendTime())

							So(b.LeaseOne(clock.Now(ctx)), ShouldNotBeNil)
						})

						Convey(`NACK`, func() {
							b.NACK(ctx, nil, batch)
							So(b.stats, ShouldResemble, Stats{UnleasedItemCount: 21})
							So(b.CanAddItem(), ShouldBeFalse)
						})

					})

				})

			})
		})
	})
}