go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/common/sync/dispatcher/channel_test.go

// Copyright 2019 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dispatcher

import (
	"context"
	"fmt"
	"sort"
	"strings"
	"sync"
	"testing"
	"time"

	"go.chromium.org/luci/common/clock"
	"go.chromium.org/luci/common/clock/testclock"
	"go.chromium.org/luci/common/errors"
	"go.chromium.org/luci/common/logging"
	"go.chromium.org/luci/common/logging/gologger"
	"go.chromium.org/luci/common/retry/transient"
	"go.chromium.org/luci/common/sync/dispatcher/buffer"
	"golang.org/x/time/rate"

	. "github.com/smartystreets/goconvey/convey"
	. "go.chromium.org/luci/common/testing/assertions"
)

func dummySendFn(*buffer.Batch) error { return nil }

func noDrop(dropped *buffer.Batch, flush bool) {
	if flush {
		return
	}
	panic(fmt.Sprintf("dropping %+v", dropped))
}

func dbgIfVerbose(ctx context.Context) (context.Context, func(string, ...any)) {
	if testing.Verbose() {
		ctx = logging.SetLevel(gologger.StdConfig.Use(ctx), logging.Debug)
		return ctx, logging.Get(logging.SetField(ctx, "dispatcher.coordinator", true)).Infof
	}
	return ctx, func(string, ...any) {}
}

func TestChannelConstruction(t *testing.T) {
	Convey(`Channel`, t, func() {
		ctx, _ := testclock.UseTime(context.Background(), testclock.TestRecentTimeUTC)
		ctx, dbg := dbgIfVerbose(ctx)
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		Convey(`construction`, func() {

			Convey(`success`, func() {
				ch, err := NewChannel(ctx, &Options{testingDbg: dbg}, dummySendFn)
				So(err, ShouldBeNil)
				ch.Close()
				<-ch.DrainC
			})

			Convey(`failure`, func() {
				Convey(`bad SendFn`, func() {
					_, err := NewChannel(ctx, nil, nil)
					So(err, ShouldErrLike, "send is required")
				})

				Convey(`bad Options`, func() {
					_, err := NewChannel(ctx, &Options{
						QPSLimit: rate.NewLimiter(100, 0),
					}, dummySendFn)
					So(err, ShouldErrLike, "normalizing dispatcher.Options")
				})

				Convey(`bad Options.Buffer`, func() {
					_, err := NewChannel(ctx, &Options{
						Buffer: buffer.Options{
							BatchItemsMax: -3,
						},
					}, dummySendFn)
					So(err, ShouldErrLike, "allocating Buffer")
				})
			})

		})

	})

}

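// For orientation before the behavioral tests below: the minimal Channel
// lifecycle exercised above looks like this sketch (not a runnable test; the
// no-op SendFn and the string item are placeholders):
//
//	ch, err := NewChannel(ctx, &Options{}, func(batch *buffer.Batch) error {
//		return nil // deliver batch.Data somewhere
//	})
//	if err != nil {
//		// Options failed validation.
//	}
//	ch.C <- "item"        // enqueue work; may block, depending on FullBehavior
//	ch.CloseAndDrain(ctx) // or: ch.Close(); <-ch.DrainC
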
func TestSerialSenderWithoutDrops(t *testing.T) {
	Convey(`serial world-state sender without drops`, t, func(cvctx C) {
		ctx, tclock := testclock.UseTime(context.Background(), testclock.TestRecentTimeUTC)
		ctx, dbg := dbgIfVerbose(ctx)

		sentBatches := []string{}
		enableThisError := false

		ch, err := NewChannel(ctx, &Options{
			DropFn:   noDrop,
			QPSLimit: rate.NewLimiter(rate.Inf, 0),
			Buffer: buffer.Options{
				MaxLeases:     1,
				BatchItemsMax: 1,
				FullBehavior:  &buffer.BlockNewItems{MaxItems: 10},
			},
			testingDbg: dbg,
		}, func(batch *buffer.Batch) (err error) {
			cvctx.So(batch.Data, ShouldHaveLength, 1)
			str := batch.Data[0].Item.(string)
			if enableThisError && str == "This" {
				enableThisError = false
				return errors.New("narp", transient.Tag)
			}
			sentBatches = append(sentBatches, str)
			if str == "test." {
				defaultRetryAmount := buffer.Defaults.Retry().Next(ctx, nil)
				tclock.Set(tclock.Now().Add(defaultRetryAmount))
			}
			return nil
		})
		So(err, ShouldBeNil)
		defer ch.CloseAndDrain(ctx)

		Convey(`no errors`, func() {
			ch.C <- "Hello"
			ch.C <- "World!"
			ch.C <- "This"
			ch.C <- "is"
			ch.C <- "a"
			ch.C <- "test."
			ch.CloseAndDrain(ctx)

			So(sentBatches, ShouldResemble, []string{
				"Hello", "World!",
				"This", "is", "a", "test.",
			})
		})

		Convey(`error and retry`, func() {
			enableThisError = true

			ch.C <- "Hello"
			ch.C <- "World!"
			ch.C <- "This"
			ch.C <- "is"
			ch.C <- "a"
			ch.C <- "test."
			ch.CloseAndDrain(ctx)

			So(sentBatches, ShouldResemble, []string{
				"Hello", "World!",
				"is", "a", "test.", "This",
			})
		})

	})
}

func TestContextShutdown(t *testing.T) {
	Convey(`context cancelation ends channel`, t, func(cvctx C) {
		ctx, _ := testclock.UseTime(context.Background(), testclock.TestRecentTimeUTC)
		ctx, dbg := dbgIfVerbose(ctx)
		cctx, cancel := context.WithCancel(ctx)

		sentBatches := []string{}
		droppedBatches := []string{}

		ch, err := NewChannel(cctx, &Options{
			QPSLimit: rate.NewLimiter(rate.Inf, 0),
			DropFn: func(dropped *buffer.Batch, flush bool) {
				if flush {
					return
				}
				droppedBatches = append(droppedBatches, dropped.Data[0].Item.(string))
			},
			Buffer: buffer.Options{
				MaxLeases:     1,
				BatchItemsMax: 1,
				FullBehavior:  &buffer.BlockNewItems{MaxItems: 2},
			},
			testingDbg: dbg,
		}, func(batch *buffer.Batch) (err error) {
			sentBatches = append(sentBatches, batch.Data[0].Item.(string))
			<-cctx.Done()
			return
		})
		So(err, ShouldBeNil)

		ch.C <- "hey"
		ch.C <- "buffered"
		select {
		case ch.C <- "blocked":
			panic("channel should have been blocked")
		case <-time.After(time.Millisecond):
			// OK
		}

		cancel()
		ch.C <- "IGNORE ME" // a canceled channel can still be written to, but the item is dropped

		ch.CloseAndDrain(ctx)

		So(sentBatches, ShouldContain, "hey")
		So(droppedBatches, ShouldContain, "buffered")
		So(droppedBatches, ShouldContain, "IGNORE ME")
	})
}

func TestQPSLimit(t *testing.T) {
	Convey(`QPS limited send`, t, func() {
		ctx := context.Background() // uses real time!
		ctx, dbg := dbgIfVerbose(ctx)

		sentBatches := []int{}

		ch, err := NewChannel(ctx, &Options{
			QPSLimit: rate.NewLimiter(rate.Every(10*time.Millisecond), 1),
			DropFn:   noDrop,
			Buffer: buffer.Options{
				MaxLeases:     1,
				BatchItemsMax: 1,
				FullBehavior:  &buffer.BlockNewItems{MaxItems: 20},
			},
			testingDbg: dbg,
		}, func(batch *buffer.Batch) (err error) {
			sentBatches = append(sentBatches, batch.Data[0].Item.(int))
			return
		})
		So(err, ShouldBeNil)

		expected := []int{}

		start := time.Now()
		for i := 0; i < 20; i++ {
			ch.C <- i
			expected = append(expected, i)
		}
		ch.CloseAndDrain(ctx)
		end := time.Now()

		So(sentBatches, ShouldResemble, expected)

		// 20 batches, minus a batch because the QPSLimiter starts with full tokens.
		minThreshold := 19 * 10 * time.Millisecond
		So(end, ShouldHappenAfter, start.Add(minThreshold))
	})
}

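// A back-of-the-envelope check of the bound used above (a sketch, not part
// of the test): with rate.NewLimiter(rate.Every(10*time.Millisecond), 1),
// the first send spends the limiter's single pre-filled token immediately,
// and each of the remaining 19 sends waits for one 10ms refill, so draining
// 20 single-item batches takes at least
//
//	minThreshold := 19 * 10 * time.Millisecond // 190ms
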
func TestQPSLimitParallel(t *testing.T) {
	Convey(`QPS limited send (parallel)`, t, func() {
		ctx := context.Background() // uses real time!
		ctx, dbg := dbgIfVerbose(ctx)

		var lock sync.Mutex
		sentBatches := []int{}

		ch, err := NewChannel(ctx, &Options{
			QPSLimit: rate.NewLimiter(rate.Every(10*time.Millisecond), 10),
			DropFn:   noDrop,
			Buffer: buffer.Options{
				MaxLeases:     4,
				BatchItemsMax: 1,
				FullBehavior:  &buffer.BlockNewItems{MaxItems: 20},
			},
			testingDbg: dbg,
		}, func(batch *buffer.Batch) (err error) {
			lock.Lock()
			sentBatches = append(sentBatches, batch.Data[0].Item.(int))
			lock.Unlock()
			return
		})
		So(err, ShouldBeNil)

		start := time.Now()
		for i := 0; i < 20; i++ {
			ch.C <- i
		}
		ch.CloseAndDrain(ctx)
		end := time.Now()

		// We know it should have 20 items, but the order will be pseudo-random.
		So(sentBatches, ShouldHaveLength, 20)

		// 20 batches across 4 workers, minus half a batch for sampling error.
		minThreshold := 5*10*time.Millisecond - 5*time.Millisecond

		So(end, ShouldHappenAfter, start.Add(minThreshold))
	})
}

func TestExplicitDrops(t *testing.T) {
	Convey(`explicit drops with ErrorFn`, t, func() {
		ctx := context.Background() // uses real time!
		ctx, dbg := dbgIfVerbose(ctx)

		sentBatches := []int{}
		droppedBatches := []int{}

		ch, err := NewChannel(ctx, &Options{
			QPSLimit: rate.NewLimiter(rate.Inf, 0),
			DropFn: func(batch *buffer.Batch, flush bool) {
				if flush {
					return
				}
				droppedBatches = append(droppedBatches, batch.Data[0].Item.(int))
			},
			ErrorFn: func(batch *buffer.Batch, err error) (retry bool) {
				return false
			},
			Buffer: buffer.Options{
				MaxLeases:     1,
				BatchItemsMax: 1,
				FullBehavior:  &buffer.BlockNewItems{MaxItems: 20},
			},
			testingDbg: dbg,
		}, func(batch *buffer.Batch) (err error) {
			itm := batch.Data[0].Item.(int)
			if itm%2 == 0 {
				err = errors.New("number is even")
			} else {
				sentBatches = append(sentBatches, itm)
			}
			return
		})
		So(err, ShouldBeNil)

		for i := 0; i < 20; i++ {
			ch.C <- i
		}
		ch.CloseAndDrain(ctx)

		So(sentBatches, ShouldResemble, []int{1, 3, 5, 7, 9, 11, 13, 15, 17, 19})
		So(droppedBatches, ShouldResemble, []int{0, 2, 4, 6, 8, 10, 12, 14, 16, 18})
	})
}

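// The ErrorFn above unconditionally refuses retries, so every failed batch
// is routed to DropFn. A common alternative (a sketch, assuming errors are
// tagged the way TestSerialSenderWithoutDrops tags them) is to retry only
// transient failures:
//
//	ErrorFn: func(batch *buffer.Batch, err error) (retry bool) {
//		return transient.Tag.In(err) // retry iff the error is tagged transient
//	},
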
func TestImplicitDrops(t *testing.T) {
	Convey(`implicit drops with DropOldestBatch`, t, func(cvctx C) {
		ctx := context.Background() // uses real time!
		ctx, dbg := dbgIfVerbose(ctx)

		sentBatches := []int{}
		sendBlocker := make(chan struct{})

		limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
		ch, err := NewChannel(ctx, &Options{
			QPSLimit: limiter,
			Buffer: buffer.Options{
				MaxLeases:     1,
				BatchItemsMax: 1,
				FullBehavior:  &buffer.DropOldestBatch{MaxLiveItems: 1},
			},
			testingDbg: dbg,
		}, func(batch *buffer.Batch) (err error) {
			sentBatches = append(sentBatches, batch.Data[0].Item.(int))
			<-sendBlocker
			return
		})
		So(err, ShouldBeNil)
		// Grab the first token; the channel can't send until it recharges.
		limiter.Reserve()

		// Stuff a bunch of crap into the channel. We have 100ms to do this
		// before the channel is able to send something, which should be plenty
		// of time (running this on my laptop takes 3-4ms with verbose logs).
		for i := 0; i < 20; i++ {
			ch.C <- i
		}
		// At this point we can start draining the channel,
		close(ch.C)
		// then unblock the sender,
		close(sendBlocker)
		// then wait for the channel to drain.
		<-ch.DrainC

		// We should only have seen one batch actually sent.
		So(sentBatches, ShouldHaveLength, 1)
	})
}

func TestContextCancel(t *testing.T) {
	Convey(`can use context cancelation for termination`, t, func() {
		ctx := context.Background() // uses real time!
		ctx, dbg := dbgIfVerbose(ctx)
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		ch, err := NewChannel(ctx, &Options{
			QPSLimit: rate.NewLimiter(rate.Inf, 0),
			Buffer: buffer.Options{
				MaxLeases:     1,
				BatchItemsMax: 1,
				FullBehavior:  &buffer.BlockNewItems{MaxItems: 20},
			},
			testingDbg: dbg,
		}, func(batch *buffer.Batch) (err error) {
			// doesn't matter :)
			return
		})
		So(err, ShouldBeNil)

		writerDone := make(chan struct{})
		go func() {
			defer close(writerDone)
			i := 0
			for {
				select {
				case ch.C <- i:
				case <-ctx.Done():
					return
				}
				i++
			}
		}()
		cancel()

		<-writerDone

		close(ch.C) // still responsible for closing C
		<-ch.DrainC // everything shuts down now
	})
}

func TestDrainedFn(t *testing.T) {
	Convey(`can set DrainedFn to do exactly-once termination tasks`, t, func() {
		ctx := context.Background() // uses real time!
		ctx, dbg := dbgIfVerbose(ctx)
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		amDrained := false

		ch, err := NewChannel(ctx, &Options{
			DrainedFn:  func() { amDrained = true },
			testingDbg: dbg,
		}, func(batch *buffer.Batch) (err error) {
			// doesn't matter :)
			return
		})
		So(err, ShouldBeNil)

		ch.Close()
		<-ch.DrainC
		So(amDrained, ShouldBeTrue)
	})
}

func TestCloseDeadlockRegression(t *testing.T) {
	// This is a regression test for crbug.com/1006623
	//
	// A single run of the test, even with the broken code, doesn't reliably
	// reproduce it. However, running the test ~10 times seems to be VERY likely
	// to catch the deadlock at least once. We could make the test 100% likely to
	// catch the race, but it would involve adding extra synchronization channels
	// to the production code, which makes us nervous :).
	//
	// This code should never hang if the coordinator code is correct.
	for i := 0; i < 10; i++ {
		Convey(fmt.Sprintf(`ensure that the channel can shut down cleanly (%d)`, i), t, func() {
			ctx := context.Background() // uses real time!
			ctx, dbg := dbgIfVerbose(ctx)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()

			inSendFn := make(chan struct{})
			holdSendFn := make(chan struct{})

			ch, err := NewChannel(ctx, &Options{
				testingDbg: dbg,
				Buffer: buffer.Options{
					MaxLeases:     1,
					BatchItemsMax: 1,
					FullBehavior: &buffer.DropOldestBatch{
						MaxLiveItems: 1,
					},
				},
				QPSLimit: rate.NewLimiter(rate.Inf, 1),
			}, func(batch *buffer.Batch) (err error) {
				inSendFn <- struct{}{}
				<-holdSendFn
				return
			})
			So(err, ShouldBeNil)

			ch.C <- nil
			// Now ensure we're in the send function.
			<-inSendFn

			ch.C <- nil // this will go into UnleasedItemCount

			// While still in the send function, cancel the context and close the
			// channel.
			cancel()
			ch.Close()

			// Now unblock the send function.
			close(holdSendFn)

			// We should drain properly.
			<-ch.DrainC
		})
	}
}

func TestCorrectTimerUsage(t *testing.T) {
	t.Parallel()

	Convey(`Correct use of Timer.Reset`, t, func(cvctx C) {
		ctx, tclock := testclock.UseTime(context.Background(), testclock.TestRecentTimeUTC)
		ctx, dbg := dbgIfVerbose(ctx)
		tclock.SetTimerCallback(func(d time.Duration, t clock.Timer) {
			switch {
			case testclock.HasTags(t, "coordinator") || testclock.HasTags(t, "test-itself"):
				logging.Debugf(ctx, "unblocking %s", testclock.GetTags(t))
				tclock.Add(d)
			}
		})

		mu := sync.Mutex{}
		sent := []int{}

		ch, err := NewChannel(ctx, &Options{
			DropFn: noDrop,
			Buffer: buffer.Options{
				MaxLeases:     10,
				BatchItemsMax: 3,
				BatchAgeMax:   time.Second,
				FullBehavior:  &buffer.BlockNewItems{MaxItems: 15},
			},
			testingDbg: dbg,
		}, func(batch *buffer.Batch) (err error) {
			// Add randomish delays.
			timer := clock.NewTimer(clock.Tag(ctx, "test-itself"))
			timer.Reset(time.Millisecond)
			<-timer.GetC()

			mu.Lock()
			for i := range batch.Data {
				sent = append(sent, batch.Data[i].Item.(int))
			}
			mu.Unlock()
			return nil
		})
		So(err, ShouldBeNil)

		const N = 100
		for i := 1; i <= N; i++ {
			ch.C <- i
		}
		// Must not hang when tried with
		//     go test -race -run TestCorrectTimerUsage -failfast -count 1000 -timeout 20s
		//
		// NOTE: after lots of iterations, a failure may be due not to a deadlock
		// but to garbage collection taking too long. You can either examine the
		// stack traces, or bump the timeout and observe whether it increases the
		// number of iterations before failure.
		ch.CloseAndDrain(ctx)
		So(sent, ShouldHaveLength, N)
		sort.Ints(sent)
		for i := 1; i <= N; i++ {
			So(sent[i-1], ShouldEqual, i)
		}
	})
}

func TestSizeBasedChannel(t *testing.T) {
	t.Parallel()

	Convey(`Size based channel`, t, func(cvctx C) {
		ctx := context.Background() // uses real time!
		ctx, dbg := dbgIfVerbose(ctx)
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		var mu sync.Mutex
		var needUnlock bool
		defer func() {
			if needUnlock {
				mu.Unlock()
			}
		}()
		var out []string
		var fails []*buffer.Batch
		var errs []error

		opts := &Options{
			testingDbg: dbg,
			ItemSizeFunc: func(itm any) int {
				return len(itm.(string))
			},
			ErrorFn: func(failedBatch *buffer.Batch, err error) (retry bool) {
				fails = append(fails, failedBatch)
				errs = append(errs, err)
				return false
			},
			Buffer: buffer.Options{
				MaxLeases:     1,
				BatchItemsMax: -1,
				BatchSizeMax:  100,
				FullBehavior:  &buffer.BlockNewItems{},
			},
			QPSLimit: rate.NewLimiter(rate.Inf, 1),
		}

		ch, err := NewChannel(ctx, opts, func(batch *buffer.Batch) (err error) {
			mu.Lock()
			defer mu.Unlock()
			for _, itm := range batch.Data {
				out = append(out, itm.Item.(string))
			}
			return nil
		})
		So(err, ShouldBeNil)

		bigString := strings.Repeat("something.", 5) // 50 bytes

		mu.Lock()
		needUnlock = true

		for i := 0; i < 10; i++ {
			ch.C <- bigString
		}

		select {
		case ch.C <- "extra string":
			So(true, ShouldBeFalse) // shouldn't be able to push more
		case <-clock.After(ctx, 250*time.Millisecond):
		}

		mu.Unlock()
		needUnlock = false

		select {
		case ch.C <- "extra string": // no problem now
		case <-clock.After(ctx, 250*time.Millisecond):
			So(true, ShouldBeFalse)
		}

		// pushing a giant object in will end up going to ErrorFn
		ch.C <- strings.Repeat("something.", 50) // 500 bytes
		// pushing an empty object (without having ItemSizeFunc assign it
		// a non-zero arbitrary size) goes to ErrorFn.
		ch.C <- ""

		ch.CloseAndDrain(ctx)

		So(fails, ShouldHaveLength, 2)
		So(fails[0].Data, ShouldHaveLength, 1)
		So(fails[0].Data[0].Item, ShouldHaveLength, 500)
		So(fails[0].Data[0].Size, ShouldEqual, 500)
		So(fails[1].Data, ShouldHaveLength, 1)
		So(fails[1].Data[0].Item, ShouldHaveLength, 0)
		So(fails[1].Data[0].Size, ShouldEqual, 0)
		So(errs[0], ShouldErrLike, buffer.ErrItemTooLarge)
		So(errs[1], ShouldErrLike, buffer.ErrItemTooSmall)

		So(out, ShouldHaveLength, 11)
		So(out[len(out)-1], ShouldResemble, "extra string")
	})
}

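// As TestSizeBasedChannel notes, a zero-sized item is rejected with
// buffer.ErrItemTooSmall unless ItemSizeFunc assigns it a non-zero size.
// A sketch of an ItemSizeFunc that gives empty strings an arbitrary
// one-byte floor so they pass validation:
//
//	ItemSizeFunc: func(itm any) int {
//		if len(itm.(string)) == 0 {
//			return 1 // count empty items as one byte
//		}
//		return len(itm.(string))
//	},
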
func TestMinQPS(t *testing.T) {
	t.Parallel()
	Convey(`TestMinQPS`, t, func() {
		Convey(`send w/ minimal frequency`, func() {
			ctx := context.Background() // uses real time!
			ctx, dbg := dbgIfVerbose(ctx)

			numNilBatches := 0

			ch, err := NewChannel(ctx, &Options{
				MinQPS: rate.Every(100 * time.Millisecond),
				DropFn: noDrop,
				Buffer: buffer.Options{
					MaxLeases:     1,
					BatchItemsMax: 1,
					FullBehavior:  &buffer.BlockNewItems{MaxItems: 20},
				},
				testingDbg: dbg,
			}, func(batch *buffer.Batch) (err error) {
				if batch == nil {
					numNilBatches++
				}
				return
			})
			So(err, ShouldBeNil)

			for i := 0; i < 20; i++ {
				if i == 9 {
					time.Sleep(2 * time.Second) // create a gap during which ch is empty
				}
				ch.C <- i
			}
			ch.CloseAndDrain(ctx)
			So(numNilBatches, ShouldBeGreaterThan, 0)
		})

		Convey(`send w/ minimal frequency, non-blocking`, func() {
			ctx := context.Background() // uses real time!
			ctx, dbg := dbgIfVerbose(ctx)

			mu := sync.Mutex{}
			numNilBatch := 0
			minWaitDuration := 100 * time.Millisecond

			ch, err := NewChannel(ctx, &Options{
				MinQPS: rate.Every(minWaitDuration),
				DropFn: noDrop,
				Buffer: buffer.Options{
					MaxLeases:     1,
					BatchItemsMax: 1,
					FullBehavior:  &buffer.BlockNewItems{MaxItems: 20},
				},
				testingDbg: dbg,
			}, func(batch *buffer.Batch) (err error) {
				mu.Lock()
				if batch == nil {
					numNilBatch++
				}
				mu.Unlock()
				time.Sleep(200 * time.Millisecond)
				return
			})
			So(err, ShouldBeNil)

			for i := 0; i < 20; i++ {
				ch.C <- i
			}
			ch.CloseAndDrain(ctx)

			So(numNilBatch, ShouldEqual, 0)
		})
	})
}
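
// With MinQPS set, the coordinator invokes SendFn with a nil batch when the
// buffer has stayed empty for longer than the configured interval, as
// TestMinQPS observes via numNilBatches. A SendFn that uses those nil
// invocations as keep-alive pings might look like this sketch (sendHeartbeat
// and deliver are hypothetical helpers):
//
//	func(batch *buffer.Batch) error {
//		if batch == nil {
//			return sendHeartbeat() // idle tick from MinQPS; no data attached
//		}
//		return deliver(batch.Data)
//	}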