vitess.io/vitess@v0.16.2/go/vt/vtadmin/cache/cache_test.go

/*
Copyright 2022 The Vitess Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cache_test

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"vitess.io/vitess/go/vt/vtadmin/cache"
)

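// testkey is a string-backed key type used by the backfill tests; its Key
// method simply returns the underlying string.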
type testkey string

func (k testkey) Key() string { return string(k) }

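// TestBackfillDuplicates verifies that duplicate backfill requests for the
// same key are deduplicated when they arrive within
// BackfillRequestDuplicateInterval of each other, and are each filled when
// enough time passes between them.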
func TestBackfillDuplicates(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name              string
		cfg               cache.Config
		enqueueInterval   time.Duration
		enqueueCount      int
		expectedCallCount int
		assertionMsg      string
	}{
		{
			name: "duplicates too close together",
			cfg: cache.Config{
				BackfillRequestTTL:               time.Hour,
				BackfillRequestDuplicateInterval: time.Second,
			},
			enqueueInterval:   0, // no sleep between enqueues
			enqueueCount:      2,
			expectedCallCount: 1,
			assertionMsg:      "fillFunc should only have been called once for rapid duplicate requests",
		},
		{
			name: "duplicates with enough time passed",
			cfg: cache.Config{
				BackfillRequestTTL:               time.Hour,
				BackfillRequestDuplicateInterval: time.Millisecond,
			},
			enqueueInterval:   time.Millisecond * 5, // sleep longer than the duplicate interval
			enqueueCount:      2,
			expectedCallCount: 2,
			assertionMsg:      "fillFunc should have been called for duplicate requests with enough time between them",
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			var callcount int
			c := cache.New(func(ctx context.Context, req testkey) (any, error) {
				callcount++
				return nil, nil
			}, tt.cfg)

			key := testkey("testkey")
			for i := 0; i < tt.enqueueCount; i++ {
				if !c.EnqueueBackfill(key) {
					assert.Fail(t, fmt.Sprintf("failed to enqueue backfill for key %s", key))
				}

				time.Sleep(tt.enqueueInterval)
			}

			c.Close()

			assert.Equal(t, tt.expectedCallCount, callcount, tt.assertionMsg)
		})
	}
}

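// TestBackfillTTL verifies that a queued backfill request is only filled if
// it is picked up within BackfillRequestTTL of being enqueued; a request that
// sits in the queue longer than the TTL is dropped without calling fillFunc.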
func TestBackfillTTL(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name              string
		cfg               cache.Config
		fillSleep         time.Duration
		enqueueCount      int
		expectedCallCount int
		assertionMsg      string
	}{
		{
			name: "within backfill ttl",
			cfg: cache.Config{
				BackfillRequestTTL:               time.Millisecond * 50,
				BackfillRequestDuplicateInterval: time.Second,
				BackfillEnqueueWaitTime:          time.Second,
			},
			fillSleep:         time.Millisecond * 10,
			enqueueCount:      2,
			expectedCallCount: 1,
			assertionMsg:      "both requests are within TTL, but the second is a recent duplicate, so fillFunc should be called exactly once",
		},
		{
			name: "backfill ttl exceeded",
			cfg: cache.Config{
				BackfillRequestTTL:               time.Millisecond * 10,
				BackfillRequestDuplicateInterval: time.Millisecond,
				BackfillEnqueueWaitTime:          time.Second,
			},
			fillSleep:         time.Millisecond * 50,
			enqueueCount:      2,
			expectedCallCount: 1,
			assertionMsg:      "second request exceeds TTL and should not have been filled",
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()

			var callcount int
			c := cache.New(func(ctx context.Context, req testkey) (any, error) {
				time.Sleep(tt.fillSleep) // make fills take time so that requests in the queue can exceed ttl
				callcount++
				return nil, nil
			}, tt.cfg)

			key := testkey("testkey")
			for i := 0; i < tt.enqueueCount; i++ {
				if !c.EnqueueBackfill(key) {
					assert.Fail(t, fmt.Sprintf("failed to enqueue backfill for key %s", key))
				}
			}

			time.Sleep(tt.fillSleep * time.Duration(tt.enqueueCount))
			c.Close()

			assert.Equal(t, tt.expectedCallCount, callcount, tt.assertionMsg)
		})
	}
}

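// intkey is an int-backed key type whose Key method formats the integer; it
// gives the timeout test a distinct key per enqueue.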
type intkey int

func (k intkey) Key() string { return fmt.Sprintf("%d", int(k)) }

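// TestEnqueueBackfillTimeout verifies that EnqueueBackfill returns false when
// a request cannot be added to the backfill queue within
// BackfillEnqueueWaitTime (here, because the fill of the first request is
// still in progress).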
func TestEnqueueBackfillTimeout(t *testing.T) {
	t.Parallel()

	var callcount int
	c := cache.New(func(ctx context.Context, req intkey) (any, error) {
		time.Sleep(time.Millisecond * 50) // make fills take time so that the second enqueue exceeds WaitTimeout
		callcount++
		return nil, nil
	}, cache.Config{
		BackfillEnqueueWaitTime: time.Millisecond * 10,
	})

	enqueues := []struct {
		shouldFail bool
		msg        string
	}{
		{
			shouldFail: false,
			msg:        "not exceed",
		},
		{
			shouldFail: true,
			msg:        "exceed",
		},
	}
	for i, q := range enqueues {
		ok := c.EnqueueBackfill(intkey(i))
		assert.Equal(t, q.shouldFail, !ok, "enqueue should %s wait timeout", q.msg)
	}
}