github.com/outbrain/consul@v1.4.5/agent/cache/cache_test.go (about)

     1  package cache
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"sort"
     7  	"sync"
     8  	"sync/atomic"
     9  	"testing"
    10  	"time"
    11  
    12  	"github.com/stretchr/testify/assert"
    13  	"github.com/stretchr/testify/mock"
    14  	"github.com/stretchr/testify/require"
    15  )
    16  
    17  // Test a basic Get with no indexes (and therefore no blocking queries).
    18  func TestCacheGet_noIndex(t *testing.T) {
    19  	t.Parallel()
    20  
    21  	require := require.New(t)
    22  
    23  	typ := TestType(t)
    24  	defer typ.AssertExpectations(t)
    25  	c := TestCache(t)
    26  	c.RegisterType("t", typ, nil)
    27  
    28  	// Configure the type
    29  	typ.Static(FetchResult{Value: 42}, nil).Times(1)
    30  
    31  	// Get, should fetch
    32  	req := TestRequest(t, RequestInfo{Key: "hello"})
    33  	result, meta, err := c.Get("t", req)
    34  	require.NoError(err)
    35  	require.Equal(42, result)
    36  	require.False(meta.Hit)
    37  
    38  	// Get, should not fetch since we already have a satisfying value
    39  	result, meta, err = c.Get("t", req)
    40  	require.NoError(err)
    41  	require.Equal(42, result)
    42  	require.True(meta.Hit)
    43  
    44  	// Sleep a tiny bit just to let maybe some background calls happen
    45  	// then verify that we still only got the one call
    46  	time.Sleep(20 * time.Millisecond)
    47  	typ.AssertExpectations(t)
    48  }
    49  
    50  // Test a basic Get with no index and a failed fetch.
    51  func TestCacheGet_initError(t *testing.T) {
    52  	t.Parallel()
    53  
    54  	require := require.New(t)
    55  
    56  	typ := TestType(t)
    57  	defer typ.AssertExpectations(t)
    58  	c := TestCache(t)
    59  	c.RegisterType("t", typ, nil)
    60  
    61  	// Configure the type
    62  	fetcherr := fmt.Errorf("error")
    63  	typ.Static(FetchResult{}, fetcherr).Times(2)
    64  
    65  	// Get, should fetch
    66  	req := TestRequest(t, RequestInfo{Key: "hello"})
    67  	result, meta, err := c.Get("t", req)
    68  	require.Error(err)
    69  	require.Nil(result)
    70  	require.False(meta.Hit)
    71  
    72  	// Get, should fetch again since our last fetch was an error
    73  	result, meta, err = c.Get("t", req)
    74  	require.Error(err)
    75  	require.Nil(result)
    76  	require.False(meta.Hit)
    77  
    78  	// Sleep a tiny bit just to let maybe some background calls happen
    79  	// then verify that we still only got the one call
    80  	time.Sleep(20 * time.Millisecond)
    81  	typ.AssertExpectations(t)
    82  }
    83  
// Test a cached error is replaced by a successful result. See
// https://github.com/hashicorp/consul/issues/4480
func TestCacheGet_cachedErrorsDontStick(t *testing.T) {
	t.Parallel()

	require := require.New(t)

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, nil)

	// Configure the type
	fetcherr := fmt.Errorf("initial error")
	// First fetch errors, subsequent fetches are successful and then block
	typ.Static(FetchResult{}, fetcherr).Times(1)
	typ.Static(FetchResult{Value: 42, Index: 123}, nil).Times(1)
	// We trigger this to return same value to simulate a timeout.
	triggerCh := make(chan time.Time)
	typ.Static(FetchResult{Value: 42, Index: 123}, nil).WaitUntil(triggerCh)

	// Get, should fetch and get error
	req := TestRequest(t, RequestInfo{Key: "hello"})
	result, meta, err := c.Get("t", req)
	require.Error(err)
	require.Nil(result)
	require.False(meta.Hit)

	// Get, should fetch again since our last fetch was an error, but get success
	result, meta, err = c.Get("t", req)
	require.NoError(err)
	require.Equal(42, result)
	require.False(meta.Hit)

	// Now get should block until timeout and then get the same response NOT the
	// cached error from the first fetch above (the regression in #4480).
	getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{
		Key:      "hello",
		MinIndex: 123,
		// We _don't_ set a timeout here since that doesn't trigger the bug - the
		// bug occurs when the Fetch call times out and returns the same value when
		// an error is set. If it returns a new value the blocking loop works too.
	}))
	time.AfterFunc(50*time.Millisecond, func() {
		// "Timeout" the Fetch after a short time by releasing the blocked
		// WaitUntil mock above.
		close(triggerCh)
	})
	select {
	case result := <-getCh1:
		t.Fatalf("result or error returned before an update happened. "+
			"If this is nil look above for the error log: %v", result)
	case <-time.After(100 * time.Millisecond):
		// It _should_ keep blocking for a new value here
	}

	// Sleep a tiny bit just to let maybe some background calls happen
	// then verify the calls.
	time.Sleep(20 * time.Millisecond)
	typ.AssertExpectations(t)
}
   144  
   145  // Test a Get with a request that returns a blank cache key. This should
   146  // force a backend request and skip the cache entirely.
   147  func TestCacheGet_blankCacheKey(t *testing.T) {
   148  	t.Parallel()
   149  
   150  	require := require.New(t)
   151  
   152  	typ := TestType(t)
   153  	defer typ.AssertExpectations(t)
   154  	c := TestCache(t)
   155  	c.RegisterType("t", typ, nil)
   156  
   157  	// Configure the type
   158  	typ.Static(FetchResult{Value: 42}, nil).Times(2)
   159  
   160  	// Get, should fetch
   161  	req := TestRequest(t, RequestInfo{Key: ""})
   162  	result, meta, err := c.Get("t", req)
   163  	require.NoError(err)
   164  	require.Equal(42, result)
   165  	require.False(meta.Hit)
   166  
   167  	// Get, should not fetch since we already have a satisfying value
   168  	result, meta, err = c.Get("t", req)
   169  	require.NoError(err)
   170  	require.Equal(42, result)
   171  	require.False(meta.Hit)
   172  
   173  	// Sleep a tiny bit just to let maybe some background calls happen
   174  	// then verify that we still only got the one call
   175  	time.Sleep(20 * time.Millisecond)
   176  	typ.AssertExpectations(t)
   177  }
   178  
   179  // Test that Get blocks on the initial value
   180  func TestCacheGet_blockingInitSameKey(t *testing.T) {
   181  	t.Parallel()
   182  
   183  	typ := TestType(t)
   184  	defer typ.AssertExpectations(t)
   185  	c := TestCache(t)
   186  	c.RegisterType("t", typ, nil)
   187  
   188  	// Configure the type
   189  	triggerCh := make(chan time.Time)
   190  	typ.Static(FetchResult{Value: 42}, nil).WaitUntil(triggerCh).Times(1)
   191  
   192  	// Perform multiple gets
   193  	getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
   194  	getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
   195  
   196  	// They should block
   197  	select {
   198  	case <-getCh1:
   199  		t.Fatal("should block (ch1)")
   200  	case <-getCh2:
   201  		t.Fatal("should block (ch2)")
   202  	case <-time.After(50 * time.Millisecond):
   203  	}
   204  
   205  	// Trigger it
   206  	close(triggerCh)
   207  
   208  	// Should return
   209  	TestCacheGetChResult(t, getCh1, 42)
   210  	TestCacheGetChResult(t, getCh2, 42)
   211  }
   212  
// Test that Get with different cache keys both block on initial value
// but that the fetches were both properly called.
func TestCacheGet_blockingInitDiffKeys(t *testing.T) {
	t.Parallel()

	require := require.New(t)

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, nil)

	// Keep track of the keys the mock Fetch was invoked with; guarded by a
	// mutex since the two fetches run concurrently.
	var keysLock sync.Mutex
	var keys []string

	// Configure the type: two fetches expected (one per key), both blocked
	// on triggerCh, each recording its request key.
	triggerCh := make(chan time.Time)
	typ.Static(FetchResult{Value: 42}, nil).
		WaitUntil(triggerCh).
		Times(2).
		Run(func(args mock.Arguments) {
			keysLock.Lock()
			defer keysLock.Unlock()
			keys = append(keys, args.Get(1).(Request).CacheInfo().Key)
		})

	// Perform multiple gets with distinct keys
	getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "goodbye"}))

	// They should block while the fetches are held on triggerCh
	select {
	case <-getCh1:
		t.Fatal("should block (ch1)")
	case <-getCh2:
		t.Fatal("should block (ch2)")
	case <-time.After(50 * time.Millisecond):
	}

	// Trigger it, releasing both blocked fetches
	close(triggerCh)

	// Should return both!
	TestCacheGetChResult(t, getCh1, 42)
	TestCacheGetChResult(t, getCh2, 42)

	// Verify proper keys; sorted since fetch completion order is unordered
	sort.Strings(keys)
	require.Equal([]string{"goodbye", "hello"}, keys)
}
   264  
// Test a get with an index set will wait until an index that is higher
// is set in the cache.
func TestCacheGet_blockingIndex(t *testing.T) {
	t.Parallel()

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, nil)

	// Configure the type: indexes advance 4 -> 5 -> 6; the final fetch is
	// held on triggerCh so the blocking Get cannot complete until released.
	triggerCh := make(chan time.Time)
	typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once()
	typ.Static(FetchResult{Value: 12, Index: 5}, nil).Once()
	typ.Static(FetchResult{Value: 42, Index: 6}, nil).WaitUntil(triggerCh)

	// Fetch should block: MinIndex 5 means only index > 5 satisfies it
	resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{
		Key: "hello", MinIndex: 5}))

	// Should block
	select {
	case <-resultCh:
		t.Fatal("should block")
	case <-time.After(50 * time.Millisecond):
	}

	// Release the index-6 fetch
	close(triggerCh)

	// Should return the index-6 value
	TestCacheGetChResult(t, resultCh, 42)
}
   298  
// Test a get with an index set will timeout if the fetch doesn't return
// anything.
func TestCacheGet_blockingIndexTimeout(t *testing.T) {
	t.Parallel()

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, nil)

	// Configure the type: the index-6 fetch is blocked on triggerCh, which is
	// never closed — so the Get can only complete via its own Timeout.
	triggerCh := make(chan time.Time)
	typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once()
	typ.Static(FetchResult{Value: 12, Index: 5}, nil).Once()
	typ.Static(FetchResult{Value: 42, Index: 6}, nil).WaitUntil(triggerCh)

	// Fetch should block until the 200ms request timeout elapses
	resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{
		Key: "hello", MinIndex: 5, Timeout: 200 * time.Millisecond}))

	// Should block
	select {
	case <-resultCh:
		t.Fatal("should block")
	case <-time.After(50 * time.Millisecond):
	}

	// Should return after more of the timeout, yielding the last cached
	// value (index 5) rather than the still-blocked index-6 result
	select {
	case result := <-resultCh:
		require.Equal(t, 12, result)
	case <-time.After(300 * time.Millisecond):
		t.Fatal("should've returned")
	}
}
   334  
// Test a get with an index set with requests returning an error
// will return that error.
func TestCacheGet_blockingIndexError(t *testing.T) {
	t.Parallel()

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, nil)

	// Configure the type: after one good result, every fetch errors and
	// counts its invocation so we can verify retries are bounded (backoff).
	var retries uint32
	fetchErr := fmt.Errorf("test fetch error")
	typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once()
	typ.Static(FetchResult{Value: nil, Index: 5}, fetchErr).Run(func(args mock.Arguments) {
		atomic.AddUint32(&retries, 1)
	})

	// First good fetch to populate the cache
	resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 1)

	// Fetch should not block (despite the long timeout) and should return
	// the error's nil value
	resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{
		Key: "hello", MinIndex: 7, Timeout: 1 * time.Minute}))
	TestCacheGetChResult(t, resultCh, nil)

	// Wait a bit to let any retry loop run
	time.Sleep(100 * time.Millisecond)

	// Check the number of retries stayed small, i.e. backoff is working
	actual := atomic.LoadUint32(&retries)
	require.True(t, actual < 10, fmt.Sprintf("actual: %d", actual))
}
   369  
// Test that if a Type returns an empty value on Fetch that the previous
// value is preserved.
func TestCacheGet_emptyFetchResult(t *testing.T) {
	t.Parallel()

	require := require.New(t)

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, nil)

	// Receives the State seen by each nil-Value fetch so we can assert on it
	// from the test goroutine.
	stateCh := make(chan int, 1)

	// Configure the type
	typ.Static(FetchResult{Value: 42, State: 31, Index: 1}, nil).Times(1)
	// Return different State, it should NOT be ignored even though Value is
	// nil (nil Value means "keep the previous value", not "keep the state")
	typ.Static(FetchResult{Value: nil, State: 32}, nil).Run(func(args mock.Arguments) {
		// We should get back the original state
		opts := args.Get(0).(FetchOptions)
		require.NotNil(opts.LastResult)
		stateCh <- opts.LastResult.State.(int)
	})

	// Get, should fetch
	req := TestRequest(t, RequestInfo{Key: "hello"})
	result, meta, err := c.Get("t", req)
	require.NoError(err)
	require.Equal(42, result)
	require.False(meta.Hit)

	// Get, should not fetch since we already have a satisfying value
	req = TestRequest(t, RequestInfo{
		Key: "hello", MinIndex: 1, Timeout: 100 * time.Millisecond})
	result, meta, err = c.Get("t", req)
	require.NoError(err)
	require.Equal(42, result)
	require.False(meta.Hit)

	// State delivered to second call should be the result from first call.
	select {
	case state := <-stateCh:
		require.Equal(31, state)
	case <-time.After(20 * time.Millisecond):
		t.Fatal("timed out")
	}

	// Next request should get the SECOND returned state even though the fetch
	// returns nil and so the previous result is used.
	req = TestRequest(t, RequestInfo{
		Key: "hello", MinIndex: 1, Timeout: 100 * time.Millisecond})
	result, meta, err = c.Get("t", req)
	require.NoError(err)
	require.Equal(42, result)
	require.False(meta.Hit)
	select {
	case state := <-stateCh:
		require.Equal(32, state)
	case <-time.After(20 * time.Millisecond):
		t.Fatal("timed out")
	}

	// Sleep a tiny bit just to let maybe some background calls happen
	// then verify that we got only the expected calls
	time.Sleep(20 * time.Millisecond)
	typ.AssertExpectations(t)
}
   437  
// Test that a type registered with a periodic refresh will perform
// that refresh after the timer is up.
func TestCacheGet_periodicRefresh(t *testing.T) {
	t.Parallel()

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, &RegisterOptions{
		Refresh:        true,
		RefreshTimer:   100 * time.Millisecond,
		RefreshTimeout: 5 * time.Minute,
	})

	// This is a bit weird, but we do this to ensure that the final
	// call to the Fetch (if it happens, depends on timing) just blocks.
	triggerCh := make(chan time.Time)
	defer close(triggerCh)

	// Configure the type
	typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once()
	typ.Static(FetchResult{Value: 12, Index: 5}, nil).Once()
	typ.Static(FetchResult{Value: 12, Index: 5}, nil).WaitUntil(triggerCh)

	// Fetch should block until the first result arrives
	resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 1)

	// Fetch again almost immediately should return old result since the
	// refresh timer (100ms) has not fired yet
	time.Sleep(5 * time.Millisecond)
	resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 1)

	// Wait for the refresh timer; the background refresh should have
	// replaced the cached value with the second result
	time.Sleep(200 * time.Millisecond)
	resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 12)
}
   476  
// Test that a type registered with a periodic refresh will perform
// multiple refreshes in sequence after each timer expiry.
func TestCacheGet_periodicRefreshMultiple(t *testing.T) {
	t.Parallel()

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, &RegisterOptions{
		Refresh:        true,
		RefreshTimer:   0 * time.Millisecond,
		RefreshTimeout: 5 * time.Minute,
	})

	// This is a bit weird, but we do this to ensure that the final
	// call to the Fetch (if it happens, depends on timing) just blocks.
	// Each trigger channel gates one subsequent refresh result.
	trigger := make([]chan time.Time, 3)
	for i := range trigger {
		trigger[i] = make(chan time.Time)
	}

	// Configure the type
	typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once()
	typ.Static(FetchResult{Value: 12, Index: 5}, nil).Once().WaitUntil(trigger[0])
	typ.Static(FetchResult{Value: 24, Index: 6}, nil).Once().WaitUntil(trigger[1])
	typ.Static(FetchResult{Value: 42, Index: 7}, nil).WaitUntil(trigger[2])

	// Fetch should block until the first result arrives
	resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 1)

	// Fetch again almost immediately should return old result (the next
	// refresh is still blocked on trigger[0])
	time.Sleep(5 * time.Millisecond)
	resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 1)

	// Trigger the next, sleep a bit, and verify we get the next result
	close(trigger[0])
	time.Sleep(100 * time.Millisecond)
	resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 12)

	// Trigger the next, sleep a bit, and verify we get the next result
	close(trigger[1])
	time.Sleep(100 * time.Millisecond)
	resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 24)
}
   525  
// Test that a refresh performs a backoff.
func TestCacheGet_periodicRefreshErrorBackoff(t *testing.T) {
	t.Parallel()

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, &RegisterOptions{
		Refresh:        true,
		RefreshTimer:   0,
		RefreshTimeout: 5 * time.Minute,
	})

	// Configure the type: first fetch succeeds, every refresh after that
	// errors and counts its invocation atomically.
	var retries uint32
	fetchErr := fmt.Errorf("test fetch error")
	typ.Static(FetchResult{Value: 1, Index: 4}, nil).Once()
	typ.Static(FetchResult{Value: nil, Index: 5}, fetchErr).Run(func(args mock.Arguments) {
		atomic.AddUint32(&retries, 1)
	})

	// Fetch
	resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 1)

	// Sleep a bit. The refresh will quietly fail in the background. What we
	// want to verify is that it doesn't retry too much. "Too much" is hard
	// to measure since its CPU dependent if this test is failing. But due
	// to the short sleep below, we can calculate about what we'd expect if
	// backoff IS working.
	time.Sleep(500 * time.Millisecond)

	// Fetch should work, we should get a 1 still. Errors are ignored.
	resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 1)

	// Check the number of retries stayed bounded (backoff in effect)
	actual := atomic.LoadUint32(&retries)
	require.True(t, actual < 10, fmt.Sprintf("actual: %d", actual))
}
   566  
// Test that a badly behaved RPC that returns 0 index will perform a backoff.
func TestCacheGet_periodicRefreshBadRPCZeroIndexErrorBackoff(t *testing.T) {
	t.Parallel()

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, &RegisterOptions{
		Refresh:        true,
		RefreshTimer:   0,
		RefreshTimeout: 5 * time.Minute,
	})

	// Configure the type: every fetch "succeeds" but returns index 0, which
	// would spin the refresh loop forever without backoff. Count calls.
	var retries uint32
	typ.Static(FetchResult{Value: 0, Index: 0}, nil).Run(func(args mock.Arguments) {
		atomic.AddUint32(&retries, 1)
	})

	// Fetch
	resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 0)

	// Sleep a bit. The refresh will quietly fail in the background. What we
	// want to verify is that it doesn't retry too much. "Too much" is hard
	// to measure since its CPU dependent if this test is failing. But due
	// to the short sleep below, we can calculate about what we'd expect if
	// backoff IS working.
	time.Sleep(500 * time.Millisecond)

	// Fetch should work, we should get a 0 still. Errors are ignored.
	resultCh = TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 0)

	// Check the number of retries stayed bounded (backoff in effect)
	actual := atomic.LoadUint32(&retries)
	require.True(t, actual < 10, fmt.Sprintf("%d retries, should be < 10", actual))
}
   605  
// Test that fetching with no index makes an initial request with no index, but
// then ensures all background refreshes have > 0. This ensures we don't end up
// with any index 0 loops from background refreshed while also returning
// immediately on the initial request if there is no data written to that table
// yet.
func TestCacheGet_noIndexSetsOne(t *testing.T) {
	t.Parallel()

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	c.RegisterType("t", typ, &RegisterOptions{
		Refresh:        true,
		RefreshTimer:   0,
		RefreshTimeout: 5 * time.Minute,
	})

	// Simulate "well behaved" RPC with no data yet but returning 1
	{
		// first flags the initial fetch; SwapInt32 below distinguishes it
		// from subsequent background refreshes.
		first := int32(1)

		typ.Static(FetchResult{Value: 0, Index: 1}, nil).Run(func(args mock.Arguments) {
			opts := args.Get(0).(FetchOptions)
			isFirst := atomic.SwapInt32(&first, 0)
			if isFirst == 1 {
				assert.Equal(t, uint64(0), opts.MinIndex)
			} else {
				assert.True(t, opts.MinIndex > 0, "minIndex > 0")
			}
		})

		// Fetch
		resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
		TestCacheGetChResult(t, resultCh, 0)

		// Sleep a bit so background refresh happens
		time.Sleep(100 * time.Millisecond)
	}

	// Same for "badly behaved" RPC that returns 0 index and no data
	{
		first := int32(1)

		typ.Static(FetchResult{Value: 0, Index: 0}, nil).Run(func(args mock.Arguments) {
			opts := args.Get(0).(FetchOptions)
			isFirst := atomic.SwapInt32(&first, 0)
			if isFirst == 1 {
				assert.Equal(t, uint64(0), opts.MinIndex)
			} else {
				assert.True(t, opts.MinIndex > 0, "minIndex > 0")
			}
		})

		// Fetch
		resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
		TestCacheGetChResult(t, resultCh, 0)

		// Sleep a bit so background refresh happens
		time.Sleep(100 * time.Millisecond)
	}
}
   667  
   668  // Test that the backend fetch sets the proper timeout.
   669  func TestCacheGet_fetchTimeout(t *testing.T) {
   670  	t.Parallel()
   671  
   672  	require := require.New(t)
   673  
   674  	typ := TestType(t)
   675  	defer typ.AssertExpectations(t)
   676  	c := TestCache(t)
   677  
   678  	// Register the type with a timeout
   679  	timeout := 10 * time.Minute
   680  	c.RegisterType("t", typ, &RegisterOptions{
   681  		RefreshTimeout: timeout,
   682  	})
   683  
   684  	// Configure the type
   685  	var actual time.Duration
   686  	typ.Static(FetchResult{Value: 42}, nil).Times(1).Run(func(args mock.Arguments) {
   687  		opts := args.Get(0).(FetchOptions)
   688  		actual = opts.Timeout
   689  	})
   690  
   691  	// Get, should fetch
   692  	req := TestRequest(t, RequestInfo{Key: "hello"})
   693  	result, meta, err := c.Get("t", req)
   694  	require.NoError(err)
   695  	require.Equal(42, result)
   696  	require.False(meta.Hit)
   697  
   698  	// Test the timeout
   699  	require.Equal(timeout, actual)
   700  }
   701  
// Test that entries expire
func TestCacheGet_expire(t *testing.T) {
	t.Parallel()

	require := require.New(t)

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)

	// Register the type with a short TTL so expiry happens within the test
	c.RegisterType("t", typ, &RegisterOptions{
		LastGetTTL: 400 * time.Millisecond,
	})

	// Configure the type: exactly two fetches expected (initial + post-expiry)
	typ.Static(FetchResult{Value: 42}, nil).Times(2)

	// Get, should fetch
	req := TestRequest(t, RequestInfo{Key: "hello"})
	result, meta, err := c.Get("t", req)
	require.NoError(err)
	require.Equal(42, result)
	require.False(meta.Hit)

	// Wait for a non-trivial amount of time to sanity check the age increases at
	// least this amount. Note that this is not a fudge for some timing-dependent
	// background work it's just ensuring a non-trivial time elapses between the
	// request above and below serially in this thread so short time is OK.
	time.Sleep(5 * time.Millisecond)

	// Get, should not fetch, verified via the mock assertions above
	req = TestRequest(t, RequestInfo{Key: "hello"})
	result, meta, err = c.Get("t", req)
	require.NoError(err)
	require.Equal(42, result)
	require.True(meta.Hit)
	require.True(meta.Age > 5*time.Millisecond)

	// Sleep for the expiry (longer than the 400ms TTL)
	time.Sleep(500 * time.Millisecond)

	// Get, should fetch since the entry expired
	req = TestRequest(t, RequestInfo{Key: "hello"})
	result, meta, err = c.Get("t", req)
	require.NoError(err)
	require.Equal(42, result)
	require.False(meta.Hit)

	// Sleep a tiny bit just to let maybe some background calls happen
	// then verify that we got exactly the two expected calls
	time.Sleep(20 * time.Millisecond)
	typ.AssertExpectations(t)
}
   756  
// Test that entries reset their TTL on Get
func TestCacheGet_expireResetGet(t *testing.T) {
	t.Parallel()

	require := require.New(t)

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)

	// Register the type with a TTL shorter than the total loop time below,
	// so the entry only survives if each Get resets the TTL
	c.RegisterType("t", typ, &RegisterOptions{
		LastGetTTL: 150 * time.Millisecond,
	})

	// Configure the type: exactly two fetches expected (initial + post-expiry)
	typ.Static(FetchResult{Value: 42}, nil).Times(2)

	// Get, should fetch
	req := TestRequest(t, RequestInfo{Key: "hello"})
	result, meta, err := c.Get("t", req)
	require.NoError(err)
	require.Equal(42, result)
	require.False(meta.Hit)

	// Fetch multiple times, where the total time is well beyond
	// the TTL. We should not trigger any fetches during this time.
	for i := 0; i < 5; i++ {
		// Sleep a bit (less than the TTL, so each Get keeps the entry alive)
		time.Sleep(50 * time.Millisecond)

		// Get, should not fetch
		req = TestRequest(t, RequestInfo{Key: "hello"})
		result, meta, err = c.Get("t", req)
		require.NoError(err)
		require.Equal(42, result)
		require.True(meta.Hit)
	}

	// Now exceed the TTL with no Gets so the entry expires
	time.Sleep(200 * time.Millisecond)

	// Get, should fetch
	req = TestRequest(t, RequestInfo{Key: "hello"})
	result, meta, err = c.Get("t", req)
	require.NoError(err)
	require.Equal(42, result)
	require.False(meta.Hit)

	// Sleep a tiny bit just to let maybe some background calls happen
	// then verify that we got exactly the two expected calls
	time.Sleep(20 * time.Millisecond)
	typ.AssertExpectations(t)
}
   810  
   811  // Test a Get with a request that returns the same cache key across
   812  // two different "types" returns two separate results.
   813  func TestCacheGet_duplicateKeyDifferentType(t *testing.T) {
   814  	t.Parallel()
   815  
   816  	require := require.New(t)
   817  
   818  	typ := TestType(t)
   819  	defer typ.AssertExpectations(t)
   820  	typ2 := TestType(t)
   821  	defer typ2.AssertExpectations(t)
   822  
   823  	c := TestCache(t)
   824  	c.RegisterType("t", typ, nil)
   825  	c.RegisterType("t2", typ2, nil)
   826  
   827  	// Configure the types
   828  	typ.Static(FetchResult{Value: 100}, nil)
   829  	typ2.Static(FetchResult{Value: 200}, nil)
   830  
   831  	// Get, should fetch
   832  	req := TestRequest(t, RequestInfo{Key: "foo"})
   833  	result, meta, err := c.Get("t", req)
   834  	require.NoError(err)
   835  	require.Equal(100, result)
   836  	require.False(meta.Hit)
   837  
   838  	// Get from t2 with same key, should fetch
   839  	req = TestRequest(t, RequestInfo{Key: "foo"})
   840  	result, meta, err = c.Get("t2", req)
   841  	require.NoError(err)
   842  	require.Equal(200, result)
   843  	require.False(meta.Hit)
   844  
   845  	// Get from t again with same key, should cache
   846  	req = TestRequest(t, RequestInfo{Key: "foo"})
   847  	result, meta, err = c.Get("t", req)
   848  	require.NoError(err)
   849  	require.Equal(100, result)
   850  	require.True(meta.Hit)
   851  
   852  	// Sleep a tiny bit just to let maybe some background calls happen
   853  	// then verify that we still only got the one call
   854  	time.Sleep(20 * time.Millisecond)
   855  	typ.AssertExpectations(t)
   856  	typ2.AssertExpectations(t)
   857  }
   858  
   859  // Test that Get partitions the caches based on DC so two equivalent requests
   860  // to different datacenters are automatically cached even if their keys are
   861  // the same.
   862  func TestCacheGet_partitionDC(t *testing.T) {
   863  	t.Parallel()
   864  
   865  	c := TestCache(t)
   866  	c.RegisterType("t", &testPartitionType{}, nil)
   867  
   868  	// Perform multiple gets
   869  	getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{
   870  		Datacenter: "dc1", Key: "hello"}))
   871  	getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{
   872  		Datacenter: "dc9", Key: "hello"}))
   873  
   874  	// Should return both!
   875  	TestCacheGetChResult(t, getCh1, "dc1")
   876  	TestCacheGetChResult(t, getCh2, "dc9")
   877  }
   878  
   879  // Test that Get partitions the caches based on token so two equivalent requests
   880  // with different ACL tokens do not return the same result.
   881  func TestCacheGet_partitionToken(t *testing.T) {
   882  	t.Parallel()
   883  
   884  	c := TestCache(t)
   885  	c.RegisterType("t", &testPartitionType{}, nil)
   886  
   887  	// Perform multiple gets
   888  	getCh1 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{
   889  		Token: "", Key: "hello"}))
   890  	getCh2 := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{
   891  		Token: "foo", Key: "hello"}))
   892  
   893  	// Should return both!
   894  	TestCacheGetChResult(t, getCh1, "")
   895  	TestCacheGetChResult(t, getCh2, "foo")
   896  }
   897  
   898  // testPartitionType implements Type for testing that simply returns a value
   899  // comprised of the request DC and ACL token, used for testing cache
   900  // partitioning.
   901  type testPartitionType struct{}
   902  
   903  func (t *testPartitionType) Fetch(opts FetchOptions, r Request) (FetchResult, error) {
   904  	info := r.CacheInfo()
   905  	return FetchResult{
   906  		Value: fmt.Sprintf("%s%s", info.Datacenter, info.Token),
   907  	}, nil
   908  }
   909  
   910  func (t *testPartitionType) SupportsBlocking() bool {
   911  	return true
   912  }
   913  
// Test that background refreshing reports correct Age in failure and happy
// states: Age stays zero while refresh is keeping the entry current, grows
// while refresh is failing, and resets once refresh recovers.
func TestCacheGet_refreshAge(t *testing.T) {
	t.Parallel()

	require := require.New(t)

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	// RefreshTimer of 0 makes the background refresh re-fetch immediately
	// after each result, so the test never waits on a refresh delay.
	c.RegisterType("t", typ, &RegisterOptions{
		Refresh:        true,
		RefreshTimer:   0,
		RefreshTimeout: 5 * time.Minute,
	})

	// Configure the type: index drives the returned value (index*2) and
	// shouldFail toggles error responses. Both are read by the background
	// refresh goroutine, so all access is via sync/atomic.
	var index, shouldFail uint64

	typ.On("Fetch", mock.Anything, mock.Anything).
		Return(func(o FetchOptions, r Request) FetchResult {
			idx := atomic.LoadUint64(&index)
			if atomic.LoadUint64(&shouldFail) == 1 {
				t.Logf("Failing Fetch at index %d", idx)
				return FetchResult{Value: nil, Index: idx}
			}
			if o.MinIndex == idx {
				t.Logf("Sleeping Fetch at index %d", idx)
				// Simulate a blocking query waiting for a new value
				time.Sleep(5 * time.Millisecond)
			}
			t.Logf("Returning Fetch at index %d", idx)
			return FetchResult{Value: int(idx * 2), Index: idx}
		}, func(o FetchOptions, r Request) error {
			if atomic.LoadUint64(&shouldFail) == 1 {
				return errors.New("test error")
			}
			return nil
		})

	// Set initial index/value
	atomic.StoreUint64(&index, 4)

	// Fetch (value is index*2 = 8)
	resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 8)

	{
		// Wait a few milliseconds after initial fetch to check age is not reporting
		// actual age.
		time.Sleep(2 * time.Millisecond)

		// Fetch again, non-blocking
		result, meta, err := c.Get("t", TestRequest(t, RequestInfo{Key: "hello"}))
		require.NoError(err)
		require.Equal(8, result)
		require.True(meta.Hit)
		// Age should be zero since background refresh was "active"
		require.Equal(time.Duration(0), meta.Age)
	}

	// Now fail the next background sync
	atomic.StoreUint64(&shouldFail, 1)

	// Wait until the current request times out and starts failing. The request
	// should take a maximum of 5ms to return but give it some headroom to allow
	// it to finish 5ms sleep, unblock and next background request to be attempted
	// and fail and state updated in noisy CI... We might want to retry if this is
	// still flaky but see if a longer wait is sufficient for now.
	time.Sleep(50 * time.Millisecond)

	var lastAge time.Duration
	{
		result, meta, err := c.Get("t", TestRequest(t, RequestInfo{Key: "hello"}))
		require.NoError(err)
		require.Equal(8, result)
		require.True(meta.Hit)
		// Age should be non-zero now: the background refresh is failing, so
		// the cached value is genuinely going stale.
		require.True(meta.Age > 0)
		lastAge = meta.Age
	}
	// Wait a bit longer - age should increase by at least this much
	time.Sleep(5 * time.Millisecond)
	{
		result, meta, err := c.Get("t", TestRequest(t, RequestInfo{Key: "hello"}))
		require.NoError(err)
		require.Equal(8, result)
		require.True(meta.Hit)
		require.True(meta.Age > (lastAge + (1 * time.Millisecond)))
	}

	// Now unfail the background refresh
	atomic.StoreUint64(&shouldFail, 0)

	// And update the data so we can see when the background task is working again
	// (won't be immediate due to backoff on the errors).
	atomic.AddUint64(&index, 1)

	t0 := time.Now()

	timeout := true
	// Allow up to 5 seconds since the error backoff is likely to have kicked in
	// and causes this to take different amounts of time depending on how quickly
	// the test thread got down here relative to the failures.
	for attempts := 0; attempts < 50; attempts++ {
		time.Sleep(100 * time.Millisecond)
		result, meta, err := c.Get("t", TestRequest(t, RequestInfo{Key: "hello"}))
		// Should never error even if background is failing as we have cached value
		require.NoError(err)
		require.True(meta.Hit)
		// Got the new value (index 5 * 2)!
		if result == 10 {
			// Age should be zero since background refresh is "active" again
			t.Logf("Succeeded after %d attempts", attempts)
			require.Equal(time.Duration(0), meta.Age)
			timeout = false
			break
		}
	}
	require.False(timeout, "failed to observe update after %s", time.Since(t0))
}
  1035  
// Test that a non-refreshing type reports real entry Age: Age grows with wall
// time, resets after TTL expiry forces a refetch, and a MaxAge hint on the
// request invalidates an entry that is older than requested.
func TestCacheGet_nonRefreshAge(t *testing.T) {
	t.Parallel()

	require := require.New(t)

	typ := TestType(t)
	defer typ.AssertExpectations(t)
	c := TestCache(t)
	// Short LastGetTTL so the entry expires quickly for the expiry section below.
	c.RegisterType("t", typ, &RegisterOptions{
		Refresh:    false,
		LastGetTTL: 100 * time.Millisecond,
	})

	// Configure the type: the returned value is always index*2.
	var index uint64

	typ.On("Fetch", mock.Anything, mock.Anything).
		Return(func(o FetchOptions, r Request) FetchResult {
			idx := atomic.LoadUint64(&index)
			return FetchResult{Value: int(idx * 2), Index: idx}
		}, nil)

	// Set initial index/value
	atomic.StoreUint64(&index, 4)

	// Fetch (value is index*2 = 8)
	resultCh := TestCacheGetCh(t, c, "t", TestRequest(t, RequestInfo{Key: "hello"}))
	TestCacheGetChResult(t, resultCh, 8)

	var lastAge time.Duration
	{
		// Wait a few milliseconds after initial fetch to check age IS reporting
		// actual age.
		time.Sleep(5 * time.Millisecond)

		// Fetch again, non-blocking
		result, meta, err := c.Get("t", TestRequest(t, RequestInfo{Key: "hello"}))
		require.NoError(err)
		require.Equal(8, result)
		require.True(meta.Hit)
		require.True(meta.Age > (5 * time.Millisecond))
		lastAge = meta.Age
	}

	// Wait for expiry
	time.Sleep(200 * time.Millisecond)

	{
		result, meta, err := c.Get("t", TestRequest(t, RequestInfo{Key: "hello"}))
		require.NoError(err)
		require.Equal(8, result)
		require.False(meta.Hit)
		// Age should be smaller again since the miss triggered a fresh fetch
		require.True(meta.Age < lastAge)
	}

	{
		// Wait for a non-trivial amount of time to sanity check the age increases at
		// least this amount. Note that this is not a fudge for some timing-dependent
		// background work it's just ensuring a non-trivial time elapses between the
		// request above and below serially in this thread so short time is OK.
		time.Sleep(5 * time.Millisecond)

		// Fetch again, non-blocking
		result, meta, err := c.Get("t", TestRequest(t, RequestInfo{Key: "hello"}))
		require.NoError(err)
		require.Equal(8, result)
		require.True(meta.Hit)
		require.True(meta.Age > (5 * time.Millisecond))
		lastAge = meta.Age
	}

	// Now verify that setting MaxAge results in cache invalidation
	{
		result, meta, err := c.Get("t", TestRequest(t, RequestInfo{
			Key:    "hello",
			MaxAge: 1 * time.Millisecond,
		}))
		require.NoError(err)
		require.Equal(8, result)
		require.False(meta.Hit)
		// Age should be smaller again since MaxAge forced a refetch
		require.True(meta.Age < lastAge)
	}
}
  1121  
  1122  func TestCacheGet_nonBlockingType(t *testing.T) {
  1123  	t.Parallel()
  1124  
  1125  	typ := TestTypeNonBlocking(t)
  1126  	defer typ.AssertExpectations(t)
  1127  	c := TestCache(t)
  1128  	c.RegisterType("t", typ, nil)
  1129  
  1130  	// Configure the type
  1131  	typ.Static(FetchResult{Value: 42, Index: 1}, nil).Once()
  1132  	typ.Static(FetchResult{Value: 43, Index: 2}, nil).Twice().
  1133  		Run(func(args mock.Arguments) {
  1134  			opts := args.Get(0).(FetchOptions)
  1135  			// MinIndex should never be set for a non-blocking type.
  1136  			require.Equal(t, uint64(0), opts.MinIndex)
  1137  		})
  1138  
  1139  	require := require.New(t)
  1140  
  1141  	// Get, should fetch
  1142  	req := TestRequest(t, RequestInfo{Key: "hello"})
  1143  	result, meta, err := c.Get("t", req)
  1144  	require.NoError(err)
  1145  	require.Equal(42, result)
  1146  	require.False(meta.Hit)
  1147  
  1148  	// Get, should not fetch since we have a cached value
  1149  	req = TestRequest(t, RequestInfo{Key: "hello"})
  1150  	result, meta, err = c.Get("t", req)
  1151  	require.NoError(err)
  1152  	require.Equal(42, result)
  1153  	require.True(meta.Hit)
  1154  
  1155  	// Get, should not attempt to fetch with blocking even if requested. The
  1156  	// assertions below about the value being the same combined with the fact the
  1157  	// mock will only return that value on first call suffice to show that
  1158  	// blocking request is not being attempted.
  1159  	req = TestRequest(t, RequestInfo{
  1160  		Key:      "hello",
  1161  		MinIndex: 1,
  1162  		Timeout:  10 * time.Minute,
  1163  	})
  1164  	result, meta, err = c.Get("t", req)
  1165  	require.NoError(err)
  1166  	require.Equal(42, result)
  1167  	require.True(meta.Hit)
  1168  
  1169  	time.Sleep(10 * time.Millisecond)
  1170  
  1171  	// Get with a max age should fetch again
  1172  	req = TestRequest(t, RequestInfo{Key: "hello", MaxAge: 5 * time.Millisecond})
  1173  	result, meta, err = c.Get("t", req)
  1174  	require.NoError(err)
  1175  	require.Equal(43, result)
  1176  	require.False(meta.Hit)
  1177  
  1178  	// Get with a must revalidate should fetch again even without a delay.
  1179  	req = TestRequest(t, RequestInfo{Key: "hello", MustRevalidate: true})
  1180  	result, meta, err = c.Get("t", req)
  1181  	require.NoError(err)
  1182  	require.Equal(43, result)
  1183  	require.False(meta.Hit)
  1184  
  1185  	// Sleep a tiny bit just to let maybe some background calls happen
  1186  	// then verify that we still only got the one call
  1187  	time.Sleep(20 * time.Millisecond)
  1188  	typ.AssertExpectations(t)
  1189  }