github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/kv/kvclient/kvcoord/range_cache_test.go

// Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package kvcoord

import (
	"bytes"
	"context"
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/biogo/store/llrb"
	"github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
	"github.com/cockroachdb/cockroach/pkg/util/stop"
	"github.com/cockroachdb/cockroach/pkg/util/tracing"
	"github.com/cockroachdb/errors"
	"github.com/stretchr/testify/require"
)

type testDescriptorDB struct {
	data            llrb.Tree
	cache           *RangeDescriptorCache
	lookupCount     int64
	disablePrefetch bool
	pauseChan       chan struct{}
	// listeners[key] is closed when a lookup on the key happens.
	listeners map[string]chan struct{}
}

type testDescriptorNode struct {
	*roachpb.RangeDescriptor
}

func (a testDescriptorNode) Compare(b llrb.Comparable) int {
	aKey := a.RangeDescriptor.EndKey
	bKey := b.(testDescriptorNode).RangeDescriptor.EndKey
	return bytes.Compare(aKey, bKey)
}

// notifyOn returns a channel that will be closed when the next lookup on key
// happens.
func (db *testDescriptorDB) notifyOn(key roachpb.RKey) <-chan struct{} {
	if db.listeners == nil {
		db.listeners = make(map[string]chan struct{})
	}
	ch := make(chan struct{})
	db.listeners[key.String()] = ch
	return ch
}

// getDescriptors scans the testDescriptorDB starting at the provided key in the
// specified direction and collects the first RangeDescriptors that it finds.
func (db *testDescriptorDB) getDescriptors(
	key roachpb.RKey, useReverseScan bool,
) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, error) {
	rs := make([]roachpb.RangeDescriptor, 0, 1)
	preRs := make([]roachpb.RangeDescriptor, 0, 2)
	for i := 0; i < 3; i++ {
		var endKey roachpb.RKey
		if useReverseScan {
			endKey = key
		} else {
			endKey = key.Next()
		}

		v := db.data.Ceil(testDescriptorNode{
			&roachpb.RangeDescriptor{
				EndKey: endKey,
			},
		})
		if v == nil {
			break
		}
		desc := *(v.(testDescriptorNode).RangeDescriptor)
		if i == 0 {
			rs = append(rs, desc)
			// Fake an intent.
			desc.RangeID++
			desc.Generation = desc.Generation + 1
			rs = append(rs, desc)
		} else if db.disablePrefetch {
			break
		} else {
			preRs = append(preRs, desc)
		}
		// Break to keep from skidding off the end of the available ranges.
		if desc.EndKey.Equal(roachpb.RKeyMax) {
			break
		}

		if useReverseScan {
			key = desc.StartKey
		} else {
			key = desc.EndKey
		}
	}
	return rs, preRs, nil
}

func (db *testDescriptorDB) FirstRange() (*roachpb.RangeDescriptor, error) {
	rs, _, err := db.getDescriptors(roachpb.RKeyMin, false /* useReverseScan */)
	if err != nil {
		return nil, err
	}
	return &rs[0], nil
}

func (db *testDescriptorDB) RangeLookup(
	ctx context.Context, key roachpb.RKey, useReverseScan bool,
) ([]roachpb.RangeDescriptor, []roachpb.RangeDescriptor, error) {
	// Notify the test of the lookup, if the test wants notifications.
	if ch, ok := db.listeners[key.String()]; ok {
		close(ch)
	}
	select {
	case <-db.pauseChan:
	case <-ctx.Done():
		return nil, nil, ctx.Err()
	}

	atomic.AddInt64(&db.lookupCount, 1)
	rs, preRs, err := db.getDescriptors(key, useReverseScan)
	if err != nil {
		return nil, nil, err
	}

	if err := db.simulateLookupScan(ctx, key, &rs[0], useReverseScan); err != nil {
		return nil, nil, err
	}
	return rs, preRs, nil
}

// For each RangeLookup, we also perform a cache lookup for the descriptor
// which holds that key. This mimics the behavior of DistSender, which uses
// the cache when performing a ScanRequest over the meta range to find the
// desired descriptor.
//
// This isn't exactly correct, because DistSender will actually keep
// scanning until it prefetches the desired number of descriptors, but it's
// close enough for testing.
func (db *testDescriptorDB) simulateLookupScan(
	ctx context.Context, key roachpb.RKey, foundDesc *roachpb.RangeDescriptor, useReverseScan bool,
) error {
	metaKey := keys.RangeMetaKey(key)
	for {
		desc, _, err := db.cache.LookupRangeDescriptorWithEvictionToken(ctx, metaKey, nil, useReverseScan)
		if err != nil {
			return err
		}
		// If the descriptor for metaKey does not contain the EndKey of the
		// descriptor we're going to return, simulate a scan continuation.
		// This can happen in the case of meta2 splits.
		if desc.ContainsKey(keys.RangeMetaKey(foundDesc.EndKey)) {
			break
		}
		metaKey = desc.EndKey
	}
	return nil
}

func (db *testDescriptorDB) splitRange(t *testing.T, key roachpb.RKey) {
	v := db.data.Ceil(testDescriptorNode{&roachpb.RangeDescriptor{EndKey: key}})
	if v == nil {
		t.Fatalf("Error splitting range at key %s, range to split not found", string(key))
	}
	val := v.(testDescriptorNode)
	if bytes.Equal(val.EndKey, key) {
		t.Fatalf("Attempt to split existing range at EndKey: %s", string(key))
	}
	newGen := val.Generation + 1
	db.data.Insert(testDescriptorNode{
		&roachpb.RangeDescriptor{
			StartKey:   val.StartKey,
			EndKey:     key,
			Generation: newGen,
		},
	})
	db.data.Insert(testDescriptorNode{
		&roachpb.RangeDescriptor{
			StartKey:   key,
			EndKey:     val.EndKey,
			Generation: newGen,
		},
	})
}

func (db *testDescriptorDB) pauseRangeLookups() {
	db.pauseChan = make(chan struct{})
}

func (db *testDescriptorDB) resumeRangeLookups() {
	close(db.pauseChan)
}

func newTestDescriptorDB() *testDescriptorDB {
	db := &testDescriptorDB{
		pauseChan: make(chan struct{}),
	}
	// NOTE: The range descriptors created below are not initialized with a
	// generation. The ones created by splitting them will have generations,
	// though. Not putting generations in these initial ones is done for diversity
	// in the tests.
	td1 := &roachpb.RangeDescriptor{
		StartKey: roachpb.RKeyMin,
		EndKey:   roachpb.RKey(keys.Meta2Prefix),
	}
	td2 := &roachpb.RangeDescriptor{
		StartKey: td1.EndKey,
		EndKey:   roachpb.RKey(keys.MetaMax),
	}
	td3 := &roachpb.RangeDescriptor{
		StartKey: td2.EndKey,
		EndKey:   roachpb.RKeyMax,
	}
	db.data.Insert(testDescriptorNode{td1})
	db.data.Insert(testDescriptorNode{td2})
	db.data.Insert(testDescriptorNode{td3})
	db.resumeRangeLookups()
	return db
}

func initTestDescriptorDB(t *testing.T) *testDescriptorDB {
	st := cluster.MakeTestingClusterSettings()
	db := newTestDescriptorDB()
	for i, char := range "abcdefghijklmnopqrstuvwx" {
		// Create splits on each character:
		// [min,a), [a,b), [b,c), [c,d), [d,e), etc.
		db.splitRange(t, roachpb.RKey(string(char)))
		if i > 0 && i%6 == 0 {
			// Create meta2 splits on every 6th character:
			// [meta(min),meta(g)), [meta(g),meta(m)), [meta(m),meta(s)), etc.
			db.splitRange(t, keys.RangeMetaKey(roachpb.RKey(string(char))))
		}
	}
	db.cache = NewRangeDescriptorCache(st, db, staticSize(2<<10), stop.NewStopper())
	return db
}

// assertLookupCountEq fails unless exactly exp lookups have been observed.
func (db *testDescriptorDB) assertLookupCountEq(t *testing.T, exp int64, key string) {
	t.Helper()
	if exp != db.lookupCount {
		t.Errorf("expected lookup count %d after %s, was %d", exp, key, db.lookupCount)
	}
	db.lookupCount = 0
}

// assertLookupCount fails unless the number of lookups observed is >= from and <= to.
func (db *testDescriptorDB) assertLookupCount(t *testing.T, from, to int64, key string) {
	t.Helper()
	if from > db.lookupCount || to < db.lookupCount {
		t.Errorf("expected lookup count in [%d, %d] after %s, was %d", from, to, key, db.lookupCount)
	}
	db.lookupCount = 0
}

func doLookup(
	ctx context.Context, rc *RangeDescriptorCache, key string,
) (*roachpb.RangeDescriptor, *EvictionToken) {
	return doLookupWithToken(ctx, rc, key, nil, false)
}

func evict(ctx context.Context, rc *RangeDescriptorCache, desc *roachpb.RangeDescriptor) bool {
	rc.rangeCache.Lock()
	defer rc.rangeCache.Unlock()
	return rc.evictCachedRangeDescriptorLocked(ctx, desc)
}

func doLookupWithToken(
	ctx context.Context,
	rc *RangeDescriptorCache,
	key string,
	evictToken *EvictionToken,
	useReverseScan bool,
) (*roachpb.RangeDescriptor, *EvictionToken) {
	// NOTE: This function panics on errors because it is often called from other
	// goroutines than the test's main one.

	r, returnToken, err := rc.lookupRangeDescriptorInternal(
		ctx, roachpb.RKey(key), evictToken, useReverseScan)
	if err != nil {
		panic(fmt.Sprintf("unexpected error from LookupRangeDescriptor: %s", err))
	}
	keyAddr, err := keys.Addr(roachpb.Key(key))
	if err != nil {
		panic(err)
	}
	if (useReverseScan && !r.ContainsKeyInverted(keyAddr)) || (!useReverseScan && !r.ContainsKey(keyAddr)) {
		panic(fmt.Sprintf("Returned range did not contain key: %s-%s, %s", r.StartKey, r.EndKey, key))
	}
	return r, returnToken
}

// TestDescriptorDBGetDescriptors verifies that getDescriptors returns correct descriptors.
func TestDescriptorDBGetDescriptors(t *testing.T) {
	defer leaktest.AfterTest(t)()
	db := initTestDescriptorDB(t)

	key := roachpb.RKey("k")
	expectedRspansMap := map[bool][]roachpb.RSpan{
		true: {
			roachpb.RSpan{Key: roachpb.RKey("j"), EndKey: roachpb.RKey("k")}, // real
			roachpb.RSpan{Key: roachpb.RKey("j"), EndKey: roachpb.RKey("k")}, // fake intent
			roachpb.RSpan{Key: roachpb.RKey("i"), EndKey: roachpb.RKey("j")},
			roachpb.RSpan{Key: roachpb.RKey("h"), EndKey: roachpb.RKey("i")},
		},
		false: {
			roachpb.RSpan{Key: roachpb.RKey("k"), EndKey: roachpb.RKey("l")}, // real
			roachpb.RSpan{Key: roachpb.RKey("k"), EndKey: roachpb.RKey("l")}, // fake intent
			roachpb.RSpan{Key: roachpb.RKey("l"), EndKey: roachpb.RKey("m")},
			roachpb.RSpan{Key: roachpb.RKey("m"), EndKey: roachpb.RKey("n")},
		},
	}

	for useReverseScan, expectedRspans := range expectedRspansMap {
		descs, preDescs, pErr := db.getDescriptors(key, useReverseScan)
		if pErr != nil {
			t.Fatal(pErr)
		}

		descSpans := make([]roachpb.RSpan, len(descs))
		for i := range descs {
			descSpans[i] = descs[i].RSpan()
		}
		if !reflect.DeepEqual(descSpans, expectedRspans[:2]) {
			t.Errorf("useReverseScan=%t: expected %s, got %s", useReverseScan, expectedRspans[:2], descSpans)
		}
		preDescSpans := make([]roachpb.RSpan, len(preDescs))
		for i := range preDescs {
			preDescSpans[i] = preDescs[i].RSpan()
		}
		if !reflect.DeepEqual(preDescSpans, expectedRspans[2:]) {
			t.Errorf("useReverseScan=%t: expected %s, got %s", useReverseScan, expectedRspans[2:], preDescSpans)
		}
	}
}

func TestRangeCacheAssumptions(t *testing.T) {
	defer leaktest.AfterTest(t)()
	expKeyMin := keys.RangeMetaKey(keys.RangeMetaKey(keys.RangeMetaKey(roachpb.RKey("test"))))
	if !bytes.Equal(expKeyMin, roachpb.RKeyMin) {
		t.Fatalf("RangeCache relies on RangeMetaKey returning KeyMin after two levels, but got %s", expKeyMin)
	}
}

// TestRangeCache is a simple test which verifies that metadata ranges
// are being cached and retrieved properly. It sets up a fake backing
// store for the cache, and measures how often that backing store is
// queried when looking up metadata keys through the cache.
func TestRangeCache(t *testing.T) {
	defer leaktest.AfterTest(t)()
	db := initTestDescriptorDB(t)
	ctx := context.Background()

	// Totally uncached range.
	// Retrieves [meta(min),meta(g)) and [a,b).
	// Prefetches [meta(g),meta(m)), [meta(m),meta(s)), [b,c), and [c,d).
	doLookup(ctx, db.cache, "aa")
	db.assertLookupCountEq(t, 2, "aa")

	// Descriptors for the following ranges should be cached.
	doLookup(ctx, db.cache, "ab")
	db.assertLookupCountEq(t, 0, "ab")
	doLookup(ctx, db.cache, "ba")
	db.assertLookupCountEq(t, 0, "ba")
	doLookup(ctx, db.cache, "cz")
	db.assertLookupCountEq(t, 0, "cz")

	// Metadata 2 ranges aren't cached, metadata 1 range is.
	// Retrieves [d,e).
	// Prefetches [e,f) and [f,g).
	de, _ := doLookup(ctx, db.cache, "d")
	db.assertLookupCountEq(t, 1, "d")
	doLookup(ctx, db.cache, "fa")
	db.assertLookupCountEq(t, 0, "fa")

	// Metadata 2 ranges aren't cached, metadata 1 range is.
	// Retrieves [i,j).
	// Prefetches [j,k) and [k,l).
	doLookup(ctx, db.cache, "ij")
	db.assertLookupCountEq(t, 1, "ij")
	doLookup(ctx, db.cache, "jk")
	db.assertLookupCountEq(t, 0, "jk")

	// Totally uncached range.
	// Retrieves [meta(s),meta(max)) and [r,s).
	// Prefetches [s,t) and [t,u).
	//
	// Notice that the lookup key "ra" will not initially go to
	// [meta(s),meta(max)), but instead will go to [meta(m),meta(s)). This is
	// an example where the RangeLookup scan will continue onto a new range.
	doLookup(ctx, db.cache, "ra")
	db.assertLookupCountEq(t, 2, "ra")

	// Metadata 2 ranges aren't cached, metadata 1 range is.
	// Retrieves [v,w).
	// Prefetches [w,x) and [x,max).
	doLookup(ctx, db.cache, "vu")
	db.assertLookupCountEq(t, 1, "vu")

	// Evicts [d,e).
	require.True(t, evict(ctx, db.cache, de))
	// Evicts [meta(min),meta(g)).
	require.True(t, db.cache.EvictByKey(ctx, keys.RangeMetaKey(roachpb.RKey("da"))))
	doLookup(ctx, db.cache, "fa")
	db.assertLookupCountEq(t, 0, "fa")
	// Totally uncached range.
	// Retrieves [meta(min),meta(g)) and [d,e).
	// Prefetches [e,f) and [f,g).
	doLookup(ctx, db.cache, "da")
	db.assertLookupCountEq(t, 2, "da")

	// Looking up a descriptor that lands on an end-key should work
	// without a cache miss.
	doLookup(ctx, db.cache, "a")
	db.assertLookupCountEq(t, 0, "a")

	// Attempt to compare-and-evict with a descriptor that is not equal to the
	// cached one; it should not alter the cache.
	desc, _ := doLookup(ctx, db.cache, "cz")
	descCpy := *desc
	descCpy.IncrementGeneration()
	require.False(t, evict(ctx, db.cache, &descCpy))
	_, evictToken := doLookup(ctx, db.cache, "cz")
	db.assertLookupCountEq(t, 0, "cz")
	// Now evict with the actual descriptor. The cache should clear the
	// descriptor.
	// Evicts [c,d).
	evictToken.Evict(ctx)
	// Meta2 range is cached.
	// Retrieves [c,d).
	// Prefetches [d,e) and [e,f).
	doLookup(ctx, db.cache, "cz")
	db.assertLookupCountEq(t, 1, "cz")
}

// TestRangeCacheCoalescedRequests verifies that concurrent lookups for
// the same key will be coalesced onto the same database lookup.
func TestRangeCacheCoalescedRequests(t *testing.T) {
	defer leaktest.AfterTest(t)()
	db := initTestDescriptorDB(t)
	ctx := context.Background()

	pauseLookupResumeAndAssert := func(key string, expected int64) {
		var wg sync.WaitGroup
		db.pauseRangeLookups()

		// We're going to perform 3 lookups on the same key, in parallel, while
		// lookups are paused. Either they're all expected to get cache hits (in the
		// case where expected == 0), or there will be one request actually blocked
		// in the db and the other two will get coalesced onto it.
		var coalesced chan struct{}
		if expected > 0 {
			coalesced = make(chan struct{})
			db.cache.coalesced = coalesced
		}
		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func() {
				doLookupWithToken(ctx, db.cache, key, nil, false)
				wg.Done()
			}()
		}

		// Wait for requests to be coalesced before unblocking the db.
		if coalesced != nil {
			for i := 0; i < 2; i++ {
				<-coalesced
			}
		}

		db.resumeRangeLookups()
		wg.Wait()
		db.assertLookupCountEq(t, expected, key)
	}

	// Totally uncached range.
	// Retrieves [meta(min),meta(g)) and [a,b).
	// Prefetches [meta(g),meta(m)), [meta(m),meta(s)), [b,c), and [c,d).
	pauseLookupResumeAndAssert("aa", 2)

	// Metadata 2 ranges aren't cached, metadata 1 range is.
	// Retrieves [d,e).
	// Prefetches [e,f) and [f,g).
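	// The meta2 descriptor covering "d" was prefetched by the "aa" lookup above,
	// so the three concurrent lookups for "d" coalesce onto a single db lookup
	// for [d,e). The follow-up lookups for "ea" then hit the [e,f) descriptor
	// prefetched by that lookup and need no db lookup at all.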
	pauseLookupResumeAndAssert("d", 1)
	pauseLookupResumeAndAssert("ea", 0)
}

// TestRangeCacheContextCancellation tests that, for an ongoing RangeDescriptor
// lookup, canceling the passed-in context makes the lookup return with an
// error indicating so. Canceling the ctx does not stop the in-flight lookup,
// though (even though the requester has returned from
// lookupRangeDescriptorInternal()): other requesters that joined the same
// flight are unaffected by the ctx cancelation.
func TestRangeCacheContextCancellation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	db := initTestDescriptorDB(t)

	// lookupAndWaitUntilJoin performs a RangeDescriptor lookup in a new
	// goroutine and blocks until the request is added to the inflight request
	// map. It returns a channel that transmits the error return value from the
	// lookup.
	lookupAndWaitUntilJoin := func(ctx context.Context, key roachpb.RKey, expectDBLookup bool) chan error {
		errC := make(chan error)
		var blocked <-chan struct{}
		if expectDBLookup {
			blocked = db.notifyOn(key)
		} else {
			ch := make(chan struct{})
			db.cache.coalesced = ch
			blocked = ch
		}
		go func() {
			_, _, err := db.cache.lookupRangeDescriptorInternal(ctx, key, nil, false)
			errC <- err
		}()
		<-blocked
		return errC
	}

	expectContextCancellation := func(t *testing.T, c <-chan error) {
		t.Helper()
		if err := <-c; !errors.Is(err, context.Canceled) {
			t.Errorf("expected context cancellation error, found %v", err)
		}
	}
	expectNoError := func(t *testing.T, c <-chan error) {
		t.Helper()
		if err := <-c; err != nil {
			t.Errorf("unexpected error, found %v", err)
		}
	}

	ctx1, cancel := context.WithCancel(context.Background()) // leader
	ctx2 := context.Background()
	ctx3 := context.Background()

	db.pauseRangeLookups()
	key1 := roachpb.RKey("aa")
	errC1 := lookupAndWaitUntilJoin(ctx1, key1, true)
	errC2 := lookupAndWaitUntilJoin(ctx2, key1, false)

	// Cancel the leader and check that it gets an error.
	cancel()
	expectContextCancellation(t, errC1)

	// While lookups are still blocked, launch another one. This new request
	// should join the flight just like c2.
	errC3 := lookupAndWaitUntilJoin(ctx3, key1, false)

	// Let the flight finish.
	db.resumeRangeLookups()
	expectNoError(t, errC2)
	expectNoError(t, errC3)
}

// TestRangeCacheDetectSplit verifies that when the cache detects a split
// it will properly coalesce all requests to the right half of the split and
// will prefetch the left half of the split.
func TestRangeCacheDetectSplit(t *testing.T) {
	defer leaktest.AfterTest(t)()
	db := initTestDescriptorDB(t)
	ctx := context.Background()

	pauseLookupResumeAndAssert := func(key string, expected int64, evictToken *EvictionToken) {
		var wg sync.WaitGroup
		db.pauseRangeLookups()

		// We're going to perform 3 lookups on the same key, in parallel, while
		// lookups are paused. Either they're all expected to get cache hits (in the
		// case where expected == 0), or there will be one request actually blocked
		// in the db and the other two will get coalesced onto it.
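		//
		// db.cache.coalesced is a testing hook: the cache signals on it each time a
		// lookup request is coalesced onto an in-flight one, which lets us wait for
		// both followers below before unblocking the db.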
		var coalesced chan struct{}
		if expected > 0 {
			coalesced = make(chan struct{})
			db.cache.coalesced = coalesced
		}

		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func(id int) {
				// Each request goes to a different key.
				doLookupWithToken(ctx, db.cache, fmt.Sprintf("%s%d", key, id), evictToken, false)
				wg.Done()
			}(i)
		}
		// Wait for requests to be coalesced before unblocking the db.
		if coalesced != nil {
			for i := 0; i < 2; i++ {
				<-coalesced
			}
		}

		db.resumeRangeLookups()
		wg.Wait()
		db.assertLookupCountEq(t, expected, key)
	}

	// A request initially looks up the range descriptor ["a"-"b").
	doLookup(ctx, db.cache, "aa")
	db.assertLookupCountEq(t, 2, "aa")

	// A split breaks up the range into ["a"-"an") and ["an"-"b").
	db.splitRange(t, roachpb.RKey("an"))

	// A request is sent to the stale descriptor on the right half
	// such that a RangeKeyMismatchError is returned.
	_, evictToken := doLookup(ctx, db.cache, "az")
	// mismatchErrRange mocks out a RangeKeyMismatchError.Range response.
	ranges, _, pErr := db.getDescriptors(roachpb.RKey("aa"), false)
	if pErr != nil {
		t.Fatal(pErr)
	}
	mismatchErrRange := ranges[0]
	// The stale descriptor is evicted, the new descriptor from the error is
	// inserted in its place, and a new lookup is initialized.
	evictToken.EvictAndReplace(ctx, mismatchErrRange)
	pauseLookupResumeAndAssert("az", 1, evictToken)

	// Both sides of the split are now correctly cached.
	doLookup(ctx, db.cache, "aa")
	db.assertLookupCountEq(t, 0, "aa")
	doLookup(ctx, db.cache, "az")
	db.assertLookupCountEq(t, 0, "az")
}

// TestRangeCacheDetectSplitReverseScan verifies that the end key of a stale
// descriptor is used as the request key when the request is a reverse scan.
func TestRangeCacheDetectSplitReverseScan(t *testing.T) {
	defer leaktest.AfterTest(t)()
	db := initTestDescriptorDB(t)
	ctx := context.Background()

	// A request initially looks up the range descriptor ["a"-"b").
	doLookup(ctx, db.cache, "aa")
	db.assertLookupCountEq(t, 2, "aa")

	// A split breaks up the range into ["a"-"an") and ["an"-"b").
	db.splitRange(t, roachpb.RKey("an"))

	// A request is sent to the stale descriptor on the right half
	// such that a RangeKeyMismatchError is returned.
	useReverseScan := true
	_, evictToken := doLookupWithToken(ctx, db.cache, "az", nil, useReverseScan)
	// mismatchErrRange mocks out a RangeKeyMismatchError.Range response.
	ranges, _, pErr := db.getDescriptors(roachpb.RKey("aa"), false)
	if pErr != nil {
		t.Fatal(pErr)
	}
	mismatchErrRange := ranges[0]
	// The stale descriptor is evicted, the new descriptor from the error is
	// inserted in its place, and a new lookup is initialized.
	// Evict the cached descriptor ["a", "b") and insert ["a"-"an").
	evictToken.EvictAndReplace(ctx, mismatchErrRange)

	// Create two lookup requests with keys "a" and "az". The lookup on "az" uses
	// the evictToken returned by the previous lookup.
	//
	// The requests will *not* be coalesced, and two different descriptors should
	// be returned ([KeyMin,"a") and ["an","b")).
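	// (With a reverse scan, a key addresses the range that ends at it - this is
	// what ContainsKeyInverted checks - which is why "a" resolves to [KeyMin,"a")
	// rather than ["a","an").)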
	lookups := []struct {
		key        string
		evictToken *EvictionToken
	}{
		{"a", nil},
		{"az", evictToken},
	}
	db.pauseRangeLookups()
	var wg, waitJoin sync.WaitGroup
	for _, lookup := range lookups {
		wg.Add(1)
		blocked := db.notifyOn(roachpb.RKey(lookup.key))
		go func(key string, evictToken *EvictionToken) {
			doLookupWithToken(ctx, db.cache, key, evictToken, useReverseScan)
			wg.Done()
		}(lookup.key, lookup.evictToken)
		<-blocked
	}
	waitJoin.Wait()
	db.resumeRangeLookups()
	wg.Wait()
	db.assertLookupCount(t, 2, 3, "a and az")

	// Both are now correctly cached.
	doLookupWithToken(ctx, db.cache, "a", nil, useReverseScan)
	db.assertLookupCountEq(t, 0, "a")
	doLookupWithToken(ctx, db.cache, "az", nil, useReverseScan)
	db.assertLookupCountEq(t, 0, "az")
}

// TestRangeCacheHandleDoubleSplit tests that the range cache deals with
// situations where requests have to be retried internally because they've been
// wrongly coalesced the first time around.
func TestRangeCacheHandleDoubleSplit(t *testing.T) {
	defer leaktest.AfterTest(t)()

	// The test starts with the descriptor [a-an) in the cache.
	// There are 3 ranges of interest: [a-an)[an-at)[at-b).
	// We're going to start a bunch of range lookups in order, and we'll assert
	// what happens to each one.
	type exp int
	const (
		cacheHit exp = iota
		lookupLeader
		lookupCoalesced
		lookupWronglyCoalesced
	)
	testCases := []struct {
		reverseScan bool
		keys        []struct {
			key string
			exp exp
		}
	}{
		{
			// [forward case]
			// - "aa" will hit the cache
			// - all others will join a coalesced request to "an"
			//   + will lookup the meta2 desc
			//   + will lookup the ["an"-"at") desc
			// - "an" and "ao" will get the correct range back
			// - "at" and "az" will make a second lookup
			//   + will lookup the ["at"-"b") desc
			reverseScan: false,
			keys: []struct {
				key string
				exp exp
			}{
				{key: "aa", exp: cacheHit},
				{key: "an", exp: lookupLeader},
				{key: "ao", exp: lookupCoalesced},
				{key: "at", exp: lookupWronglyCoalesced},
				{key: "az", exp: lookupWronglyCoalesced},
			},
		},
		{
			// [reverse case]
			// - "aa" and "an" will hit the cache
			// - all others will join a coalesced request to "ao"
			//   + will lookup the meta2 desc
			//   + will lookup the ["at"-"b") desc
			// - "ao" will get the right range back
			// - "at" and "az" will make a second lookup
			//   + will lookup the ["an"-"at") desc
			reverseScan: true,
			keys: []struct {
				key string
				exp exp
			}{
				{key: "aa", exp: cacheHit},
				{key: "an", exp: cacheHit},
				{key: "ao", exp: lookupLeader},
				{key: "at", exp: lookupCoalesced},
				{key: "az", exp: lookupWronglyCoalesced},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(fmt.Sprintf("reverse=%t", tc.reverseScan), func(t *testing.T) {
			db := initTestDescriptorDB(t)
			db.disablePrefetch = true
			ctx := context.Background()

			// A request initially looks up the range descriptor ["a"-"b").
			doLookup(ctx, db.cache, "aa")
			db.assertLookupCountEq(t, 2, "aa")

			// A split breaks up the range into ["a"-"an"), ["an"-"at"), ["at"-"b").
			db.splitRange(t, roachpb.RKey("an"))
			db.splitRange(t, roachpb.RKey("at"))

			// A request is sent to the stale descriptor on the right half
			// such that a RangeKeyMismatchError is returned.
			_, evictToken := doLookup(ctx, db.cache, "az")
			// mismatchErrRange mocks out a RangeKeyMismatchError.Range response.
			ranges, _, pErr := db.getDescriptors(roachpb.RKey("aa"), false)
			if pErr != nil {
				t.Fatal(pErr)
			}
			mismatchErrRange := ranges[0]
			// The stale descriptor is evicted, the new descriptor from the error is
			// inserted in its place, and a new lookup is initialized.
			evictToken.EvictAndReplace(ctx, mismatchErrRange)

			// wg will be used to wait for all the lookups to complete.
			wg := sync.WaitGroup{}
			wg.Add(len(tc.keys))

			// lookup will kick off an async range lookup. If the request is expected
			// to block, by either going to the db or being coalesced onto another
			// request, this function will wait until the request gets blocked.
			lookup := func(key roachpb.RKey, exp exp) {
				var blocked <-chan struct{}
				var expLog string
				switch exp {
				case lookupLeader:
					blocked = db.notifyOn(key)
				case lookupWronglyCoalesced:
					expLog = "bad lookup coalescing; retrying"
					ch := make(chan struct{})
					db.cache.coalesced = ch
					blocked = ch
				case lookupCoalesced:
					expLog = "coalesced range lookup request onto in-flight one"
					ch := make(chan struct{})
					db.cache.coalesced = ch
					blocked = ch
				}

				go func(ctx context.Context) {
					defer wg.Done()
					var desc *roachpb.RangeDescriptor
					// Each request goes to a different key.
					var err error
					ctx, getRecording, cancel := tracing.ContextWithRecordingSpan(ctx, "test")
					defer cancel()
					desc, _, err = db.cache.lookupRangeDescriptorInternal(
						ctx, key, evictToken,
						tc.reverseScan)
					require.NoError(t, err)
					if tc.reverseScan {
						if !desc.ContainsKeyInverted(key) {
							t.Errorf("desc %s does not contain exclusive end key %s", desc, key)
						}
					} else {
						if !desc.ContainsKey(key) {
							t.Errorf("desc %s does not contain key %s", desc, key)
						}
					}
					if expLog != "" {
						rec := getRecording()
						_, ok := rec.FindLogMessage(expLog)
						if !ok {
							t.Errorf("didn't find expected message in trace for %s: %s. Recording:\n%s",
								key, expLog, rec)
						}
					}
				}(ctx)

				// If we're expecting this request to block, wait for that.
				if blocked != nil {
					select {
					case <-blocked:
					case <-time.After(10 * time.Second):
						t.Errorf("request didn't block: %s", key)
					}
				}
				// Reset the notification channel; if the lookup is internally retried
				// we won't be waiting for a 2nd notification.
				db.cache.coalesced = nil
			}

			// Block all the lookups at the db level.
			db.pauseRangeLookups()
			// Kick off all the lookups, in order. The cache hits will finish, the rest
			// will get blocked.
			for _, look := range tc.keys {
				lookup(roachpb.RKey(look.key), look.exp)
			}

			// All the requests that didn't hit the cache are now blocked. Unblock
			// them.
			db.resumeRangeLookups()
			// Wait for all requests to finish.
			wg.Wait()
			db.assertLookupCountEq(t, 2, "an and az")

			// All three descriptors are now correctly cached.
			doLookup(ctx, db.cache, "aa")
			db.assertLookupCountEq(t, 0, "aa")
			doLookup(ctx, db.cache, "ao")
			db.assertLookupCountEq(t, 0, "ao")
			doLookup(ctx, db.cache, "az")
			db.assertLookupCountEq(t, 0, "az")
		})
	}
}

func TestRangeCacheUseIntents(t *testing.T) {
	defer leaktest.AfterTest(t)()
	db := initTestDescriptorDB(t)
	ctx := context.Background()

	// A request initially looks up the range descriptor ["a"-"b").
	abDesc, evictToken := doLookup(ctx, db.cache, "aa")
	db.assertLookupCountEq(t, 2, "aa")

	// Perform a lookup now that the cache is populated.
	abDescLookup, _ := doLookup(ctx, db.cache, "aa")
	db.assertLookupCountEq(t, 0, "aa")

	// The descriptors should be the same.
	if !reflect.DeepEqual(abDesc, abDescLookup) {
		t.Errorf("expected initial range descriptor to be returned from lookup, found %v", abDescLookup)
	}

	// The current descriptor is found to be stale, so it is evicted. The next cache
	// lookup should return the descriptor from the intents, without performing another
	// db lookup.
	evictToken.Evict(ctx)
	abDescIntent, _ := doLookup(ctx, db.cache, "aa")
	db.assertLookupCountEq(t, 0, "aa")

	// The descriptors should be different.
	if reflect.DeepEqual(abDesc, abDescIntent) {
		t.Errorf("expected initial range descriptor to be different from the one from intents, found %v", abDesc)
	}
}

// TestRangeCacheClearOverlapping verifies that existing, overlapping
// cached entries are cleared when adding a new entry.
func TestRangeCacheClearOverlapping(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	defDesc := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKeyMin,
		EndKey:     roachpb.RKeyMax,
		Generation: 0,
	}

	st := cluster.MakeTestingClusterSettings()
	cache := NewRangeDescriptorCache(st, nil, staticSize(2<<10), stop.NewStopper())
	cache.rangeCache.cache.Add(rangeCacheKey(keys.RangeMetaKey(roachpb.RKeyMax)), defDesc)

	// Now, add a new, overlapping set of descriptors.
	minToBDesc := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKeyMin,
		EndKey:     roachpb.RKey("b"),
		Generation: 1,
	}
	bToMaxDesc := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKey("b"),
		EndKey:     roachpb.RKeyMax,
		Generation: 1,
	}
	curGeneration := int64(1)
	require.True(t, cache.clearOlderOverlapping(ctx, minToBDesc))
	cache.rangeCache.cache.Add(rangeCacheKey(keys.RangeMetaKey(roachpb.RKey("b"))), minToBDesc)
	if desc := cache.GetCachedRangeDescriptor(roachpb.RKey("b"), false); desc != nil {
		t.Errorf("descriptor unexpectedly non-nil: %s", desc)
	}
	require.True(t, cache.clearOlderOverlapping(ctx, bToMaxDesc))
	cache.rangeCache.cache.Add(rangeCacheKey(keys.RangeMetaKey(roachpb.RKeyMax)), bToMaxDesc)
	if desc := cache.GetCachedRangeDescriptor(roachpb.RKey("b"), false); desc != bToMaxDesc {
		t.Errorf("expected descriptor %s; got %s", bToMaxDesc, desc)
	}

	// Add default descriptor back which should remove two split descriptors.
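	// clearOlderOverlapping only clears entries that are older than the
	// descriptor it is given, so the clear is driven by a copy with a bumped
	// generation; the original defDesc is what gets re-added to the cache.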
	defDescCpy := *defDesc
	curGeneration++
	defDescCpy.Generation = curGeneration
	require.True(t, cache.clearOlderOverlapping(ctx, &defDescCpy))
	cache.rangeCache.cache.Add(rangeCacheKey(keys.RangeMetaKey(roachpb.RKeyMax)), defDesc)
	for _, key := range []roachpb.RKey{roachpb.RKey("a"), roachpb.RKey("b")} {
		if desc := cache.GetCachedRangeDescriptor(key, false); desc != defDesc {
			t.Errorf("expected descriptor %s for key %s; got %s", defDesc, key, desc)
		}
	}

	// Insert ["b", "c") and then insert ["a", "b"). Verify that the former is not evicted by the latter.
	curGeneration++
	bToCDesc := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKey("b"),
		EndKey:     roachpb.RKey("c"),
		Generation: curGeneration,
	}
	require.True(t, cache.clearOlderOverlapping(ctx, bToCDesc))
	cache.rangeCache.cache.Add(rangeCacheKey(keys.RangeMetaKey(roachpb.RKey("c"))), bToCDesc)
	if desc := cache.GetCachedRangeDescriptor(roachpb.RKey("c"), true); desc != bToCDesc {
		t.Errorf("expected descriptor %s; got %s", bToCDesc, desc)
	}

	curGeneration++
	aToBDesc := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKey("a"),
		EndKey:     roachpb.RKey("b"),
		Generation: curGeneration,
	}
	require.True(t, cache.clearOlderOverlapping(ctx, aToBDesc))
	cache.rangeCache.cache.Add(rangeCacheKey(keys.RangeMetaKey(roachpb.RKey("b"))), aToBDesc)
	if desc := cache.GetCachedRangeDescriptor(roachpb.RKey("c"), true); desc != bToCDesc {
		t.Errorf("expected descriptor %s; got %s", bToCDesc, desc)
	}
}

// TestRangeCacheClearOverlappingMeta prevents regression of a bug which caused
// a panic when clearing overlapping descriptors for [KeyMin, Meta2Key). The
// issue was that when attempting to clear out descriptors which were subsumed
// by the above range, an iteration over the corresponding meta keys was
// performed, with the left endpoint excluded. This exclusion was incorrect: it
// first incremented the start key (KeyMin) and then formed the meta key; for
// KeyMin this leads to Meta2Prefix\x00. For the above EndKey, the meta key is
// a Meta1 key which sorts before Meta2Prefix\x00, causing a panic. The fix was
// simply to increment the meta key for StartKey, not StartKey itself.
func TestRangeCacheClearOverlappingMeta(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	firstDesc := roachpb.RangeDescriptor{
		StartKey: roachpb.RKeyMin,
		EndKey:   roachpb.RKey("zzz"),
	}
	restDesc := roachpb.RangeDescriptor{
		StartKey: firstDesc.EndKey,
		EndKey:   roachpb.RKeyMax,
	}

	st := cluster.MakeTestingClusterSettings()
	cache := NewRangeDescriptorCache(st, nil, staticSize(2<<10), stop.NewStopper())
	cache.InsertRangeDescriptors(ctx, firstDesc, restDesc)

	// Add a new range, corresponding to splitting the first range at a meta key.
	metaSplitDesc := &roachpb.RangeDescriptor{
		StartKey: roachpb.RKeyMin,
		EndKey:   keys.RangeMetaKey(roachpb.RKey("foo")),
	}
	func() {
		defer func() {
			if r := recover(); r != nil {
				t.Fatalf("invocation of clearOlderOverlapping panicked: %v", r)
			}
		}()
		cache.clearOlderOverlapping(ctx, metaSplitDesc)
	}()
}

// TestGetCachedRangeDescriptorInverted verifies the correctness of the result
// that is returned by getCachedRangeDescriptor with inverted=true.
func TestGetCachedRangeDescriptorInverted(t *testing.T) {
	defer leaktest.AfterTest(t)()

	testData := []*roachpb.RangeDescriptor{
		{StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("c")},
		{StartKey: roachpb.RKey("c"), EndKey: roachpb.RKey("e")},
		{StartKey: roachpb.RKey("g"), EndKey: roachpb.RKey("z")},
	}

	st := cluster.MakeTestingClusterSettings()
	cache := NewRangeDescriptorCache(st, nil, staticSize(2<<10), stop.NewStopper())
	for _, rd := range testData {
		cache.rangeCache.cache.Add(rangeCacheKey(keys.RangeMetaKey(rd.EndKey)), rd)
	}

	testCases := []struct {
		queryKey roachpb.RKey
		cacheKey rangeCacheKey
		rng      *roachpb.RangeDescriptor
	}{
		{
			// Check range start key.
			queryKey: roachpb.RKey("a"),
			cacheKey: nil,
			rng:      nil,
		},
		{
			// Check range end key.
			queryKey: roachpb.RKey("c"),
			cacheKey: rangeCacheKey(keys.RangeMetaKey(roachpb.RKey("c"))),
			rng:      &roachpb.RangeDescriptor{StartKey: roachpb.RKey("a"), EndKey: roachpb.RKey("c")},
		},
		{
			// Check range middle key.
			queryKey: roachpb.RKey("d"),
			cacheKey: rangeCacheKey(keys.RangeMetaKey(roachpb.RKey("e"))),
			rng:      &roachpb.RangeDescriptor{StartKey: roachpb.RKey("c"), EndKey: roachpb.RKey("e")},
		},
		{
			// Check missing range key.
			queryKey: roachpb.RKey("f"),
			cacheKey: nil,
			rng:      nil,
		},
		{
			// Check range start key with previous range miss.
			queryKey: roachpb.RKey("g"),
			cacheKey: nil,
			rng:      nil,
		},
	}

	for _, test := range testCases {
		cache.rangeCache.RLock()
		targetRange, entry := cache.getCachedRangeDescriptorLocked(test.queryKey, true /* inverted */)
		cache.rangeCache.RUnlock()
		if !reflect.DeepEqual(targetRange, test.rng) {
			t.Fatalf("expected range %v, actually got %v", test.rng, targetRange)
		}
		var cacheKey rangeCacheKey
		if entry != nil {
			cacheKey = entry.Key.(rangeCacheKey)
		}
		if !reflect.DeepEqual(cacheKey, test.cacheKey) {
			t.Fatalf("expected cache key %v, actually got %v", test.cacheKey, cacheKey)
		}
	}
}

func TestRangeCacheGeneration(t *testing.T) {
	defer leaktest.AfterTest(t)()
	ctx := context.Background()

	descAM1 := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKey("a"),
		EndKey:     roachpb.RKey("m"),
		Generation: 1,
	}
	descMZ3 := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKey("m"),
		EndKey:     roachpb.RKey("z"),
		Generation: 3,
	}

	descBY0 := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKey("b"),
		EndKey:     roachpb.RKey("y"),
		Generation: 0,
	}
	descBY2 := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKey("b"),
		EndKey:     roachpb.RKey("y"),
		Generation: 2,
	}
	descBY4 := &roachpb.RangeDescriptor{
		StartKey:   roachpb.RKey("b"),
		EndKey:     roachpb.RKey("y"),
		Generation: 4,
	}

	testCases := []struct {
		name         string
		insertDesc   *roachpb.RangeDescriptor
		queryKeys    []roachpb.RKey
		expectedDesc []*roachpb.RangeDescriptor
	}{
		{
			// descBY0 is ignored since the existing keyspace is covered by
			// descriptors of generations 1 and 3, respectively.
			name:         "evict 0",
			insertDesc:   descBY0,
			queryKeys:    []roachpb.RKey{roachpb.RKey("b"), roachpb.RKey("y")},
			expectedDesc: []*roachpb.RangeDescriptor{descAM1, descMZ3},
		},
		{
			// descBY2 evicts descAM1, but not descMZ3, based on Generation. Since
			// there is an overlapping descriptor with a higher Generation (descMZ3),
			// it is not inserted.
			name:         "evict 1",
			insertDesc:   descBY2,
			queryKeys:    []roachpb.RKey{roachpb.RKey("b"), roachpb.RKey("y")},
			expectedDesc: []*roachpb.RangeDescriptor{nil, descMZ3},
		},
		{
			// descBY4 replaces both existing descriptors and is inserted.
			name:         "evict 2",
			insertDesc:   descBY4,
			queryKeys:    []roachpb.RKey{roachpb.RKey("b"), roachpb.RKey("y")},
			expectedDesc: []*roachpb.RangeDescriptor{descBY4, nil},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			st := cluster.MakeTestingClusterSettings()
			cache := NewRangeDescriptorCache(st, nil, staticSize(2<<10), stop.NewStopper())
			cache.InsertRangeDescriptors(ctx, *descAM1, *descMZ3, *tc.insertDesc)

			for index, queryKey := range tc.queryKeys {
				if actualDesc := cache.GetCachedRangeDescriptor(queryKey, false); !tc.expectedDesc[index].Equal(actualDesc) {
					t.Errorf("expected descriptor %s; got %s", tc.expectedDesc[index], actualDesc)
				}
			}
		})
	}
}
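
// TestRangeMetaKeyAddressingSketch spells out, step by step, the meta
// addressing chain that TestRangeCacheAssumptions above relies on: applying
// keys.RangeMetaKey to an ordinary key yields its meta2 address, applying it
// again yields the meta1 address, and a third application bottoms out at
// KeyMin. It is a minimal illustrative sketch of behavior already asserted
// above rather than an additional invariant of the cache.
func TestRangeMetaKeyAddressingSketch(t *testing.T) {
	defer leaktest.AfterTest(t)()

	userKey := roachpb.RKey("test")
	// Each application of RangeMetaKey moves one addressing level up.
	meta2Key := keys.RangeMetaKey(userKey)  // addresses the descriptor covering userKey
	meta1Key := keys.RangeMetaKey(meta2Key) // addresses the descriptor covering meta2Key
	rootKey := keys.RangeMetaKey(meta1Key)  // the root of the addressing chain

	if bytes.Equal(meta2Key, userKey) || bytes.Equal(meta1Key, meta2Key) {
		t.Fatalf("expected each addressing level to produce a distinct key")
	}
	if !bytes.Equal(rootKey, roachpb.RKeyMin) {
		t.Fatalf("expected the third level to bottom out at KeyMin, got %s", rootKey)
	}
}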