github.com/dolthub/dolt/go@v0.40.5-0.20240520175717-68db7794bea6/store/nbs/root_tracker_test.go

// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file incorporates work covered by the following copyright and
// permission notice:
//
// Copyright 2016 Attic Labs, Inc. All rights reserved.
// Licensed under the Apache License, version 2.0:
// http://www.apache.org/licenses/LICENSE-2.0

package nbs

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/dolthub/dolt/go/store/chunks"
	"github.com/dolthub/dolt/go/store/constants"
	"github.com/dolthub/dolt/go/store/hash"
)

func TestChunkStoreZeroValue(t *testing.T) {
	assert := assert.New(t)
	_, _, _, store := makeStoreWithFakes(t)
	defer func() {
		require.NoError(t, store.Close())
	}()

	// No manifest file gets written until the first call to Commit(). Prior to that, Root() will simply return hash.Hash{}.
	h, err := store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(hash.Hash{}, h)
	assert.Equal(constants.FormatLD1String, store.Version())
}

func TestChunkStoreVersion(t *testing.T) {
	assert := assert.New(t)
	_, _, _, store := makeStoreWithFakes(t)
	defer func() {
		require.NoError(t, store.Close())
	}()

	assert.Equal(constants.FormatLD1String, store.Version())
	newChunk := chunks.NewChunk([]byte("new root"))
	require.NoError(t, store.Put(context.Background(), newChunk, noopGetAddrs))
	newRoot := newChunk.Hash()

	success, err := store.Commit(context.Background(), newRoot, hash.Hash{})
	require.NoError(t, err)
	if assert.True(success) {
		assert.Equal(constants.FormatLD1String, store.Version())
	}
}

func TestChunkStoreRebase(t *testing.T) {
	assert := assert.New(t)
	fm, p, q, store := makeStoreWithFakes(t)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	h, err := store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(hash.Hash{}, h)
	assert.Equal(constants.FormatLD1String, store.Version())

	// Simulate another process writing a manifest behind store's back.
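	// interloperWrite persists chunks and updates the fake manifest directly;
	// store keeps serving its stale root until Rebase() below picks it up.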
	newRoot, chks, err := interloperWrite(fm, p, []byte("new root"), []byte("hello2"), []byte("goodbye2"), []byte("badbye2"))
	require.NoError(t, err)

	// state in store shouldn't change
	h, err = store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(hash.Hash{}, h)
	assert.Equal(constants.FormatLD1String, store.Version())

	err = store.Rebase(context.Background())
	require.NoError(t, err)

	// NOW it should
	h, err = store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(newRoot, h)
	assert.Equal(constants.FormatLD1String, store.Version())
	assertDataInStore(chks, store, assert)
}

func TestChunkStoreCommit(t *testing.T) {
	assert := assert.New(t)
	_, _, q, store := makeStoreWithFakes(t)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	h, err := store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(hash.Hash{}, h)

	newRootChunk := chunks.NewChunk([]byte("new root"))
	newRoot := newRootChunk.Hash()
	err = store.Put(context.Background(), newRootChunk, noopGetAddrs)
	require.NoError(t, err)
	success, err := store.Commit(context.Background(), newRoot, hash.Hash{})
	require.NoError(t, err)
	if assert.True(success) {
		has, err := store.Has(context.Background(), newRoot)
		require.NoError(t, err)
		assert.True(has)
		h, err := store.Root(context.Background())
		require.NoError(t, err)
		assert.Equal(newRoot, h)
	}

	secondRootChunk := chunks.NewChunk([]byte("newer root"))
	secondRoot := secondRootChunk.Hash()
	err = store.Put(context.Background(), secondRootChunk, noopGetAddrs)
	require.NoError(t, err)
	success, err = store.Commit(context.Background(), secondRoot, newRoot)
	require.NoError(t, err)
	if assert.True(success) {
		h, err := store.Root(context.Background())
		require.NoError(t, err)
		assert.Equal(secondRoot, h)
		has, err := store.Has(context.Background(), newRoot)
		require.NoError(t, err)
		assert.True(has)
		has, err = store.Has(context.Background(), secondRoot)
		require.NoError(t, err)
		assert.True(has)
	}
}

func TestChunkStoreManifestAppearsAfterConstruction(t *testing.T) {
	assert := assert.New(t)
	fm, p, q, store := makeStoreWithFakes(t)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	h, err := store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(hash.Hash{}, h)
	assert.Equal(constants.FormatLD1String, store.Version())

	// Simulate another process writing a manifest behind store's back.
	_, _, err = interloperWrite(fm, p, []byte("new root"), []byte("hello2"), []byte("goodbye2"), []byte("badbye2"))
	require.NoError(t, err)

	// state in store shouldn't change
	h, err = store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(hash.Hash{}, h)
	assert.Equal(constants.FormatLD1String, store.Version())
}

func TestChunkStoreManifestFirstWriteByOtherProcess(t *testing.T) {
	assert := assert.New(t)
	fm := &fakeManifest{}
	mm := manifestManager{fm, newManifestCache(0), newManifestLocks()}
	q := NewUnlimitedMemQuotaProvider()
	defer func() {
		require.EqualValues(t, 0, q.Usage())
	}()
	p := newFakeTablePersister(q)

	// Simulate another process writing a manifest behind store's back.
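	// Here the manifest is populated before the store is even constructed, so
	// the store should observe the interloper's root on its first Root() call.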
	newRoot, chks, err := interloperWrite(fm, p, []byte("new root"), []byte("hello2"), []byte("goodbye2"), []byte("badbye2"))
	require.NoError(t, err)

	store, err := newNomsBlockStore(context.Background(), constants.FormatLD1String, mm, p, q, inlineConjoiner{defaultMaxTables}, defaultMemTableSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, store.Close())
	}()

	h, err := store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(newRoot, h)
	assert.Equal(constants.FormatLD1String, store.Version())
	assertDataInStore(chks, store, assert)
}

func TestChunkStoreCommitOptimisticLockFail(t *testing.T) {
	assert := assert.New(t)
	fm, p, q, store := makeStoreWithFakes(t)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	// Simulate another process writing a manifest behind store's back.
	newRoot, chks, err := interloperWrite(fm, p, []byte("new root"), []byte("hello2"), []byte("goodbye2"), []byte("badbye2"))
	require.NoError(t, err)

	newChunk := chunks.NewChunk([]byte("new root 2"))
	require.NoError(t, store.Put(context.Background(), newChunk, noopGetAddrs))
	newRoot2 := newChunk.Hash()
	success, err := store.Commit(context.Background(), newRoot2, hash.Hash{})
	require.NoError(t, err)
	assert.False(success)
	assertDataInStore(chks, store, assert)
	success, err = store.Commit(context.Background(), newRoot2, newRoot)
	require.NoError(t, err)
	assert.True(success)
}

func TestChunkStoreManifestPreemptiveOptimisticLockFail(t *testing.T) {
	assert := assert.New(t)
	fm := &fakeManifest{}
	mm := manifestManager{fm, newManifestCache(defaultManifestCacheSize), newManifestLocks()}
	q := NewUnlimitedMemQuotaProvider()
	p := newFakeTablePersister(q)

	c := inlineConjoiner{defaultMaxTables}

	store, err := newNomsBlockStore(context.Background(), constants.FormatLD1String, mm, p, q, c, defaultMemTableSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	// Simulate another goroutine writing a manifest behind store's back.
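	// The interloper shares mm, p, and q with store, so its Commit lands in
	// the same manifest that store consults on its own Commit attempt.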
	interloper, err := newNomsBlockStore(context.Background(), constants.FormatLD1String, mm, p, q, c, defaultMemTableSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, interloper.Close())
	}()

	chunk := chunks.NewChunk([]byte("hello"))
	err = interloper.Put(context.Background(), chunk, noopGetAddrs)
	require.NoError(t, err)
	success, err := interloper.Commit(context.Background(), chunk.Hash(), hash.Hash{})
	require.NoError(t, err)
	assert.True(success)

	// Try to land a new chunk in store, which should fail AND not persist the contents of store.mt
	chunk = chunks.NewChunk([]byte("goodbye"))
	err = store.Put(context.Background(), chunk, noopGetAddrs)
	require.NoError(t, err)
	assert.NotNil(store.mt)
	success, err = store.Commit(context.Background(), chunk.Hash(), hash.Hash{})
	require.NoError(t, err)
	assert.False(success)
	assert.NotNil(store.mt)

	h, err := store.Root(context.Background())
	require.NoError(t, err)
	success, err = store.Commit(context.Background(), chunk.Hash(), h)
	require.NoError(t, err)
	assert.True(success)
	assert.Nil(store.mt)

	h, err = store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(chunk.Hash(), h)
	assert.Equal(constants.FormatLD1String, store.Version())
}

func TestChunkStoreCommitLocksOutFetch(t *testing.T) {
	assert := assert.New(t)
	fm := &fakeManifest{name: "foo"}
	upm := &updatePreemptManifest{manifest: fm}
	mm := manifestManager{upm, newManifestCache(defaultManifestCacheSize), newManifestLocks()}
	q := NewUnlimitedMemQuotaProvider()
	p := newFakeTablePersister(q)
	c := inlineConjoiner{defaultMaxTables}

	store, err := newNomsBlockStore(context.Background(), constants.FormatLD1String, mm, p, q, c, defaultMemTableSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	// store.Commit() should lock out calls to mm.Fetch()
	wg := sync.WaitGroup{}
	fetched := manifestContents{}
	upm.preUpdate = func() {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var err error
			_, fetched, _, err = mm.Fetch(context.Background(), nil)
			require.NoError(t, err)
		}()
	}

	rootChunk := chunks.NewChunk([]byte("new root"))
	err = store.Put(context.Background(), rootChunk, noopGetAddrs)
	require.NoError(t, err)
	h, err := store.Root(context.Background())
	require.NoError(t, err)
	success, err := store.Commit(context.Background(), rootChunk.Hash(), h)
	require.NoError(t, err)
	assert.True(success)

	wg.Wait()
	h, err = store.Root(context.Background())
	require.NoError(t, err)
	assert.Equal(h, fetched.root)
}

func TestChunkStoreSerializeCommits(t *testing.T) {
	assert := assert.New(t)
	fm := &fakeManifest{name: "foo"}
	upm := &updatePreemptManifest{manifest: fm}
	mc := newManifestCache(defaultManifestCacheSize)
	l := newManifestLocks()
	q := NewUnlimitedMemQuotaProvider()
	p := newFakeTablePersister(q)

	c := inlineConjoiner{defaultMaxTables}

	store, err := newNomsBlockStore(context.Background(), constants.FormatLD1String, manifestManager{upm, mc, l}, p, q, c, defaultMemTableSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	storeChunk := chunks.NewChunk([]byte("store"))
	interloperChunk := chunks.NewChunk([]byte("interloper"))
	updateCount := 0

	interloper, err := newNomsBlockStore(
		context.Background(),
		constants.FormatLD1String,
		manifestManager{
			updatePreemptManifest{fm, func() { updateCount++ }}, mc, l,
		},
		p,
		q,
		c,
		defaultMemTableSize)
	require.NoError(t, err)
	defer func() {
		require.NoError(t, interloper.Close())
	}()

	wg := sync.WaitGroup{}
	upm.preUpdate = func() {
		wg.Add(1)
		go func() {
			defer wg.Done()
			err := interloper.Put(context.Background(), interloperChunk, noopGetAddrs)
			require.NoError(t, err)
			h, err := interloper.Root(context.Background())
			require.NoError(t, err)
			success, err := interloper.Commit(context.Background(), h, h)
			require.NoError(t, err)
			assert.True(success)
		}()

		updateCount++
	}

	err = store.Put(context.Background(), storeChunk, noopGetAddrs)
	require.NoError(t, err)
	h, err := store.Root(context.Background())
	require.NoError(t, err)
	success, err := store.Commit(context.Background(), h, h)
	require.NoError(t, err)
	assert.True(success)

	wg.Wait()
	assert.Equal(2, updateCount)
	has, err := interloper.Has(context.Background(), storeChunk.Hash())
	require.NoError(t, err)
	assert.True(has)
	has, err = interloper.Has(context.Background(), interloperChunk.Hash())
	require.NoError(t, err)
	assert.True(has)
}

// makeStoreWithFakes creates a NomsBlockStore backed by a fakeManifest and a
// fakeTablePersister, so tests can exercise the store without touching disk.
func makeStoreWithFakes(t *testing.T) (fm *fakeManifest, p tablePersister, q MemoryQuotaProvider, store *NomsBlockStore) {
	fm = &fakeManifest{}
	mm := manifestManager{fm, newManifestCache(0), newManifestLocks()}
	q = NewUnlimitedMemQuotaProvider()
	p = newFakeTablePersister(q)
	store, err := newNomsBlockStore(context.Background(), constants.FormatLD1String, mm, p, q, inlineConjoiner{defaultMaxTables}, 0)
	require.NoError(t, err)
	return
}

// interloperWrite simulates another process writing a manifest behind store's
// back: it persists |rootChunk| and |chks| through |p| and points |fm| at the
// resulting table, returning the new root and every persisted chunk payload.
func interloperWrite(fm *fakeManifest, p tablePersister, rootChunk []byte, chks ...[]byte) (newRoot hash.Hash, persisted [][]byte, err error) {
	newLock, newRoot := computeAddr([]byte("locker")), hash.Of(rootChunk)
	persisted = append(chks, rootChunk)

	var src chunkSource
	src, err = p.Persist(context.Background(), createMemTable(persisted), nil, &Stats{})
	if err != nil {
		return hash.Hash{}, nil, err
	}

	fm.set(constants.FormatLD1String, newLock, newRoot, []tableSpec{{src.hash(), uint32(len(chks) + 1)}}, nil)

	if err = src.close(); err != nil {
		return hash.Hash{}, nil, err
	}
	return
}

// createMemTable builds a memTable containing each of the given chunk payloads.
func createMemTable(chks [][]byte) *memTable {
	mt := newMemTable(1 << 10)
	for _, c := range chks {
		mt.addChunk(computeAddr(c), c)
	}
	return mt
}

// assertDataInStore asserts that |store| holds a chunk for every payload in |slices|.
func assertDataInStore(slices [][]byte, store chunks.ChunkStore, assert *assert.Assertions) {
	for _, data := range slices {
		ok, err := store.Has(context.Background(), chunks.NewChunk(data).Hash())
		assert.NoError(err)
		assert.True(ok)
	}
}

// fakeManifest simulates a fileManifest without touching disk.
type fakeManifest struct {
	name     string
	contents manifestContents
	mu       sync.RWMutex
}

func (fm *fakeManifest) Name() string { return fm.name }

// ParseIfExists returns any fake manifest data the caller has injected using
// Update() or set(). It treats an empty |fm.lock| as a non-existent manifest.
func (fm *fakeManifest) ParseIfExists(ctx context.Context, stats *Stats, readHook func() error) (bool, manifestContents, error) {
	fm.mu.RLock()
	defer fm.mu.RUnlock()
	if !fm.contents.lock.IsEmpty() {
		return true, fm.contents, nil
	}

	return false, manifestContents{}, nil
}

// Update checks whether |lastLock| == |fm.lock| and, if so, updates internal
// fake manifest state as per the manifest.Update() contract: |fm.lock| is set
// to |newLock|, |fm.root| is set to |newRoot|, and the contents of |specs|
// replace |fm.tableSpecs|. If |lastLock| != |fm.lock|, then the update
// fails. Regardless of success or failure, the current state is returned.
func (fm *fakeManifest) Update(ctx context.Context, lastLock hash.Hash, newContents manifestContents, stats *Stats, writeHook func() error) (manifestContents, error) {
	fm.mu.Lock()
	defer fm.mu.Unlock()
	if fm.contents.lock == lastLock {
		fm.contents = manifestContents{
			manifestVers: StorageVersion,
			nbfVers:      newContents.nbfVers,
			lock:         newContents.lock,
			root:         newContents.root,
			gcGen:        hash.Hash{},
		}
		fm.contents.specs = make([]tableSpec, len(newContents.specs))
		copy(fm.contents.specs, newContents.specs)
		if len(newContents.appendix) > 0 {
			fm.contents.appendix = make([]tableSpec, len(newContents.appendix))
			copy(fm.contents.appendix, newContents.appendix)
		}
	}
	return fm.contents, nil
}

func (fm *fakeManifest) set(version string, lock hash.Hash, root hash.Hash, specs, appendix []tableSpec) {
	fm.contents = manifestContents{
		manifestVers: StorageVersion,
		nbfVers:      version,
		lock:         lock,
		root:         root,
		gcGen:        hash.Hash{},
		specs:        specs,
		appendix:     appendix,
	}
}

func newFakeTableSet(q MemoryQuotaProvider) tableSet {
	return tableSet{p: newFakeTablePersister(q), q: q, rl: make(chan struct{}, 1)}
}

func newFakeTablePersister(q MemoryQuotaProvider) fakeTablePersister {
	return fakeTablePersister{q, map[hash.Hash][]byte{}, map[hash.Hash]bool{}, map[hash.Hash]bool{}, &sync.RWMutex{}}
}

// fakeTablePersister keeps persisted table data in memory, keyed by table name.
type fakeTablePersister struct {
	q             MemoryQuotaProvider
	sources       map[hash.Hash][]byte
	sourcesToFail map[hash.Hash]bool
	opened        map[hash.Hash]bool
	mu            *sync.RWMutex
}

var _ tablePersister = fakeTablePersister{}

func (ftp fakeTablePersister) Persist(ctx context.Context, mt *memTable, haver chunkReader, stats *Stats) (chunkSource, error) {
	if mustUint32(mt.count()) == 0 {
		return emptyChunkSource{}, nil
	}

	name, data, chunkCount, err := mt.write(haver, stats)
	if err != nil {
		return emptyChunkSource{}, err
	} else if chunkCount == 0 {
		return emptyChunkSource{}, nil
	}

	ftp.mu.Lock()
	ftp.sources[name] = data
	ftp.mu.Unlock()

	ti, err := parseTableIndexByCopy(ctx, data, ftp.q)
	if err != nil {
		return nil, err
	}

	cs, err := newTableReader(ti, tableReaderAtFromBytes(data), fileBlockSize)
	if err != nil {
		return emptyChunkSource{}, err
	}
	return chunkSourceAdapter{cs, name}, nil
}

func (ftp fakeTablePersister) ConjoinAll(ctx context.Context, sources chunkSources, stats *Stats) (chunkSource, cleanupFunc, error) {
	name, data, chunkCount, err := compactSourcesToBuffer(sources)
	if err != nil {
		return nil, nil, err
	} else if chunkCount == 0 {
		return emptyChunkSource{}, func() {}, nil
	}

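	// Record the conjoined table under its content hash, mirroring what
	// Persist does for a single memTable.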
	ftp.mu.Lock()
	defer ftp.mu.Unlock()
	ftp.sources[name] = data

	ti, err := parseTableIndexByCopy(ctx, data, ftp.q)
	if err != nil {
		return nil, nil, err
	}

	cs, err := newTableReader(ti, tableReaderAtFromBytes(data), fileBlockSize)
	if err != nil {
		return nil, nil, err
	}
	return chunkSourceAdapter{cs, name}, func() {}, nil
}

// compactSourcesToBuffer writes every chunk in |sources| into a single
// in-memory table, returning the table's name, serialized bytes, and chunk count.
func compactSourcesToBuffer(sources chunkSources) (name hash.Hash, data []byte, chunkCount uint32, err error) {
	totalData := uint64(0)
	for _, src := range sources {
		chunkCount += mustUint32(src.count())
		totalData += mustUint64(src.uncompressedLen())
	}
	if chunkCount == 0 {
		return
	}

	maxSize := maxTableSize(uint64(chunkCount), totalData)
	buff := make([]byte, maxSize) // This can blow up RAM
	tw := newTableWriter(buff, nil)
	errString := ""

	ctx := context.Background()
	for _, src := range sources {
		ch := make(chan extractRecord)
		go func() {
			defer close(ch)
			err = extractAllChunks(ctx, src, func(rec extractRecord) {
				ch <- rec
			})
			if err != nil {
				ch <- extractRecord{a: src.hash(), err: err}
			}
		}()

		for rec := range ch {
			if rec.err != nil {
				errString += fmt.Sprintf("Failed to extract %s:\n %v\n******\n\n", rec.a, rec.err)
				continue
			}
			tw.addChunk(rec.a, rec.data)
		}
	}

	if errString != "" {
		return hash.Hash{}, nil, 0, errors.New(errString)
	}

	tableSize, name, err := tw.finish()
	if err != nil {
		return hash.Hash{}, nil, 0, err
	}

	return name, buff[:tableSize], chunkCount, nil
}

func (ftp fakeTablePersister) Open(ctx context.Context, name hash.Hash, chunkCount uint32, stats *Stats) (chunkSource, error) {
	ftp.mu.Lock()
	defer ftp.mu.Unlock()

	if _, ok := ftp.sourcesToFail[name]; ok {
		return nil, errors.New("intentional failure")
	}
	data := ftp.sources[name]
	ftp.opened[name] = true

	ti, err := parseTableIndexByCopy(ctx, data, ftp.q)
	if err != nil {
		return nil, err
	}

	cs, err := newTableReader(ti, tableReaderAtFromBytes(data), fileBlockSize)
	if err != nil {
		return emptyChunkSource{}, err
	}
	return chunkSourceAdapter{cs, name}, nil
}

func (ftp fakeTablePersister) Exists(ctx context.Context, name hash.Hash, chunkCount uint32, stats *Stats) (bool, error) {
	if _, ok := ftp.sourcesToFail[name]; ok {
		return false, errors.New("intentional failure")
	}
	return true, nil
}

func (ftp fakeTablePersister) PruneTableFiles(_ context.Context, _ func() []hash.Hash, _ time.Time) error {
	return chunks.ErrUnsupportedOperation
}

func (ftp fakeTablePersister) Close() error {
	return nil
}

func (ftp fakeTablePersister) AccessMode() chunks.ExclusiveAccessMode {
	return chunks.ExclusiveAccessMode_Shared
}

// extractAllChunks reads every chunk in |src| and passes it to |cb|.
func extractAllChunks(ctx context.Context, src chunkSource, cb func(rec extractRecord)) (err error) {
	var index tableIndex
	if index, err = src.index(); err != nil {
		return err
	}

	for i := uint32(0); i < index.chunkCount(); i++ {
		var h hash.Hash
		_, err = index.indexEntry(i, &h)
		if err != nil {
			return err
		}

		data, err := src.get(ctx, h, nil)
		if err != nil {
			return err
		}
		cb(extractRecord{a: h, data: data})
	}
	return
}
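
// TestFakeManifestUpdateContractSketch is an illustrative sketch, not part of
// the upstream file: it exercises the optimistic-locking contract documented
// on fakeManifest.Update() directly. Update applies |newContents| only when
// |lastLock| matches the manifest's current lock; otherwise it leaves the
// existing contents in place and returns them unchanged.
func TestFakeManifestUpdateContractSketch(t *testing.T) {
	fm := &fakeManifest{}
	lock1 := computeAddr([]byte("lock1"))
	root1 := hash.Of([]byte("root1"))

	// First update: the manifest is empty, so lastLock == hash.Hash{} matches.
	contents, err := fm.Update(context.Background(), hash.Hash{}, manifestContents{
		nbfVers: constants.FormatLD1String,
		lock:    lock1,
		root:    root1,
	}, &Stats{}, nil)
	require.NoError(t, err)
	require.Equal(t, root1, contents.root)

	// Stale lastLock: the current lock is now lock1, so this update must be
	// rejected and root1 must remain the root.
	contents, err = fm.Update(context.Background(), hash.Hash{}, manifestContents{
		nbfVers: constants.FormatLD1String,
		lock:    computeAddr([]byte("lock2")),
		root:    hash.Of([]byte("root2")),
	}, &Stats{}, nil)
	require.NoError(t, err)
	require.Equal(t, root1, contents.root)
}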