github.com/dolthub/dolt/go@v0.40.5-0.20240520175717-68db7794bea6/store/nbs/store_test.go

// Copyright 2019 Dolthub, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package nbs

import (
	"bytes"
	"context"
	"encoding/binary"
	"fmt"
	"io"
	"math/rand"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"github.com/google/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/sync/errgroup"

	"github.com/dolthub/dolt/go/libraries/utils/set"
	"github.com/dolthub/dolt/go/libraries/utils/test"
	"github.com/dolthub/dolt/go/store/chunks"
	"github.com/dolthub/dolt/go/store/hash"
	"github.com/dolthub/dolt/go/store/types"
	"github.com/dolthub/dolt/go/store/util/tempfiles"
)

func makeTestLocalStore(t *testing.T, maxTableFiles int) (st *NomsBlockStore, nomsDir string, q MemoryQuotaProvider) {
	ctx := context.Background()
	nomsDir = filepath.Join(tempfiles.MovableTempFileProvider.GetTempDir(), "noms_"+uuid.New().String()[:8])
	err := os.MkdirAll(nomsDir, os.ModePerm)
	require.NoError(t, err)

	// create a v5 manifest
	fm, err := getFileManifest(ctx, nomsDir, asyncFlush)
	require.NoError(t, err)
	_, err = fm.Update(ctx, hash.Hash{}, manifestContents{}, &Stats{}, nil)
	require.NoError(t, err)

	q = NewUnlimitedMemQuotaProvider()
	st, err = newLocalStore(ctx, types.Format_Default.VersionString(), nomsDir, defaultMemTableSize, maxTableFiles, q)
	require.NoError(t, err)
	return st, nomsDir, q
}

type fileToData map[string][]byte

func writeLocalTableFiles(t *testing.T, st *NomsBlockStore, numTableFiles, seed int) (map[string]int, fileToData) {
	ctx := context.Background()
	fileToData := make(fileToData, numTableFiles)
	fileIDToNumChunks := make(map[string]int, numTableFiles)
	for i := 0; i < numTableFiles; i++ {
		var chunkData [][]byte
		for j := 0; j < i+1; j++ {
			chunkData = append(chunkData, []byte(fmt.Sprintf("%d:%d:%d", i, j, seed)))
		}
		data, addr, err := buildTable(chunkData)
		require.NoError(t, err)
		fileID := addr.String()
		fileToData[fileID] = data
		fileIDToNumChunks[fileID] = i + 1
		err = st.WriteTableFile(ctx, fileID, i+1, nil, func() (io.ReadCloser, uint64, error) {
			return io.NopCloser(bytes.NewReader(data)), uint64(len(data)), nil
		})
		require.NoError(t, err)
	}
	return fileIDToNumChunks, fileToData
}

func populateLocalStore(t *testing.T, st *NomsBlockStore, numTableFiles int) fileToData {
	ctx := context.Background()
	fileIDToNumChunks, fileToData := writeLocalTableFiles(t, st, numTableFiles, 0)
	err := st.AddTableFilesToManifest(ctx, fileIDToNumChunks)
	require.NoError(t, err)
	return fileToData
}

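// The three helpers above compose into the common setup pattern for these
// tests: build raw table file data with buildTable, hand it to the store via
// WriteTableFile, then publish it with AddTableFilesToManifest. A minimal
// sketch of that flow, reusing only calls exercised above (the table file
// count here is arbitrary):
//
//	st, nomsDir, _ := makeTestLocalStore(t, defaultMaxTables)
//	defer st.Close()
//	ftd := populateLocalStore(t, st, 4) // 4 table files, manifest updated
//	_, _ = ftd, nomsDir
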
func TestNBSAsTableFileStore(t *testing.T) {
	ctx := context.Background()

	numTableFiles := 128
	assert.Greater(t, defaultMaxTables, numTableFiles)
	st, _, q := makeTestLocalStore(t, defaultMaxTables)
	defer func() {
		require.NoError(t, st.Close())
		require.Equal(t, uint64(0), q.Usage())
	}()
	fileToData := populateLocalStore(t, st, numTableFiles)

	_, sources, _, err := st.Sources(ctx)
	require.NoError(t, err)
	assert.Equal(t, numTableFiles, len(sources))

	for _, src := range sources {
		fileID := src.FileID()
		expected, ok := fileToData[fileID]
		require.True(t, ok)

		rd, contentLength, err := src.Open(context.Background())
		require.NoError(t, err)
		require.Equal(t, len(expected), int(contentLength))

		data, err := io.ReadAll(rd)
		require.NoError(t, err)

		err = rd.Close()
		require.NoError(t, err)

		assert.Equal(t, expected, data)
	}

	size, err := st.Size(ctx)
	require.NoError(t, err)
	require.Greater(t, size, uint64(0))
}

func TestConcurrentPuts(t *testing.T) {
	st, _, _ := makeTestLocalStore(t, 100)
	defer st.Close()

	errgrp, ctx := errgroup.WithContext(context.Background())

	n := 10
	hashes := make([]hash.Hash, n)
	for i := 0; i < n; i++ {
		c := makeChunk(uint32(i))
		hashes[i] = c.Hash()
		errgrp.Go(func() error {
			err := st.Put(ctx, c, noopGetAddrs)
			require.NoError(t, err)
			return nil
		})
	}

	err := errgrp.Wait()
	require.NoError(t, err)
	require.Equal(t, uint64(n), st.putCount)

	for i := 0; i < n; i++ {
		h := hashes[i]
		c, err := st.Get(ctx, h)
		require.NoError(t, err)
		require.False(t, c.IsEmpty())
	}
}

func makeChunk(i uint32) chunks.Chunk {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, i)
	return chunks.NewChunk(b)
}

type tableFileSet map[string]chunks.TableFile

func (s tableFileSet) contains(fileName string) (ok bool) {
	_, ok = s[fileName]
	return ok
}

// findAbsent returns the table file names in |ftd| that don't exist in |s|.
func (s tableFileSet) findAbsent(ftd fileToData) (absent []string) {
	for fileID := range ftd {
		if !s.contains(fileID) {
			absent = append(absent, fileID)
		}
	}
	return absent
}

func tableFileSetFromSources(sources []chunks.TableFile) (s tableFileSet) {
	s = make(tableFileSet, len(sources))
	for _, src := range sources {
		s[src.FileID()] = src
	}
	return s
}

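// A quick illustration of the set helpers above (keys are hypothetical;
// real file IDs are the hash strings produced by buildTable):
//
//	s := tableFileSet{"a": nil, "b": nil}
//	ftd := fileToData{"a": nil, "c": nil}
//	s.findAbsent(ftd) // -> ["c"]: "c" appears in ftd but not in s
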
func TestNBSPruneTableFiles(t *testing.T) {
	ctx := context.Background()

	// overpopulate with table files
	numTableFiles := 64
	maxTableFiles := 16
	st, nomsDir, _ := makeTestLocalStore(t, maxTableFiles)
	defer st.Close()
	fileToData := populateLocalStore(t, st, numTableFiles)

	_, toDeleteToData := writeLocalTableFiles(t, st, numTableFiles, 32)

	// add a chunk and flush to trigger a conjoin
	c := chunks.NewChunk([]byte("it's a boy!"))
	addrs := hash.NewHashSet()
	ok, err := st.addChunk(ctx, c, func(c chunks.Chunk) chunks.GetAddrsCb {
		return func(ctx context.Context, _ hash.HashSet, _ chunks.PendingRefExists) error {
			addrs.Insert(c.Hash())
			return nil
		}
	}, st.hasMany)
	require.NoError(t, err)
	require.True(t, ok)
	ok, err = st.Commit(ctx, st.upstream.root, st.upstream.root)
	require.NoError(t, err)
	require.True(t, ok)

	_, sources, _, err := st.Sources(ctx)
	require.NoError(t, err)
	assert.Greater(t, numTableFiles, len(sources))

	// find which input table files were conjoined
	tfSet := tableFileSetFromSources(sources)
	absent := tfSet.findAbsent(fileToData)
	// assert some input table files were conjoined
	assert.NotEmpty(t, absent)

	toDelete := tfSet.findAbsent(toDeleteToData)
	assert.Len(t, toDelete, len(toDeleteToData))

	currTableFiles := func(dirName string) *set.StrSet {
		infos, err := os.ReadDir(dirName)
		require.NoError(t, err)
		curr := set.NewStrSet(nil)
		for _, fi := range infos {
			if fi.Name() != manifestFileName && fi.Name() != lockFileName {
				curr.Add(fi.Name())
			}
		}
		return curr
	}

	preGC := currTableFiles(nomsDir)
	for _, tf := range sources {
		assert.True(t, preGC.Contains(tf.FileID()))
	}
	for _, fileName := range toDelete {
		assert.True(t, preGC.Contains(fileName))
	}

	err = st.PruneTableFiles(ctx)
	require.NoError(t, err)

	postGC := currTableFiles(nomsDir)
	for _, tf := range sources {
		assert.True(t, postGC.Contains(tf.FileID()))
	}
	for _, fileName := range absent {
		assert.False(t, postGC.Contains(fileName))
	}
	for _, fileName := range toDelete {
		assert.False(t, postGC.Contains(fileName))
	}
	infos, err := os.ReadDir(nomsDir)
	require.NoError(t, err)

	// assert that we only have files for current sources,
	// the manifest, and the lock file
	assert.Equal(t, len(sources)+2, len(infos))

	size, err := st.Size(ctx)
	require.NoError(t, err)
	require.Greater(t, size, uint64(0))
}

func makeChunkSet(N, size int) (s map[hash.Hash]chunks.Chunk) {
	bb := make([]byte, size*N)
	// nudge the clock forward so back-to-back calls seed differently
	time.Sleep(10 * time.Nanosecond)
	rand.Seed(time.Now().UnixNano())
	rand.Read(bb)

	s = make(map[hash.Hash]chunks.Chunk, N)
	offset := 0
	for i := 0; i < N; i++ {
		c := chunks.NewChunk(bb[offset : offset+size])
		s[c.Hash()] = c
		offset += size
	}

	return
}

func TestNBSCopyGC(t *testing.T) {
	ctx := context.Background()
	st, _, _ := makeTestLocalStore(t, 8)
	defer st.Close()

	keepers := makeChunkSet(64, 64)
	tossers := makeChunkSet(64, 64)

	for _, c := range keepers {
		err := st.Put(ctx, c, noopGetAddrs)
		require.NoError(t, err)
	}
	for h, c := range keepers {
		out, err := st.Get(ctx, h)
		require.NoError(t, err)
		assert.Equal(t, c, out)
	}

	for h := range tossers {
		// assert mutually exclusive chunk sets
		c, ok := keepers[h]
		require.False(t, ok)
		assert.Equal(t, chunks.Chunk{}, c)
	}
	for _, c := range tossers {
		err := st.Put(ctx, c, noopGetAddrs)
		require.NoError(t, err)
	}
	for h, c := range tossers {
		out, err := st.Get(ctx, h)
		require.NoError(t, err)
		assert.Equal(t, c, out)
	}

	r, err := st.Root(ctx)
	require.NoError(t, err)

	ok, err := st.Commit(ctx, r, r)
	require.NoError(t, err)
	require.True(t, ok)

	keepChan := make(chan []hash.Hash, 16)
	var msErr error
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		require.NoError(t, st.BeginGC(nil))
		msErr = st.MarkAndSweepChunks(ctx, keepChan, nil)
		st.EndGC()
		wg.Done()
	}()
	for h := range keepers {
		keepChan <- []hash.Hash{h}
	}
	close(keepChan)
	wg.Wait()
	require.NoError(t, msErr)

	for h, c := range keepers {
		out, err := st.Get(ctx, h)
		require.NoError(t, err)
		assert.Equal(t, c, out)
	}
	for h := range tossers {
		out, err := st.Get(ctx, h)
		require.NoError(t, err)
		assert.Equal(t, chunks.EmptyChunk, out)
	}
}

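// The GC handshake exercised by TestNBSCopyGC, in outline: BeginGC registers
// the collection, MarkAndSweepChunks drains batches of hashes to keep from
// the channel until it is closed, and EndGC releases the store for normal
// writes. A minimal sketch under those assumptions (error handling elided):
//
//	keep := make(chan []hash.Hash)
//	go func() {
//		_ = st.BeginGC(nil)
//		_ = st.MarkAndSweepChunks(ctx, keep, nil)
//		st.EndGC()
//	}()
//	keep <- []hash.Hash{someHash} // hypothetical hash to retain
//	close(keep)
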
func persistTableFileSources(t *testing.T, p tablePersister, numTableFiles int) (map[hash.Hash]uint32, []hash.Hash) {
	tableFileMap := make(map[hash.Hash]uint32, numTableFiles)
	mapIds := make([]hash.Hash, numTableFiles)

	for i := 0; i < numTableFiles; i++ {
		var chunkData [][]byte
		for j := 0; j < i+1; j++ {
			chunkData = append(chunkData, []byte(fmt.Sprintf("%d:%d", i, j)))
		}
		_, addr, err := buildTable(chunkData)
		require.NoError(t, err)
		fileIDHash, ok := hash.MaybeParse(addr.String())
		require.True(t, ok)
		tableFileMap[fileIDHash] = uint32(i + 1)
		mapIds[i] = fileIDHash
		cs, err := p.Persist(context.Background(), createMemTable(chunkData), nil, &Stats{})
		require.NoError(t, err)
		require.NoError(t, cs.close())
	}
	return tableFileMap, mapIds
}

func prepStore(ctx context.Context, t *testing.T, assert *assert.Assertions) (*fakeManifest, tablePersister, MemoryQuotaProvider, *NomsBlockStore, *Stats, chunks.Chunk) {
	fm, p, q, store := makeStoreWithFakes(t)
	h, err := store.Root(ctx)
	require.NoError(t, err)
	assert.Equal(hash.Hash{}, h)

	rootChunk := chunks.NewChunk([]byte("root"))
	rootHash := rootChunk.Hash()
	err = store.Put(ctx, rootChunk, noopGetAddrs)
	require.NoError(t, err)
	success, err := store.Commit(ctx, rootHash, hash.Hash{})
	require.NoError(t, err)
	if assert.True(success) {
		has, err := store.Has(ctx, rootHash)
		require.NoError(t, err)
		assert.True(has)
		h, err := store.Root(ctx)
		require.NoError(t, err)
		assert.Equal(rootHash, h)
	}

	stats := &Stats{}

	_, upstream, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)
	// expect a single spec for the initial commit
	assert.Equal(1, upstream.NumTableSpecs())
	// start with no appendixes
	assert.Equal(0, upstream.NumAppendixSpecs())
	return fm, p, q, store, stats, rootChunk
}

func TestNBSUpdateManifestWithAppendixOptions(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	_, p, q, store, _, _ := prepStore(ctx, t, assert)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	// persist table files to the tablePersister
	appendixUpdates, appendixIds := persistTableFileSources(t, p, 4)

	tests := []struct {
		description                   string
		option                        ManifestAppendixOption
		appendixSpecIds               []hash.Hash
		expectedNumberOfSpecs         int
		expectedNumberOfAppendixSpecs int
		expectedError                 error
	}{
		{
			description:     "should error on unsupported appendix option",
			appendixSpecIds: appendixIds[:1],
			expectedError:   ErrUnsupportedManifestAppendixOption,
		},
		{
			description:                   "should append to appendix",
			option:                        ManifestAppendixOption_Append,
			appendixSpecIds:               appendixIds[:2],
			expectedNumberOfSpecs:         3,
			expectedNumberOfAppendixSpecs: 2,
		},
		{
			description:                   "should replace appendix",
			option:                        ManifestAppendixOption_Set,
			appendixSpecIds:               appendixIds[3:],
			expectedNumberOfSpecs:         2,
			expectedNumberOfAppendixSpecs: 1,
		},
		{
			description:                   "should set appendix to nil",
			option:                        ManifestAppendixOption_Set,
			appendixSpecIds:               []hash.Hash{},
			expectedNumberOfSpecs:         1,
			expectedNumberOfAppendixSpecs: 0,
		},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			updates := make(map[hash.Hash]uint32)
			for _, id := range test.appendixSpecIds {
				updates[id] = appendixUpdates[id]
			}

			if test.expectedError == nil {
				info, err := store.UpdateManifestWithAppendix(ctx, updates, test.option)
				require.NoError(t, err)
				assert.Equal(test.expectedNumberOfSpecs, info.NumTableSpecs())
				assert.Equal(test.expectedNumberOfAppendixSpecs, info.NumAppendixSpecs())
			} else {
				_, err := store.UpdateManifestWithAppendix(ctx, updates, test.option)
				assert.Equal(test.expectedError, err)
			}
		})
	}
}

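// As the cases above suggest, ManifestAppendixOption_Append adds the given
// specs to the existing appendix while ManifestAppendixOption_Set replaces
// it outright (an empty update clears it), and every appendix spec is also
// counted among the table specs. A hedged sketch of a single append, with
// hypothetical values for the update map:
//
//	updates := map[hash.Hash]uint32{fileIDHash: numChunks}
//	info, err := store.UpdateManifestWithAppendix(ctx, updates, ManifestAppendixOption_Append)
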
func TestNBSUpdateManifestWithAppendix(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	fm, p, q, store, stats, _ := prepStore(ctx, t, assert)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	_, upstream, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// persist a table file to the tablePersister
	appendixUpdates, appendixIds := persistTableFileSources(t, p, 1)

	// ensure the appendix (and specs) are updated
	appendixFileId := appendixIds[0]
	updates := map[hash.Hash]uint32{appendixFileId: appendixUpdates[appendixFileId]}
	newContents, err := store.UpdateManifestWithAppendix(ctx, updates, ManifestAppendixOption_Append)
	require.NoError(t, err)
	assert.Equal(upstream.NumTableSpecs()+1, newContents.NumTableSpecs())
	assert.Equal(1, newContents.NumAppendixSpecs())
	assert.Equal(newContents.GetTableSpecInfo(0), newContents.GetAppendixTableSpecInfo(0))
}

func TestNBSUpdateManifestRetainsAppendix(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	fm, p, q, store, stats, _ := prepStore(ctx, t, assert)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	_, upstream, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// persist table files to the tablePersister
	specUpdates, specIds := persistTableFileSources(t, p, 3)

	// update the manifest
	firstSpecId := specIds[0]
	newContents, err := store.UpdateManifest(ctx, map[hash.Hash]uint32{firstSpecId: specUpdates[firstSpecId]})
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), newContents.NumTableSpecs())
	assert.Equal(0, upstream.NumAppendixSpecs())

	_, upstream, err = fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// update the appendix
	appendixSpecId := specIds[1]
	updates := map[hash.Hash]uint32{appendixSpecId: specUpdates[appendixSpecId]}
	newContents, err = store.UpdateManifestWithAppendix(ctx, updates, ManifestAppendixOption_Append)
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), newContents.NumTableSpecs())
	assert.Equal(1+upstream.NumAppendixSpecs(), newContents.NumAppendixSpecs())
	assert.Equal(newContents.GetAppendixTableSpecInfo(0), newContents.GetTableSpecInfo(0))

	_, upstream, err = fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// update the manifest again to show it successfully retains the
	// appendix and that the appendix specs are properly prepended to
	// |manifestContents.specs|
	secondSpecId := specIds[2]
	newContents, err = store.UpdateManifest(ctx, map[hash.Hash]uint32{secondSpecId: specUpdates[secondSpecId]})
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), newContents.NumTableSpecs())
	assert.Equal(upstream.NumAppendixSpecs(), newContents.NumAppendixSpecs())
	assert.Equal(newContents.GetAppendixTableSpecInfo(0), newContents.GetTableSpecInfo(0))
}

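// The invariant checked repeatedly above: appendix specs are prepended to
// |manifestContents.specs|, so after any appendix update the spec at table
// index 0 and the spec at appendix index 0 are the same entry:
//
//	newContents.GetTableSpecInfo(0) == newContents.GetAppendixTableSpecInfo(0)
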
func TestNBSCommitRetainsAppendix(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	fm, p, q, store, stats, rootChunk := prepStore(ctx, t, assert)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	_, upstream, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// persist a table file to the tablePersister
	appendixUpdates, appendixIds := persistTableFileSources(t, p, 1)

	// update the appendix
	appendixFileId := appendixIds[0]
	updates := map[hash.Hash]uint32{appendixFileId: appendixUpdates[appendixFileId]}
	newContents, err := store.UpdateManifestWithAppendix(ctx, updates, ManifestAppendixOption_Append)
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), newContents.NumTableSpecs())
	assert.Equal(1, newContents.NumAppendixSpecs())

	_, upstream, err = fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)

	// make a second commit
	secondRootChunk := chunks.NewChunk([]byte("newer root"))
	secondRoot := secondRootChunk.Hash()
	err = store.Put(ctx, secondRootChunk, noopGetAddrs)
	require.NoError(t, err)
	success, err := store.Commit(ctx, secondRoot, rootChunk.Hash())
	require.NoError(t, err)
	if assert.True(success) {
		h, err := store.Root(ctx)
		require.NoError(t, err)
		assert.Equal(secondRoot, h)
		has, err := store.Has(context.Background(), rootChunk.Hash())
		require.NoError(t, err)
		assert.True(has)
		has, err = store.Has(context.Background(), secondRoot)
		require.NoError(t, err)
		assert.True(has)
	}

	// ensure the commit did not blow away the appendix
	_, newUpstream, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)
	assert.Equal(1+upstream.NumTableSpecs(), newUpstream.NumTableSpecs())
	assert.Equal(upstream.NumAppendixSpecs(), newUpstream.NumAppendixSpecs())
	assert.Equal(upstream.GetAppendixTableSpecInfo(0), newUpstream.GetTableSpecInfo(0))
	assert.Equal(newUpstream.GetTableSpecInfo(0), newUpstream.GetAppendixTableSpecInfo(0))
}

func TestNBSOverwriteManifest(t *testing.T) {
	assert := assert.New(t)
	ctx := context.Background()

	fm, p, q, store, stats, _ := prepStore(ctx, t, assert)
	defer func() {
		require.NoError(t, store.Close())
		require.EqualValues(t, 0, q.Usage())
	}()

	// generate a random root hash
	newRoot := hash.New(test.RandomData(20))
	// create new table files and appendices
	newTableFiles, _ := persistTableFileSources(t, p, rand.Intn(4)+1)
	newAppendices, _ := persistTableFileSources(t, p, rand.Intn(4)+1)

	err := OverwriteStoreManifest(ctx, store, newRoot, newTableFiles, newAppendices)
	require.NoError(t, err)

	// verify that the persisted contents are correct
	_, newContents, err := fm.ParseIfExists(ctx, stats, nil)
	require.NoError(t, err)
	assert.Equal(len(newTableFiles)+len(newAppendices), newContents.NumTableSpecs())
	assert.Equal(len(newAppendices), newContents.NumAppendixSpecs())
	assert.Equal(newRoot, newContents.GetRoot())
}

func TestGuessPrefixOrdinal(t *testing.T) {
	prefixes := make([]uint64, 256)
	for i := range prefixes {
		// shift in uint64 space; i<<56 would overflow a signed int for i >= 128
		prefixes[i] = uint64(i) << 56
	}

	for i, pre := range prefixes {
		guess := GuessPrefixOrdinal(pre, 256)
		assert.Equal(t, i, guess)
	}
}
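
// GuessPrefixOrdinal estimates where an 8-byte hash prefix should sit among
// |count| uniformly distributed prefixes, which is why the evenly spaced
// prefixes above (i << 56 over 256 entries) map exactly to their ordinals.
// A one-off check consistent with the loop above:
//
//	GuessPrefixOrdinal(uint64(1)<<56, 256) // -> 1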