github.com/ethersphere/bee/v2@v2.2.0/pkg/storer/internal/reserve/reserve_test.go

// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package reserve_test

import (
	"bytes"
	"context"
	"errors"
	"math"
	"math/rand"
	"testing"
	"time"

	"github.com/ethersphere/bee/v2/pkg/crypto"
	"github.com/ethersphere/bee/v2/pkg/log"
	"github.com/ethersphere/bee/v2/pkg/postage"
	postagetesting "github.com/ethersphere/bee/v2/pkg/postage/testing"
	soctesting "github.com/ethersphere/bee/v2/pkg/soc/testing"
	"github.com/ethersphere/bee/v2/pkg/storage"
	chunk "github.com/ethersphere/bee/v2/pkg/storage/testing"
	"github.com/ethersphere/bee/v2/pkg/storer/internal"
	"github.com/ethersphere/bee/v2/pkg/storer/internal/chunkstamp"
	"github.com/ethersphere/bee/v2/pkg/storer/internal/reserve"
	"github.com/ethersphere/bee/v2/pkg/storer/internal/stampindex"
	"github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
	"github.com/ethersphere/bee/v2/pkg/swarm"
	kademlia "github.com/ethersphere/bee/v2/pkg/topology/mock"
	"github.com/stretchr/testify/assert"
)

func TestReserve(t *testing.T) {
	t.Parallel()

	baseAddr := swarm.RandAddress(t)

	ts := internal.NewInmemStorage()

	r, err := reserve.New(
		baseAddr,
		ts,
		0, kademlia.NewTopologyDriver(),
		log.Noop,
	)
	if err != nil {
		t.Fatal(err)
	}

	for b := 0; b < 2; b++ {
		for i := 1; i < 51; i++ {
			ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
			err := r.Put(context.Background(), ch)
			if err != nil {
				t.Fatal(err)
			}
			stampHash, err := ch.Stamp().Hash()
			if err != nil {
				t.Fatal(err)
			}
			checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: uint8(b), BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, false)
			checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: uint8(b), BinID: uint64(i), StampHash: stampHash}, false)
			checkChunk(t, ts, ch, false)
			h, err := r.Has(ch.Address(), ch.Stamp().BatchID(), stampHash)
			if err != nil {
				t.Fatal(err)
			}
			if !h {
				t.Fatalf("expected chunk addr %s binID %d", ch.Address(), i)
			}

			chGet, err := r.Get(context.Background(), ch.Address(), ch.Stamp().BatchID(), stampHash)
			if err != nil {
				t.Fatal(err)
			}
			if !chGet.Equal(ch) {
				t.Fatalf("expected addr %s, got %s", ch.Address(), chGet.Address())
			}
		}
	}
}
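
// TestReserveChunkType stores a random mix of content-addressed and single-owner
// chunks and verifies that the ChunkBinItem index records the correct chunk type
// for every stored chunk.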
func TestReserveChunkType(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	baseAddr := swarm.RandAddress(t)

	ts := internal.NewInmemStorage()

	r, err := reserve.New(
		baseAddr,
		ts,
		0, kademlia.NewTopologyDriver(),
		log.Noop,
	)
	if err != nil {
		t.Fatal(err)
	}

	storedChunksCA := 0
	storedChunksSO := 0
	for i := 0; i < 100; i++ {
		ch := chunk.GenerateTestRandomChunk()
		if rand.Intn(2) == 0 {
			storedChunksCA++
		} else {
			ch = chunk.GenerateTestRandomSoChunk(t, ch)
			storedChunksSO++
		}
		if err := r.Put(ctx, ch); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
	}

	err = ts.IndexStore().Iterate(storage.Query{
		Factory: func() storage.Item { return &reserve.ChunkBinItem{} },
	}, func(res storage.Result) (bool, error) {
		item := res.Entry.(*reserve.ChunkBinItem)
		if item.ChunkType == swarm.ChunkTypeContentAddressed {
			storedChunksCA--
		} else if item.ChunkType == swarm.ChunkTypeSingleOwner {
			storedChunksSO--
		} else {
			t.Fatalf("unexpected chunk type: %d", item.ChunkType)
		}
		return false, nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}

	if storedChunksCA != 0 {
		t.Fatal("unexpected number of content addressed chunks")
	}
	if storedChunksSO != 0 {
		t.Fatal("unexpected number of single owner chunks")
	}
}
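
// TestSameChunkAddress exercises puts of chunks that share an address but differ
// in stamp index, timestamp, or batch ID, and verifies which entry wins and how
// the reserve size changes in each case.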
func TestSameChunkAddress(t *testing.T) {
	t.Parallel()

	ctx := context.Background()
	baseAddr := swarm.RandAddress(t)

	ts := internal.NewInmemStorage()

	r, err := reserve.New(
		baseAddr,
		ts,
		0, kademlia.NewTopologyDriver(),
		log.Noop,
	)
	if err != nil {
		t.Fatal(err)
	}

	binBinIDs := make(map[uint8]uint64)

	t.Run("same stamp index and older timestamp", func(t *testing.T) {
		size1 := r.Size()
		signer := getSigner(t)
		batch := postagetesting.MustNewBatch()
		s1 := soctesting.GenerateMockSocWithSigner(t, []byte("data"), signer)
		ch1 := s1.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 1))
		s2 := soctesting.GenerateMockSocWithSigner(t, []byte("update"), signer)
		ch2 := s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 0))
		err = r.Put(ctx, ch1)
		if err != nil {
			t.Fatal(err)
		}
		bin := swarm.Proximity(baseAddr.Bytes(), ch1.Address().Bytes())
		binBinIDs[bin] += 1
		err = r.Put(ctx, ch2)
		if !errors.Is(err, storage.ErrOverwriteNewerChunk) {
			t.Fatal("expected error")
		}
		size2 := r.Size()
		if size2-size1 != 1 {
			t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1)
		}
	})

	t.Run("different stamp index and older timestamp", func(t *testing.T) {
		size1 := r.Size()
		signer := getSigner(t)
		batch := postagetesting.MustNewBatch()
		s1 := soctesting.GenerateMockSocWithSigner(t, []byte("data"), signer)
		ch1 := s1.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 2))
		s2 := soctesting.GenerateMockSocWithSigner(t, []byte("update"), signer)
		ch2 := s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 1, 0))
		err = r.Put(ctx, ch1)
		if err != nil {
			t.Fatal(err)
		}
		bin := swarm.Proximity(baseAddr.Bytes(), ch1.Address().Bytes())
		binBinIDs[bin] += 1
		err = r.Put(ctx, ch2)
		if !errors.Is(err, storage.ErrOverwriteNewerChunk) {
			t.Fatal("expected error")
		}
		size2 := r.Size()
		if size2-size1 != 1 {
			t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1)
		}
	})

	replace := func(t *testing.T, ch1 swarm.Chunk, ch2 swarm.Chunk, ch1BinID, ch2BinID uint64) {
		t.Helper()

		err := r.Put(ctx, ch1)
		if err != nil {
			t.Fatal(err)
		}

		err = r.Put(ctx, ch2)
		if err != nil {
			t.Fatal(err)
		}

		ch1StampHash, err := ch1.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}

		ch2StampHash, err := ch2.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}

		bin := swarm.Proximity(baseAddr.Bytes(), ch1.Address().Bytes())
		checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address(), StampHash: ch1StampHash}, true)
		checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address(), StampHash: ch2StampHash}, false)
		checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin, BinID: ch1BinID, StampHash: ch1StampHash}, true)
		checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin, BinID: ch2BinID, StampHash: ch2StampHash}, false)
		ch, err := ts.ChunkStore().Get(ctx, ch2.Address())
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(ch.Data(), ch2.Data()) {
			t.Fatalf("expected chunk data to be updated")
		}
	}

	t.Run("same stamp index and newer timestamp", func(t *testing.T) {
		size1 := r.Size()
		signer := getSigner(t)
		batch := postagetesting.MustNewBatch()
		s1 := soctesting.GenerateMockSocWithSigner(t, []byte("data"), signer)
		ch1 := s1.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 3))
		s2 := soctesting.GenerateMockSocWithSigner(t, []byte("update"), signer)
		ch2 := s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 4))
		bin := swarm.Proximity(baseAddr.Bytes(), ch1.Address().Bytes())
		binBinIDs[bin] += 2
		replace(t, ch1, ch2, binBinIDs[bin]-1, binBinIDs[bin])
		size2 := r.Size()
		if size2-size1 != 1 {
			t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1)
		}
	})

	t.Run("different stamp index and newer timestamp", func(t *testing.T) {
		size1 := r.Size()
		signer := getSigner(t)
		batch := postagetesting.MustNewBatch()
		s1 := soctesting.GenerateMockSocWithSigner(t, []byte("data"), signer)
		ch1 := s1.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 5))
		s2 := soctesting.GenerateMockSocWithSigner(t, []byte("update"), signer)
		ch2 := s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 1, 6))
		bin := swarm.Proximity(baseAddr.Bytes(), ch1.Address().Bytes())
		binBinIDs[bin] += 2
		replace(t, ch1, ch2, binBinIDs[bin]-1, binBinIDs[bin])
		size2 := r.Size()
		if size2-size1 != 1 {
			t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1)
		}
	})

	t.Run("not a soc and newer timestamp", func(t *testing.T) {
		size1 := r.Size()
		batch := postagetesting.MustNewBatch()
		ch1 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 7))
		ch2 := swarm.NewChunk(ch1.Address(), []byte("update")).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 8))
		err := r.Put(ctx, ch1)
		if err != nil {
			t.Fatal(err)
		}

		bin1 := swarm.Proximity(baseAddr.Bytes(), ch1.Address().Bytes())
		binBinIDs[bin1] += 1

		err = r.Put(ctx, ch2)
		if err != nil {
			t.Fatal(err)
		}

		bin2 := swarm.Proximity(baseAddr.Bytes(), ch2.Address().Bytes())
		binBinIDs[bin2] += 1

		ch, err := ts.ChunkStore().Get(ctx, ch2.Address())
		if err != nil {
			t.Fatal(err)
		}

		if !bytes.Equal(ch.Data(), ch1.Data()) {
			t.Fatalf("expected chunk data to not be updated")
		}

		size2 := r.Size()
		if size2-size1 != 1 {
			t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1)
		}
	})

	t.Run("chunk with different batchID remains untouched", func(t *testing.T) {
		noReplace := func(ch1, ch2 swarm.Chunk) {
			t.Helper()
			err = r.Put(ctx, ch1)
			if err != nil {
				t.Fatal(err)
			}

			err = r.Put(ctx, ch2)
			if err != nil {
				t.Fatal(err)
			}

			ch1StampHash, err := ch1.Stamp().Hash()
			if err != nil {
				t.Fatal(err)
			}
			ch2StampHash, err := ch2.Stamp().Hash()
			if err != nil {
				t.Fatal(err)
			}

			bin := swarm.Proximity(baseAddr.Bytes(), ch2.Address().Bytes())
			binBinIDs[bin] += 2

			// expect both entries in reserve
			checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address(), StampHash: ch1StampHash}, false)
			checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address(), StampHash: ch2StampHash}, false)
			checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin, BinID: binBinIDs[bin] - 1}, false)
			checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin, BinID: binBinIDs[bin]}, false)

			// expect new chunk to NOT replace old one
			ch, err := ts.ChunkStore().Get(ctx, ch2.Address())
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(ch.Data(), ch1.Data()) {
				t.Fatalf("expected chunk data to not be updated")
			}
		}

		size1 := r.Size()

		// soc
		signer := getSigner(t)
		batch := postagetesting.MustNewBatch()
		s1 := soctesting.GenerateMockSocWithSigner(t, []byte("data"), signer)
		ch1 := s1.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 3))
		batch = postagetesting.MustNewBatch()
		s2 := soctesting.GenerateMockSocWithSigner(t, []byte("update"), signer)
		ch2 := s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 4))

		if !bytes.Equal(ch1.Address().Bytes(), ch2.Address().Bytes()) {
			t.Fatalf("expected chunk addresses to be the same")
		}
		noReplace(ch1, ch2)

		// cac
		batch = postagetesting.MustNewBatch()
		ch1 = chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 5))
		batch = postagetesting.MustNewBatch()
		ch2 = swarm.NewChunk(ch1.Address(), []byte("update")).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 6))
		if !bytes.Equal(ch1.Address().Bytes(), ch2.Address().Bytes()) {
			t.Fatalf("expected chunk addresses to be the same")
		}
		noReplace(ch1, ch2)
		size2 := r.Size()
		if size2-size1 != 4 {
			t.Fatalf("expected reserve size to increase by 4, got %d", size2-size1)
		}
	})

	t.Run("same address but index collision with different chunk", func(t *testing.T) {
		size1 := r.Size()
		batch := postagetesting.MustNewBatch()
		ch1 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 0))
		err = r.Put(ctx, ch1)
		if err != nil {
			t.Fatal(err)
		}
		bin1 := swarm.Proximity(baseAddr.Bytes(), ch1.Address().Bytes())
		binBinIDs[bin1] += 1
		ch1BinID := binBinIDs[bin1]
		ch1StampHash, err := ch1.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}

		signer := getSigner(t)
		s1 := soctesting.GenerateMockSocWithSigner(t, []byte("data"), signer)
		ch2 := s1.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 1, 1))
		err = r.Put(ctx, ch2)
		if err != nil {
			t.Fatal(err)
		}
		bin2 := swarm.Proximity(baseAddr.Bytes(), ch2.Address().Bytes())
		binBinIDs[bin2] += 1
		ch2BinID := binBinIDs[bin2]
		ch2StampHash, err := ch2.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}

		checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin1, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address(), StampHash: ch1StampHash}, false)
		checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin2, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address(), StampHash: ch2StampHash}, false)
		checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin1, BinID: binBinIDs[bin1], StampHash: ch1StampHash}, false)
		checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin2, BinID: binBinIDs[bin2], StampHash: ch2StampHash}, false)

		s2 := soctesting.GenerateMockSocWithSigner(t, []byte("update"), signer)
		ch3 := s2.Chunk().WithStamp(postagetesting.MustNewFields(batch.ID, 0, 2))
		err = r.Put(ctx, ch3)
		if err != nil {
			t.Fatal(err)
		}
		binBinIDs[bin2] += 1
		ch3StampHash, err := ch3.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}
		ch3BinID := binBinIDs[bin2]

		checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin1, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address(), StampHash: ch1StampHash}, true)
		checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin2, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address(), StampHash: ch2StampHash}, true)
		checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: bin2, BatchID: ch3.Stamp().BatchID(), Address: ch3.Address(), StampHash: ch3StampHash}, false)
		checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin1, BinID: ch1BinID, StampHash: ch1StampHash}, true)
		checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin2, BinID: ch2BinID, StampHash: ch2StampHash}, true)
		checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: bin2, BinID: ch3BinID, StampHash: ch3StampHash}, false)

		size2 := r.Size()

		// (ch1 + ch2) == 2 and then ch3 reduces reserve size by 1
		if size2-size1 != 1 {
			t.Fatalf("expected reserve size to increase by 1, got %d", size2-size1)
		}
	})
}
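
// TestReplaceOldIndex verifies that a chunk stamped with a newer timestamp on the
// same stamp index replaces the older chunk's reserve entries, and that the stamp
// index item points at the new chunk address afterwards.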
func TestReplaceOldIndex(t *testing.T) {
	t.Parallel()

	baseAddr := swarm.RandAddress(t)

	ts := internal.NewInmemStorage()

	r, err := reserve.New(
		baseAddr,
		ts,
		0, kademlia.NewTopologyDriver(),
		log.Noop,
	)
	if err != nil {
		t.Fatal(err)
	}

	batch := postagetesting.MustNewBatch()
	ch1 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 0))
	ch2 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 1))

	err = r.Put(context.Background(), ch1)
	if err != nil {
		t.Fatal(err)
	}

	err = r.Put(context.Background(), ch2)
	if err != nil {
		t.Fatal(err)
	}

	// Chunk 1 must be gone
	ch1StampHash, err := ch1.Stamp().Hash()
	if err != nil {
		t.Fatal(err)
	}
	checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch1.Stamp().BatchID(), Address: ch1.Address(), StampHash: ch1StampHash}, true)
	checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 1, StampHash: ch1StampHash}, true)
	checkChunk(t, ts, ch1, true)

	// Chunk 2 must be stored
	ch2StampHash, err := ch2.Stamp().Hash()
	if err != nil {
		t.Fatal(err)
	}
	checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch2.Stamp().BatchID(), Address: ch2.Address(), StampHash: ch2StampHash}, false)
	checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: 2, StampHash: ch2StampHash}, false)
	checkChunk(t, ts, ch2, false)

	item, err := stampindex.Load(ts.IndexStore(), "reserve", ch2.Stamp())
	if err != nil {
		t.Fatal(err)
	}
	if !item.ChunkAddress.Equal(ch2.Address()) {
		t.Fatalf("wanted ch2 address")
	}
}
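
// TestEvict stores chunks from three batches across three bins, evicts one batch,
// and verifies that only that batch's chunks and index entries are removed while
// the other batches remain retrievable.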
func TestEvict(t *testing.T) {
	t.Parallel()

	baseAddr := swarm.RandAddress(t)

	ts := internal.NewInmemStorage()

	chunksPerBatch := 50
	var chunks []swarm.Chunk
	batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()}
	evictBatch := batches[1]

	r, err := reserve.New(
		baseAddr,
		ts,
		0, kademlia.NewTopologyDriver(),
		log.Noop,
	)
	if err != nil {
		t.Fatal(err)
	}

	for i := 0; i < chunksPerBatch; i++ {
		for b := 0; b < 3; b++ {
			ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b).WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID))
			chunks = append(chunks, ch)
			err := r.Put(context.Background(), ch)
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	totalEvicted := 0
	for i := 0; i < 3; i++ {
		evicted, err := r.EvictBatchBin(context.Background(), evictBatch.ID, math.MaxInt, uint8(i))
		if err != nil {
			t.Fatal(err)
		}
		totalEvicted += evicted
	}

	if totalEvicted != chunksPerBatch {
		t.Fatalf("got %d, want %d", totalEvicted, chunksPerBatch)
	}

	time.Sleep(time.Second)

	for i, ch := range chunks {
		binID := i%chunksPerBatch + 1
		b := swarm.Proximity(baseAddr.Bytes(), ch.Address().Bytes())
		stampHash, err := ch.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}
		_, err = r.Get(context.Background(), ch.Address(), ch.Stamp().BatchID(), stampHash)
		if bytes.Equal(ch.Stamp().BatchID(), evictBatch.ID) {
			if !errors.Is(err, storage.ErrNotFound) {
				t.Fatalf("got err %v, want %v", err, storage.ErrNotFound)
			}
			checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, true)
			checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID), StampHash: stampHash}, true)
			checkChunk(t, ts, ch, true)
		} else {
			if err != nil {
				t.Fatal(err)
			}
			checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: b, BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, false)
			checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: b, BinID: uint64(binID), StampHash: stampHash}, false)
			checkChunk(t, ts, ch, false)
		}
	}
}
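
// TestEvictMaxCount verifies that EvictBatchBin evicts no more than the requested
// number of chunks: the ten bin 0 chunks are removed and the ten bin 1 chunks stay.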
func TestEvictMaxCount(t *testing.T) {
	t.Parallel()

	baseAddr := swarm.RandAddress(t)

	ts := internal.NewInmemStorage()

	r, err := reserve.New(
		baseAddr,
		ts,
		0, kademlia.NewTopologyDriver(),
		log.Noop,
	)
	if err != nil {
		t.Fatal(err)
	}

	var chunks []swarm.Chunk

	batch := postagetesting.MustNewBatch()

	for b := 0; b < 2; b++ {
		for i := 0; i < 10; i++ {
			ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b).WithStamp(postagetesting.MustNewBatchStamp(batch.ID))
			chunks = append(chunks, ch)
			err := r.Put(context.Background(), ch)
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	evicted, err := r.EvictBatchBin(context.Background(), batch.ID, 10, 1)
	if err != nil {
		t.Fatal(err)
	}
	if evicted != 10 {
		t.Fatalf("wanted evicted count 10, got %d", evicted)
	}

	for i, ch := range chunks {
		stampHash, err := ch.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}
		if i < 10 {
			checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 0, BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, true)
			checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 0, BinID: uint64(i + 1), StampHash: stampHash}, true)
			checkChunk(t, ts, ch, true)
		} else {
			checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: 1, BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, false)
			checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: 1, BinID: uint64(i - 10 + 1), StampHash: stampHash}, false)
			checkChunk(t, ts, ch, false)
		}
	}
}
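
// TestIterate covers bin iteration, chunk iteration, chunk item iteration, and the
// last bin ID accounting over a freshly populated reserve.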
func TestIterate(t *testing.T) {
	t.Parallel()

	createReserve := func(t *testing.T) *reserve.Reserve {
		t.Helper()

		baseAddr := swarm.RandAddress(t)

		ts := internal.NewInmemStorage()

		r, err := reserve.New(
			baseAddr,
			ts,
			0, kademlia.NewTopologyDriver(),
			log.Noop,
		)
		if err != nil {
			t.Fatal(err)
		}

		for b := 0; b < 3; b++ {
			for i := 0; i < 10; i++ {
				ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
				err := r.Put(context.Background(), ch)
				if err != nil {
					t.Fatal(err)
				}
			}
		}

		return r
	}

	t.Run("iterate bin", func(t *testing.T) {
		t.Parallel()

		r := createReserve(t)

		var id uint64 = 1
		err := r.IterateBin(1, 0, func(ch swarm.Address, binID uint64, _, _ []byte) (bool, error) {
			if binID != id {
				t.Fatalf("got %d, want %d", binID, id)
			}
			id++
			return false, nil
		})
		if err != nil {
			t.Fatal(err)
		}
		if id != 11 {
			t.Fatalf("got %d, want %d", id, 11)
		}
	})

	t.Run("iterate chunks", func(t *testing.T) {
		t.Parallel()

		r := createReserve(t)

		count := 0
		err := r.IterateChunks(2, func(_ swarm.Chunk) (bool, error) {
			count++
			return false, nil
		})
		if err != nil {
			t.Fatal(err)
		}
		if count != 10 {
			t.Fatalf("got %d, want %d", count, 10)
		}
	})

	t.Run("iterate chunk items", func(t *testing.T) {
		t.Parallel()

		r := createReserve(t)

		count := 0
		err := r.IterateChunksItems(0, func(_ *reserve.ChunkBinItem) (bool, error) {
			count++
			return false, nil
		})
		if err != nil {
			t.Fatal(err)
		}
		if count != 30 {
			t.Fatalf("got %d, want %d", count, 30)
		}
	})

	t.Run("last bin id", func(t *testing.T) {
		t.Parallel()

		r := createReserve(t)

		ids, _, err := r.LastBinIDs()
		if err != nil {
			t.Fatal(err)
		}
		for i, id := range ids {
			if i < 3 {
				if id != 10 {
					t.Fatalf("got %d, want %d", id, 10)
				}
			} else {
				if id != 0 {
					t.Fatalf("got %d, want %d", id, 0)
				}
			}
		}
	})
}
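
// TestReset verifies that Reset clears all reserve index entries, stamp data,
// chunks, and the epoch item, so previously stored chunks can no longer be
// retrieved.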
func TestReset(t *testing.T) {
	t.Parallel()

	baseAddr := swarm.RandAddress(t)

	ts := internal.NewInmemStorage()

	r, err := reserve.New(
		baseAddr,
		ts,
		0, kademlia.NewTopologyDriver(),
		log.Noop,
	)
	if err != nil {
		t.Fatal(err)
	}

	var chs []swarm.Chunk

	var (
		bins         = 5
		chunksPerBin = 100
		total        = bins * chunksPerBin
	)

	for b := 0; b < bins; b++ {
		for i := 1; i <= chunksPerBin; i++ {
			ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
			err := r.Put(context.Background(), ch)
			if err != nil {
				t.Fatal(err)
			}
			stampHash, err := ch.Stamp().Hash()
			if err != nil {
				t.Fatal(err)
			}
			checkStore(t, ts.IndexStore(), &reserve.BatchRadiusItem{Bin: uint8(b), BatchID: ch.Stamp().BatchID(), Address: ch.Address(), StampHash: stampHash}, false)
			checkStore(t, ts.IndexStore(), &reserve.ChunkBinItem{Bin: uint8(b), BinID: uint64(i), StampHash: stampHash}, false)
			checkChunk(t, ts, ch, false)
			_, err = r.Get(context.Background(), ch.Address(), ch.Stamp().BatchID(), stampHash)
			if err != nil {
				t.Fatal(err)
			}
			chs = append(chs, ch)
		}
	}

	c, err := ts.IndexStore().Count(&reserve.BatchRadiusItem{})
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, c, total)
	c, err = ts.IndexStore().Count(&reserve.ChunkBinItem{})
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, c, total)
	c, err = ts.IndexStore().Count(&stampindex.Item{})
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, c, total)

	cItem := &chunkstamp.Item{}
	cItem.SetScope([]byte("reserve"))
	c, err = ts.IndexStore().Count(cItem)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, c, total)

	checkStore(t, ts.IndexStore(), &reserve.EpochItem{}, false)

	err = r.Reset(context.Background())
	if err != nil {
		t.Fatal(err)
	}

	c, err = ts.IndexStore().Count(&reserve.BatchRadiusItem{})
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, c, 0)
	c, err = ts.IndexStore().Count(&reserve.ChunkBinItem{})
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, c, 0)
	c, err = ts.IndexStore().Count(&stampindex.Item{})
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, c, 0)

	c, err = ts.IndexStore().Count(cItem)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, c, 0)

	checkStore(t, ts.IndexStore(), &reserve.EpochItem{}, true)

	for _, c := range chs {
		h, err := c.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}
		_, err = r.Get(context.Background(), c.Address(), c.Stamp().BatchID(), h)
		if !errors.Is(err, storage.ErrNotFound) {
			t.Fatalf("expected error %v, got %v", storage.ErrNotFound, err)
		}
	}
}

func checkStore(t *testing.T, s storage.Reader, k storage.Key, gone bool) {
	t.Helper()
	h, err := s.Has(k)
	if err != nil {
		t.Fatal(err)
	}
	if gone && h {
		t.Fatalf("unexpected entry in %s-%s ", k.Namespace(), k.ID())
	}
	if !gone && !h {
		t.Fatalf("expected entry in %s-%s ", k.Namespace(), k.ID())
	}
}

func checkChunk(t *testing.T, s transaction.ReadOnlyStore, ch swarm.Chunk, gone bool) {
	t.Helper()
	h, err := s.ChunkStore().Has(context.Background(), ch.Address())
	if err != nil {
		t.Fatal(err)
	}

	_, err = chunkstamp.LoadWithBatchID(s.IndexStore(), "reserve", ch.Address(), ch.Stamp().BatchID())
	if !gone && err != nil {
		t.Fatal(err)
	}
	if gone && !errors.Is(err, storage.ErrNotFound) {
		t.Fatalf("got err %v, want %v", err, storage.ErrNotFound)
	}

	if gone && h {
		t.Fatalf("unexpected entry %s", ch.Address())
	}
	if !gone && !h {
		t.Fatalf("expected entry %s", ch.Address())
	}
}

func getSigner(t *testing.T) crypto.Signer {
	t.Helper()
	privKey, err := crypto.GenerateSecp256k1Key()
	if err != nil {
		t.Fatal(err)
	}
	return crypto.NewDefaultSigner(privKey)
}