github.com/ethersphere/bee/v2@v2.2.0/pkg/storer/reserve_test.go

// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package storer_test

import (
	"bytes"
	"context"
	"encoding/hex"
	"errors"
	"testing"
	"time"

	"github.com/ethersphere/bee/v2/pkg/postage"
	batchstore "github.com/ethersphere/bee/v2/pkg/postage/batchstore/mock"
	postagetesting "github.com/ethersphere/bee/v2/pkg/postage/testing"
	pullerMock "github.com/ethersphere/bee/v2/pkg/puller/mock"
	"github.com/ethersphere/bee/v2/pkg/spinlock"
	"github.com/ethersphere/bee/v2/pkg/storage"
	"github.com/ethersphere/bee/v2/pkg/storage/storagetest"
	chunk "github.com/ethersphere/bee/v2/pkg/storage/testing"
	"github.com/ethersphere/bee/v2/pkg/storer"
	"github.com/ethersphere/bee/v2/pkg/storer/internal/chunkstamp"
	"github.com/ethersphere/bee/v2/pkg/storer/internal/reserve"
	"github.com/ethersphere/bee/v2/pkg/storer/internal/stampindex"
	"github.com/ethersphere/bee/v2/pkg/swarm"
)

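// TestIndexCollision verifies that two different chunks carrying the same
// stamp cannot both enter the reserve: the second Put must fail with a stamp
// index collision, the colliding chunk must not be retrievable, and only the
// first chunk must be counted in the reserve size.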
func TestIndexCollision(t *testing.T) {
	t.Parallel()

	testF := func(t *testing.T, baseAddr swarm.Address, storer *storer.DB) {
		t.Helper()
		stamp := postagetesting.MustNewBatchStamp(postagetesting.MustNewBatch().ID)
		putter := storer.ReservePutter()

		ch1 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(stamp)
		err := putter.Put(context.Background(), ch1)
		if err != nil {
			t.Fatal(err)
		}

		ch2 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(stamp)
		err = putter.Put(context.Background(), ch2)
		if err == nil {
			t.Fatal("expected index collision error")
		}

		ch1StampHash, err := ch1.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}
		_, err = storer.ReserveGet(context.Background(), ch2.Address(), ch2.Stamp().BatchID(), ch1StampHash)
		if !errors.Is(err, storage.ErrNotFound) {
			t.Fatal(err)
		}

		ch2StampHash, err := ch2.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}
		_, err = storer.ReserveGet(context.Background(), ch1.Address(), ch1.Stamp().BatchID(), ch2StampHash)
		if err != nil {
			t.Fatal(err)
		}

		t.Run("reserve size", reserveSizeTest(storer.Reserve(), 1))
	}

	t.Run("disk", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := diskStorer(t, dbTestOps(baseAddr, 10, nil, nil, time.Minute))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))
		testF(t, baseAddr, storer)
	})
	t.Run("mem", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := memStorer(t, dbTestOps(baseAddr, 10, nil, nil, time.Minute))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))
		testF(t, baseAddr, storer)
	})
}

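// TestReplaceOldIndex verifies that a chunk stamped with a newer timestamp at
// the same stamp index replaces the older chunk: the stamp index entry is
// repointed to the new chunk, the old chunk's stamp is deleted, and the old
// chunk is no longer retrievable from the reserve.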
func TestReplaceOldIndex(t *testing.T) {
	t.Parallel()

	testF := func(t *testing.T, baseAddr swarm.Address, storer *storer.DB) {
		t.Helper()

		t.Run("same stamp index, newer timestamp", func(t *testing.T) {
			batch := postagetesting.MustNewBatch()
			ch_1 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 0))
			ch_2 := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0).WithStamp(postagetesting.MustNewFields(batch.ID, 0, 1))

			putter := storer.ReservePutter()

			err := putter.Put(context.Background(), ch_1)
			if err != nil {
				t.Fatal(err)
			}

			err = putter.Put(context.Background(), ch_2)
			if err != nil {
				t.Fatal(err)
			}

			// Chunk 2 must be stored
			checkSaved(t, storer, ch_2, true, true)
			ch2StampHash, err := ch_2.Stamp().Hash()
			if err != nil {
				t.Fatal(err)
			}
			got, err := storer.ReserveGet(context.Background(), ch_2.Address(), ch_2.Stamp().BatchID(), ch2StampHash)
			if err != nil {
				t.Fatal(err)
			}
			if !got.Address().Equal(ch_2.Address()) {
				t.Fatalf("got addr %s, want %s", got.Address(), ch_2.Address())
			}
			if !bytes.Equal(got.Stamp().BatchID(), ch_2.Stamp().BatchID()) {
				t.Fatalf("got batchID %s, want %s", hex.EncodeToString(got.Stamp().BatchID()), hex.EncodeToString(ch_2.Stamp().BatchID()))
			}

			// Chunk 1 must be missing: its stamp index entry now points to chunk 2
			item, err := stampindex.Load(storer.Storage().IndexStore(), "reserve", ch_1.Stamp())
			if err != nil {
				t.Fatal(err)
			}
			if !item.ChunkAddress.Equal(ch_2.Address()) {
				t.Fatalf("wanted addr %s, got %s", ch_2.Address(), item.ChunkAddress)
			}
			_, err = chunkstamp.Load(storer.Storage().IndexStore(), "reserve", ch_1.Address())
			if !errors.Is(err, storage.ErrNotFound) {
				t.Fatalf("wanted err %s, got err %s", storage.ErrNotFound, err)
			}

			ch1StampHash, err := ch_1.Stamp().Hash()
			if err != nil {
				t.Fatal(err)
			}
			_, err = storer.ReserveGet(context.Background(), ch_1.Address(), ch_1.Stamp().BatchID(), ch1StampHash)
			if !errors.Is(err, storage.ErrNotFound) {
				t.Fatal(err)
			}

			t.Run("reserve size", reserveSizeTest(storer.Reserve(), 1))
		})
	}

	t.Run("disk", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := diskStorer(t, dbTestOps(baseAddr, 10, nil, nil, time.Minute))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))
		testF(t, baseAddr, storer)
	})
	t.Run("mem", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := memStorer(t, dbTestOps(baseAddr, 10, nil, nil, time.Minute))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))
		testF(t, baseAddr, storer)
	})
}

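// TestEvictBatch stores chunks of three batches across three bins, evicts one
// batch, and verifies that only that batch's chunks are removed and that the
// reserve size, radius, and per-bin last bin IDs are left consistent.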
func TestEvictBatch(t *testing.T) {
	t.Parallel()

	baseAddr := swarm.RandAddress(t)

	st, err := diskStorer(t, dbTestOps(baseAddr, 100, nil, nil, time.Minute))()
	if err != nil {
		t.Fatal(err)
	}
	st.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))

	ctx := context.Background()

	var chunks []swarm.Chunk
	var chunksPerPO uint64 = 10
	batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()}
	evictBatch := batches[1]

	putter := st.ReservePutter()

	for b := 0; b < 3; b++ {
		for i := uint64(0); i < chunksPerPO; i++ {
			ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
			ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batches[b].ID))
			chunks = append(chunks, ch)
			err := putter.Put(ctx, ch)
			if err != nil {
				t.Fatal(err)
			}
		}
	}

	c, unsub := st.Events().Subscribe("batchExpiryDone")
	t.Cleanup(unsub)

	err = st.EvictBatch(ctx, evictBatch.ID)
	if err != nil {
		t.Fatal(err)
	}
	<-c

	reserve := st.Reserve()

	for _, ch := range chunks {
		stampHash, err := ch.Stamp().Hash()
		if err != nil {
			t.Fatal(err)
		}
		has, err := st.ReserveHas(ch.Address(), ch.Stamp().BatchID(), stampHash)
		if err != nil {
			t.Fatal(err)
		}

		if bytes.Equal(ch.Stamp().BatchID(), evictBatch.ID) {
			if has {
				t.Fatal("store should NOT have chunk")
			}
			checkSaved(t, st, ch, false, false)
		} else {
			if !has {
				t.Fatal("store should have chunk")
			}
			checkSaved(t, st, ch, true, true)
		}
	}

	t.Run("reserve size", reserveSizeTest(st.Reserve(), 20))

	if reserve.Radius() != 0 {
		t.Fatalf("want radius %d, got radius %d", 0, reserve.Radius())
	}

	ids, _, err := st.ReserveLastBinIDs()
	if err != nil {
		t.Fatal(err)
	}

	for bin, id := range ids {
		if bin < 3 && id != 10 {
			t.Fatalf("bin %d got binID %d, want %d", bin, id, 10)
		}
		if bin >= 3 && id != 0 {
			t.Fatalf("bin %d got binID %d, want %d", bin, id, 0)
		}
	}
}

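// TestUnreserveCap fills the reserve beyond its capacity and verifies that
// the eviction worker unreserves chunks bin by bin, starting at proximity
// order 0, until the reserve size is back at capacity.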
func TestUnreserveCap(t *testing.T) {
	t.Parallel()

	var (
		storageRadius = 2
		capacity      = 30
	)

	testF := func(t *testing.T, baseAddr swarm.Address, bs *batchstore.BatchStore, storer *storer.DB) {
		t.Helper()

		var chunksPO = make([][]swarm.Chunk, 5)
		var chunksPerPO uint64 = 10

		batch := postagetesting.MustNewBatch()
		err := bs.Save(batch)
		if err != nil {
			t.Fatal(err)
		}

		ctx := context.Background()

		putter := storer.ReservePutter()

		c, unsub := storer.Events().Subscribe("reserveUnreserved")
		defer unsub()

		for b := 0; b < 5; b++ {
			for i := uint64(0); i < chunksPerPO; i++ {
				ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, b)
				ch = ch.WithStamp(postagetesting.MustNewBatchStamp(batch.ID))
				chunksPO[b] = append(chunksPO[b], ch)
				err := putter.Put(ctx, ch)
				if err != nil {
					t.Fatal(err)
				}
			}
		}

	done:
		for {
			select {
			case <-c:
				if storer.ReserveSize() == capacity {
					break done
				}
			case <-time.After(time.Second * 30):
				if storer.ReserveSize() != capacity {
					t.Fatal("timeout waiting for reserve to reach capacity")
				}
			}
		}

		for po, chunks := range chunksPO {
			for _, ch := range chunks {
				stampHash, err := ch.Stamp().Hash()
				if err != nil {
					t.Fatal(err)
				}
				has, err := storer.ReserveHas(ch.Address(), ch.Stamp().BatchID(), stampHash)
				if err != nil {
					t.Fatal(err)
				}
				if po < storageRadius {
					if has {
						t.Fatal("store should NOT have chunk at PO", po)
					}
					checkSaved(t, storer, ch, false, false)
				} else if !has {
					t.Fatal("store should have chunk at PO", po)
				} else {
					checkSaved(t, storer, ch, true, true)
				}
			}
		}
	}

	t.Run("disk", func(t *testing.T) {
		t.Parallel()
		bs := batchstore.New()
		baseAddr := swarm.RandAddress(t)
		storer, err := diskStorer(t, dbTestOps(baseAddr, capacity, bs, nil, time.Minute))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))
		testF(t, baseAddr, bs, storer)
	})
	t.Run("mem", func(t *testing.T) {
		t.Parallel()
		bs := batchstore.New()
		baseAddr := swarm.RandAddress(t)
		storer, err := memStorer(t, dbTestOps(baseAddr, capacity, bs, nil, time.Minute))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(0))
		testF(t, baseAddr, bs, storer)
	})
}

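// TestNetworkRadius verifies that a freshly started reserve worker adopts the
// storage radius reported by the network radius function.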
func TestNetworkRadius(t *testing.T) {
	t.Parallel()

	t.Run("disk", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := diskStorer(t, dbTestOps(baseAddr, 10, nil, nil, time.Minute))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(1))
		time.Sleep(time.Second)
		if want, got := uint8(1), storer.StorageRadius(); want != got {
			t.Fatalf("want radius %d, got radius %d", want, got)
		}
	})
	t.Run("mem", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := memStorer(t, dbTestOps(baseAddr, 10, nil, nil, time.Minute))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(1))
		time.Sleep(time.Second)
		if want, got := uint8(1), storer.StorageRadius(); want != got {
			t.Fatalf("want radius %d, got radius %d", want, got)
		}
	})
}

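// TestRadiusManager verifies how the reserve worker manages the radius: it
// decreases the radius when the reserve is underutilized and the pull rate is
// zero, and leaves the radius unchanged while the pull rate is non-zero.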
func TestRadiusManager(t *testing.T) {
	t.Parallel()

	baseAddr := swarm.RandAddress(t)

	waitForRadius := func(t *testing.T, reserve *reserve.Reserve, expectedRadius uint8) {
		t.Helper()
		err := spinlock.Wait(time.Second*30, func() bool {
			return reserve.Radius() == expectedRadius
		})
		if err != nil {
			t.Fatalf("timed out waiting for radius, expected %d found %d", expectedRadius, reserve.Radius())
		}
	}

	waitForSize := func(t *testing.T, reserve *reserve.Reserve, size int) {
		t.Helper()
		err := spinlock.Wait(time.Second*30, func() bool {
			return reserve.Size() == size
		})
		if err != nil {
			t.Fatalf("timed out waiting for reserve size, expected %d found %d", size, reserve.Size())
		}
	}

	t.Run("radius decrease due to under utilization", func(t *testing.T) {
		t.Parallel()
		bs := batchstore.New()

		storer, err := memStorer(t, dbTestOps(baseAddr, 10, bs, nil, time.Millisecond*500))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(0), networkRadiusFunc(3))

		batch := postagetesting.MustNewBatch()
		err = bs.Save(batch)
		if err != nil {
			t.Fatal(err)
		}

		putter := storer.ReservePutter()

		for i := 0; i < 4; i++ {
			for j := 0; j < 10; j++ {
				ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, i).WithStamp(postagetesting.MustNewBatchStamp(batch.ID))
				err := putter.Put(context.Background(), ch)
				if err != nil {
					t.Fatal(err)
				}
			}
		}

		waitForSize(t, storer.Reserve(), 10)
		waitForRadius(t, storer.Reserve(), 3)

		err = storer.EvictBatch(context.Background(), batch.ID)
		if err != nil {
			t.Fatal(err)
		}
		waitForRadius(t, storer.Reserve(), 0)
	})

	t.Run("radius doesn't change due to non-zero pull rate", func(t *testing.T) {
		t.Parallel()
		storer, err := diskStorer(t, dbTestOps(baseAddr, 10, nil, nil, time.Millisecond*500))()
		if err != nil {
			t.Fatal(err)
		}
		storer.StartReserveWorker(context.Background(), pullerMock.NewMockRateReporter(1), networkRadiusFunc(3))
		waitForRadius(t, storer.Reserve(), 3)
	})
}

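// TestSubscribeBin verifies bin subscriptions over previously stored chunks:
// subscriptions stream the expected chunks in bin ID order, whether started
// from the beginning or from a later bin ID, and stop after unsubscribing.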
func TestSubscribeBin(t *testing.T) {
	t.Parallel()

	testF := func(t *testing.T, baseAddr swarm.Address, storer *storer.DB) {
		t.Helper()
		var (
			chunks      []swarm.Chunk
			chunksPerPO uint64 = 50
			putter             = storer.ReservePutter()
		)

		for j := 0; j < 2; j++ {
			for i := uint64(0); i < chunksPerPO; i++ {
				ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, j)
				chunks = append(chunks, ch)
				err := putter.Put(context.Background(), ch)
				if err != nil {
					t.Fatal(err)
				}
			}
		}

		t.Run("subscribe full range", func(t *testing.T) {
			t.Parallel()

			binC, _, _ := storer.SubscribeBin(context.Background(), 0, 0)

			i := uint64(0)
			for c := range binC {
				if !c.Address.Equal(chunks[i].Address()) {
					t.Fatal("mismatch of chunks at index", i)
				}
				i++
				if i == chunksPerPO {
					return
				}
			}
		})

		t.Run("subscribe unsub", func(t *testing.T) {
			t.Parallel()

			binC, unsub, _ := storer.SubscribeBin(context.Background(), 0, 0)

			<-binC
			unsub()

			select {
			case <-binC:
			case <-time.After(time.Second):
				t.Fatal("still waiting on result")
			}
		})

		t.Run("subscribe range higher bin", func(t *testing.T) {
			t.Parallel()

			binC, _, _ := storer.SubscribeBin(context.Background(), 0, 2)

			i := uint64(1)
			for c := range binC {
				if !c.Address.Equal(chunks[i].Address()) {
					t.Fatal("mismatch of chunks at index", i)
				}
				i++
				if i == chunksPerPO {
					return
				}
			}
		})

		t.Run("subscribe beyond range", func(t *testing.T) {
			t.Parallel()

			binC, _, _ := storer.SubscribeBin(context.Background(), 0, 2)
			i := uint64(1)
			timer := time.After(time.Millisecond * 500)

		loop:
			for {
				select {
				case c := <-binC:
					if !c.Address.Equal(chunks[i].Address()) {
						t.Fatal("mismatch of chunks at index", i)
					}
					i++
				case <-timer:
					break loop
				}
			}

			if i != chunksPerPO {
				t.Fatalf("mismatch of chunk count, got %d, want %d", i, chunksPerPO)
			}
		})
	}

	t.Run("disk", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := diskStorer(t, dbTestOps(baseAddr, 100, nil, nil, time.Second))()
		if err != nil {
			t.Fatal(err)
		}
		testF(t, baseAddr, storer)
	})
	t.Run("mem", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := memStorer(t, dbTestOps(baseAddr, 100, nil, nil, time.Second))()
		if err != nil {
			t.Fatal(err)
		}
		testF(t, baseAddr, storer)
	})
}

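// TestSubscribeBinTrigger verifies that a drained bin subscription is woken
// up again when a new chunk is put into the subscribed bin.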
func TestSubscribeBinTrigger(t *testing.T) {
	t.Parallel()

	testF := func(t *testing.T, baseAddr swarm.Address, storer *storer.DB) {
		t.Helper()
		var (
			chunks      []swarm.Chunk
			chunksPerPO uint64 = 5
		)

		putter := storer.ReservePutter()
		for j := 0; j < 2; j++ {
			for i := uint64(0); i < chunksPerPO; i++ {
				ch := chunk.GenerateTestRandomChunkAt(t, baseAddr, j)
				chunks = append(chunks, ch)
				err := putter.Put(context.Background(), ch)
				if err != nil {
					t.Fatal(err)
				}
			}
		}

		binC, _, _ := storer.SubscribeBin(context.Background(), 0, 2)
		i := uint64(1)
		timer := time.After(time.Millisecond * 500)

	loop:
		for {
			select {
			case c := <-binC:
				if !c.Address.Equal(chunks[i].Address()) {
					t.Fatal("mismatch of chunks at index", i)
				}
				i++
			case <-timer:
				break loop
			}
		}

		if i != chunksPerPO {
			t.Fatalf("mismatch of chunk count, got %d, want %d", i, chunksPerPO)
		}

		newChunk := chunk.GenerateTestRandomChunkAt(t, baseAddr, 0)
		putter = storer.ReservePutter()
		err := putter.Put(context.Background(), newChunk)
		if err != nil {
			t.Fatal(err)
		}

		select {
		case c := <-binC:
			if !c.Address.Equal(newChunk.Address()) {
				t.Fatal("mismatch of chunks")
			}
		case <-time.After(time.Second):
			t.Fatal("timed out waiting for next chunk")
		}
	}

	t.Run("disk", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := diskStorer(t, dbTestOps(baseAddr, 100, nil, nil, time.Second))()
		if err != nil {
			t.Fatal(err)
		}
		testF(t, baseAddr, storer)
	})
	t.Run("mem", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		storer, err := memStorer(t, dbTestOps(baseAddr, 100, nil, nil, time.Second))()
		if err != nil {
			t.Fatal(err)
		}
		testF(t, baseAddr, storer)
	})
}

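// reserveSizeTest returns a subtest that asserts the current reserve size.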
func reserveSizeTest(rs *reserve.Reserve, want int) func(t *testing.T) {
	return func(t *testing.T) {
		t.Helper()
		got := rs.Size()
		if got != want {
			t.Errorf("got reserve size %v, want %v", got, want)
		}
	}
}

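// checkSaved asserts whether a chunk's stamp index entry and chunk stamp
// (stampSaved) and its chunk store entry (chunkStoreSaved) are present, and
// that the stored payload matches the original chunk.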
func checkSaved(t *testing.T, st *storer.DB, ch swarm.Chunk, stampSaved, chunkStoreSaved bool) {
	t.Helper()

	var stampWantedErr error
	if !stampSaved {
		stampWantedErr = storage.ErrNotFound
	}
	_, err := stampindex.Load(st.Storage().IndexStore(), "reserve", ch.Stamp())
	if !errors.Is(err, stampWantedErr) {
		t.Fatalf("wanted err %s, got err %s", stampWantedErr, err)
	}
	_, err = chunkstamp.Load(st.Storage().IndexStore(), "reserve", ch.Address())
	if !errors.Is(err, stampWantedErr) {
		t.Fatalf("wanted err %s, got err %s", stampWantedErr, err)
	}

	var chunkStoreWantedErr error
	if !chunkStoreSaved {
		chunkStoreWantedErr = storage.ErrNotFound
	}
	gotCh, err := st.Storage().ChunkStore().Get(context.Background(), ch.Address())
	if !errors.Is(err, chunkStoreWantedErr) {
		t.Fatalf("wanted err %s, got err %s", chunkStoreWantedErr, err)
	}
	if chunkStoreSaved {
		if !bytes.Equal(ch.Data(), gotCh.Data()) {
			t.Fatalf("chunks are not equal: %s", ch.Address())
		}
	}
}

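// BenchmarkReservePutter measures sequential chunk writes through the reserve
// putter backed by the disk store.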
func BenchmarkReservePutter(b *testing.B) {
	baseAddr := swarm.RandAddress(b)
	storer, err := diskStorer(b, dbTestOps(baseAddr, 10000, nil, nil, time.Second))()
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	b.ReportAllocs()
	storagetest.BenchmarkChunkStoreWriteSequential(b, storer.ReservePutter())
}

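// networkRadiusFunc returns a network radius function that always reports r.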
func networkRadiusFunc(r uint8) func() (uint8, error) {
	return func() (uint8, error) { return r, nil }
}