github.com/ethersphere/bee/v2@v2.2.0/pkg/storer/sample_test.go

// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package storer_test

import (
	"context"
	"math/rand"
	"testing"
	"time"

	"github.com/ethersphere/bee/v2/pkg/postage"

	postagetesting "github.com/ethersphere/bee/v2/pkg/postage/testing"
	chunk "github.com/ethersphere/bee/v2/pkg/storage/testing"
	"github.com/ethersphere/bee/v2/pkg/storer"
	"github.com/ethersphere/bee/v2/pkg/swarm"
	"github.com/google/go-cmp/cmp"
)

func TestReserveSampler(t *testing.T) {
	const chunkCountPerPO = 10
	const maxPO = 10

	randChunks := func(baseAddr swarm.Address, timeVar uint64) []swarm.Chunk {
		var chs []swarm.Chunk
		for po := 0; po < maxPO; po++ {
			for i := 0; i < chunkCountPerPO; i++ {
				ch := chunk.GenerateValidRandomChunkAt(baseAddr, po).WithBatch(3, 2, false)
				if rand.Intn(2) == 0 { // 50% chance to wrap CAC into SOC
					ch = chunk.GenerateTestRandomSoChunk(t, ch)
				}

				// override stamp timestamp to be before the consensus timestamp
				ch = ch.WithStamp(postagetesting.MustNewStampWithTimestamp(timeVar))
				chs = append(chs, ch)
			}
		}
		return chs
	}

	testF := func(t *testing.T, baseAddr swarm.Address, st *storer.DB) {
		t.Helper()

		timeVar := uint64(time.Now().UnixNano())
		chs := randChunks(baseAddr, timeVar-1)

		putter := st.ReservePutter()
		for _, ch := range chs {
			err := putter.Put(context.Background(), ch)
			if err != nil {
				t.Fatal(err)
			}
		}

		t.Run("reserve size", reserveSizeTest(st.Reserve(), chunkCountPerPO*maxPO))

		var sample1 storer.Sample

		t.Run("reserve sample 1", func(t *testing.T) {
			sample, err := st.ReserveSample(context.TODO(), []byte("anchor"), 5, timeVar, nil)
			if err != nil {
				t.Fatal(err)
			}

			assertValidSample(t, sample)
			assertSampleNoErrors(t, sample)

			if sample.Stats.NewIgnored != 0 {
				t.Fatalf("sample should not have ignored chunks")
			}

			sample1 = sample
		})

		// We generate another 100 chunks. With these new chunks in the reserve, statistically
		// some of them should definitely make it to the sample based on lex ordering.
		chs = randChunks(baseAddr, timeVar+1)
		putter = st.ReservePutter()
		for _, ch := range chs {
			err := putter.Put(context.Background(), ch)
			if err != nil {
				t.Fatal(err)
			}
		}

		time.Sleep(time.Second)

		t.Run("reserve size", reserveSizeTest(st.Reserve(), 2*chunkCountPerPO*maxPO))

		// Now we generate another sample with the older timestamp. This should give us
		// the exact same sample, ensuring that none of the later chunks were considered.
		t.Run("reserve sample 2", func(t *testing.T) {
			sample, err := st.ReserveSample(context.TODO(), []byte("anchor"), 5, timeVar, nil)
			if err != nil {
				t.Fatal(err)
			}

			if diff := cmp.Diff(sample.Items, sample1.Items, cmp.AllowUnexported(postage.Stamp{})); diff != "" {
				t.Fatalf("samples different (-want +have):\n%s", diff)
			}

			if sample.Stats.NewIgnored == 0 {
				t.Fatalf("sample should have some ignored chunks")
			}

			assertSampleNoErrors(t, sample)
		})

	}

	t.Run("disk", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		opts := dbTestOps(baseAddr, 1000, nil, nil, time.Second)
		opts.ValidStamp = func(ch swarm.Chunk) (swarm.Chunk, error) { return ch, nil }

		storer, err := diskStorer(t, opts)()
		if err != nil {
			t.Fatal(err)
		}
		testF(t, baseAddr, storer)
	})
	t.Run("mem", func(t *testing.T) {
		t.Parallel()
		baseAddr := swarm.RandAddress(t)
		opts := dbTestOps(baseAddr, 1000, nil, nil, time.Second)
		opts.ValidStamp = func(ch swarm.Chunk) (swarm.Chunk, error) { return ch, nil }

		storer, err := memStorer(t, opts)()
		if err != nil {
			t.Fatal(err)
		}
		testF(t, baseAddr, storer)
	})
}

func TestRandSample(t *testing.T) {
	t.Parallel()

	sample := storer.RandSample(t, nil)
	assertValidSample(t, sample)
}

func assertValidSample(t *testing.T, sample storer.Sample) {
	t.Helper()

	// Assert that sample size is exactly storer.SampleSize
	if len(sample.Items) != storer.SampleSize {
		t.Fatalf("incorrect no of sample items, exp %d found %d", storer.SampleSize, len(sample.Items))
	}

	// Assert that sample item has all fields set
	assertSampleItem := func(item storer.SampleItem, i int) {
		if !item.TransformedAddress.IsValidNonEmpty() {
			t.Fatalf("sample item [%d]: transformed address should be set", i)
		}
		if !item.ChunkAddress.IsValidNonEmpty() {
			t.Fatalf("sample item [%d]: chunk address should be set", i)
		}
		if item.ChunkData == nil {
			t.Fatalf("sample item [%d]: chunk data should be set", i)
		}
		if item.Stamp == nil {
			t.Fatalf("sample item [%d]: stamp should be set", i)
		}
	}
	for i, item := range sample.Items {
		assertSampleItem(item, i)
	}

	// Assert that transformed addresses are in ascending order
	for i := 0; i < len(sample.Items)-1; i++ {
		if sample.Items[i].TransformedAddress.Compare(sample.Items[i+1].TransformedAddress) != -1 {
			t.Fatalf("incorrect order of samples")
		}
	}
}

func assertSampleNoErrors(t *testing.T, sample storer.Sample) {
	t.Helper()

	if sample.Stats.ChunkLoadFailed != 0 {
		t.Fatalf("got unexpected failed chunk loads")
	}
	if sample.Stats.RogueChunk != 0 {
		t.Fatalf("got unexpected rogue chunks")
	}
	if sample.Stats.StampLoadFailed != 0 {
		t.Fatalf("got unexpected failed stamp loads")
	}
	if sample.Stats.InvalidStamp != 0 {
		t.Fatalf("got unexpected invalid stamps")
	}
}