github.com/ethersphere/bee/v2@v2.2.0/pkg/storer/storer_test.go

// Copyright 2023 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package storer_test

import (
	"context"
	"os"
	"path"
	"testing"
	"time"

	"github.com/ethersphere/bee/v2/pkg/log"
	"github.com/ethersphere/bee/v2/pkg/postage"
	batchstore "github.com/ethersphere/bee/v2/pkg/postage/batchstore/mock"
	"github.com/ethersphere/bee/v2/pkg/storage"
	"github.com/ethersphere/bee/v2/pkg/storage/migration"
	"github.com/ethersphere/bee/v2/pkg/storer"
	"github.com/ethersphere/bee/v2/pkg/storer/internal"
	cs "github.com/ethersphere/bee/v2/pkg/storer/internal/chunkstore"
	pinstore "github.com/ethersphere/bee/v2/pkg/storer/internal/pinning"
	"github.com/ethersphere/bee/v2/pkg/storer/internal/transaction"
	"github.com/ethersphere/bee/v2/pkg/storer/internal/upload"
	localmigration "github.com/ethersphere/bee/v2/pkg/storer/migration"
	"github.com/ethersphere/bee/v2/pkg/swarm"
	"github.com/ethersphere/bee/v2/pkg/topology"
	kademlia "github.com/ethersphere/bee/v2/pkg/topology/mock"
)

// verifyChunks asserts that every chunk in chunks is (or, when has is false,
// is not) present in the chunk store.
func verifyChunks(
	t *testing.T,
	st transaction.Storage,
	chunks []swarm.Chunk,
	has bool,
) {
	t.Helper()

	for _, ch := range chunks {
		hasFound, err := st.ChunkStore().Has(context.TODO(), ch.Address())
		if err != nil {
			t.Fatalf("ChunkStore.Has(...): unexpected error: %v", err)
		}

		if hasFound != has {
			t.Fatalf("unexpected chunk has state: want %t have %t", has, hasFound)
		}
	}
}

// verifyChunkRefCount asserts that the retrieval index entry of every chunk in
// chunks has a reference count of exactly one.
func verifyChunkRefCount(
	t *testing.T,
	st transaction.ReadOnlyStore,
	chunks []swarm.Chunk,
) {
	t.Helper()

	for _, ch := range chunks {
		_ = st.IndexStore().Iterate(storage.Query{
			Factory: func() storage.Item { return new(cs.RetrievalIndexItem) },
		}, func(r storage.Result) (bool, error) {
			entry := r.Entry.(*cs.RetrievalIndexItem)
			if entry.Address.Equal(ch.Address()) && entry.RefCnt != 1 {
				t.Errorf("chunk %s has refCnt=%d", ch.Address(), entry.RefCnt)
			}
			return false, nil
		})
	}
}

// verifySessionInfo asserts chunk presence and, when the chunks are expected
// to be present, checks the split and seen counters of the upload tag.
func verifySessionInfo(
	t *testing.T,
	st transaction.Storage,
	sessionID uint64,
	chunks []swarm.Chunk,
	has bool,
) {
	t.Helper()

	verifyChunks(t, st, chunks, has)

	if has {
		tagInfo, err := upload.TagInfo(st.IndexStore(), sessionID)
		if err != nil {
			t.Fatalf("upload.TagInfo(...): unexpected error: %v", err)
		}

		if tagInfo.Split != uint64(len(chunks)) {
			t.Fatalf("unexpected split chunk count in tag: want %d have %d", len(chunks), tagInfo.Split)
		}
		if tagInfo.Seen != 0 {
			t.Fatalf("unexpected seen chunk count in tag: want 0 have %d", tagInfo.Seen)
		}
	}
}

// verifyPinCollection asserts the existence of the pin collection rooted at
// root and the presence of its chunks.
func verifyPinCollection(
	t *testing.T,
	st transaction.Storage,
	root swarm.Chunk,
	chunks []swarm.Chunk,
	has bool,
) {
	t.Helper()

	hasFound, err := pinstore.HasPin(st.IndexStore(), root.Address())
	if err != nil {
		t.Fatalf("pinstore.HasPin(...): unexpected error: %v", err)
	}

	if hasFound != has {
		t.Fatalf("unexpected pin collection state: want %t have %t", has, hasFound)
	}

	verifyChunks(t, st, chunks, has)
}

// TestMain lowers the sharky shard limit for the duration of the test run and
// restores the default before exiting.
func TestMain(m *testing.M) {
	storer.ReplaceSharkyShardLimit(4)
	code := m.Run()
	storer.ReplaceSharkyShardLimit(32)
	os.Exit(code)
}

func TestNew(t *testing.T) {
	t.Parallel()

	t.Run("inmem with options", func(t *testing.T) {
		t.Parallel()

		opts := dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second)

		lstore := makeInmemStorer(t, opts)
		if lstore == nil {
			t.Fatalf("storer should be instantiated")
		}
	})
	t.Run("disk default options", func(t *testing.T) {
		t.Parallel()

		lstore := makeDiskStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second))
		if lstore == nil {
			t.Fatalf("storer should be instantiated")
		}
	})
	t.Run("disk with options", func(t *testing.T) {
		t.Parallel()

		opts := dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second)
		opts.CacheCapacity = 10

		lstore := makeDiskStorer(t, opts)
		if lstore == nil {
			t.Fatalf("storer should be instantiated")
		}
	})

	t.Run("migration on latest version", func(t *testing.T) {
		t.Parallel()

		t.Run("inmem", func(t *testing.T) {
			t.Parallel()

			lstore := makeInmemStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second))
			assertStorerVersion(t, lstore.Storage().IndexStore(), "")
		})

		t.Run("disk", func(t *testing.T) {
			t.Parallel()

			lstore := makeDiskStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second))
			assertStorerVersion(t, lstore.Storage().IndexStore(), path.Join(t.TempDir(), "sharky"))
		})
	})
}

// dbTestOps returns storer options suitable for tests, substituting mock
// implementations for any nil dependencies.
func dbTestOps(baseAddr swarm.Address, reserveCapacity int, bs postage.Storer, radiusSetter topology.SetStorageRadiuser, reserveWakeUpTime time.Duration) *storer.Options {
	opts := storer.DefaultOptions()

	if radiusSetter == nil {
		radiusSetter = kademlia.NewTopologyDriver()
	}

	if bs == nil {
		bs = batchstore.New()
	}

	opts.Address = baseAddr
	opts.RadiusSetter = radiusSetter
	opts.ReserveCapacity = reserveCapacity
	opts.Batchstore = bs
	opts.ReserveWakeUpDuration = reserveWakeUpTime
	opts.Logger = log.Noop

	return opts
}

// assertStorerVersion checks that the store has been migrated to the latest
// known schema version.
func assertStorerVersion(t *testing.T, r storage.Reader, sharkyPath string) {
	t.Helper()

	current, err := migration.Version(r, "migration")
	if err != nil {
		t.Fatalf("migration.Version(...): unexpected error: %v", err)
	}

	expected := migration.LatestVersion(localmigration.AfterInitSteps(sharkyPath, 4, internal.NewInmemStorage(), log.Noop))
	if current != expected {
		t.Fatalf("storer is not migrated to latest version; got %d, expected %d", current, expected)
	}
}

// makeInmemStorer constructs an in-memory storer and registers a cleanup that
// closes it.
func makeInmemStorer(t *testing.T, opts *storer.Options) *storer.DB {
	t.Helper()

	lstore, err := storer.New(context.Background(), "", opts)
	if err != nil {
		t.Fatalf("New(...): unexpected error: %v", err)
	}

	t.Cleanup(func() {
		err := lstore.Close()
		if err != nil {
			t.Fatalf("Close(): unexpected error: %v", err)
		}
	})

	return lstore
}

// makeDiskStorer constructs a disk-backed storer in a temporary directory and
// registers a cleanup that closes it.
func makeDiskStorer(t *testing.T, opts *storer.Options) *storer.DB {
	t.Helper()

	lstore, err := storer.New(context.Background(), t.TempDir(), opts)
	if err != nil {
		t.Fatalf("New(...): unexpected error: %v", err)
	}

	t.Cleanup(func() {
		err := lstore.Close()
		if err != nil {
			t.Fatalf("Close(): unexpected error closing storer: %v", err)
		}
	})
	return lstore
}

func newStorer(tb testing.TB, path string, opts *storer.Options) (*storer.DB, error) {
	tb.Helper()
	lstore, err := storer.New(context.Background(), path, opts)
	if err == nil {
		tb.Cleanup(func() {
			err := lstore.Close()
			if err != nil {
				tb.Errorf("failed closing storer: %v", err)
			}
		})
	}

	return lstore, err
}

func diskStorer(tb testing.TB, opts *storer.Options) func() (*storer.DB, error) {
	tb.Helper()
	return func() (*storer.DB, error) {
		return newStorer(tb, tb.TempDir(), opts)
	}
}

func memStorer(tb testing.TB, opts *storer.Options) func() (*storer.DB, error) {
	tb.Helper()
	return func() (*storer.DB, error) {
		return newStorer(tb, "", opts)
	}
}
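// Usage sketch (hypothetical, not part of the original file): the helpers
// above compose roughly as follows in a test. The chunk generator is assumed
// to come from pkg/storage/testing (imported as chunktest); the storer under
// test is an in-memory instance.
//
//	func TestVerifyChunksSketch(t *testing.T) {
//		t.Parallel()
//
//		newLocalstore := memStorer(t, dbTestOps(swarm.RandAddress(t), 0, nil, nil, time.Second))
//		lstore, err := newLocalstore()
//		if err != nil {
//			t.Fatalf("memStorer(...): unexpected error: %v", err)
//		}
//
//		// Nothing has been stored yet, so none of the chunks should be found.
//		chunks := chunktest.GenerateTestRandomChunks(4)
//		verifyChunks(t, lstore.Storage(), chunks, false)
//	}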