github.com/ethersphere/bee/v2@v2.2.0/pkg/file/joiner/joiner_test.go

     1  // Copyright 2020 The Swarm Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package joiner_test
     6  
     7  import (
     8  	"bytes"
     9  	"context"
    10  	"crypto/rand"
    11  	"encoding/binary"
    12  	"errors"
    13  	"fmt"
    14  	"io"
    15  	mrand "math/rand"
    16  	"sync"
    17  	"testing"
    18  	"time"
    19  
    20  	"github.com/ethersphere/bee/v2/pkg/cac"
    21  	"github.com/ethersphere/bee/v2/pkg/file/joiner"
    22  	"github.com/ethersphere/bee/v2/pkg/file/pipeline/builder"
    23  	"github.com/ethersphere/bee/v2/pkg/file/redundancy"
    24  	"github.com/ethersphere/bee/v2/pkg/file/redundancy/getter"
    25  	"github.com/ethersphere/bee/v2/pkg/file/splitter"
    26  	filetest "github.com/ethersphere/bee/v2/pkg/file/testing"
    27  	"github.com/ethersphere/bee/v2/pkg/log"
    28  	"github.com/ethersphere/bee/v2/pkg/storage"
    29  	"github.com/ethersphere/bee/v2/pkg/storage/inmemchunkstore"
    30  	testingc "github.com/ethersphere/bee/v2/pkg/storage/testing"
    31  	mockstorer "github.com/ethersphere/bee/v2/pkg/storer/mock"
    32  	"github.com/ethersphere/bee/v2/pkg/swarm"
    33  	"github.com/ethersphere/bee/v2/pkg/util/testutil"
    34  	"github.com/ethersphere/bee/v2/pkg/util/testutil/pseudorand"
    35  	"gitlab.com/nolash/go-mockbytes"
    36  	"golang.org/x/sync/errgroup"
    37  )
    38  
    39  // nolint:paralleltest,tparallel,thelper
    40  
    41  func TestJoiner_ErrReferenceLength(t *testing.T) {
    42  	t.Parallel()
    43  
    44  	store := inmemchunkstore.New()
    45  	_, _, err := joiner.New(context.Background(), store, store, swarm.ZeroAddress)
    46  
    47  	if !errors.Is(err, storage.ErrReferenceLength) {
    48  		t.Fatalf("expected ErrReferenceLength for reference %x, got %v", swarm.ZeroAddress, err)
    49  	}
    50  }
    51  
    52  // TestJoinerSingleChunk verifies that a newly created joiner returns the data stored
    53  // in the store when the reference is one single chunk.
    54  func TestJoinerSingleChunk(t *testing.T) {
    55  	t.Parallel()
    56  
    57  	store := inmemchunkstore.New()
    58  
    59  	ctx, cancel := context.WithCancel(context.Background())
    60  	defer cancel()
    61  
    62  	// create the chunk to be joined
    63  	mockAddrHex := fmt.Sprintf("%064s", "2a")
    64  	mockAddr := swarm.MustParseHexAddress(mockAddrHex)
    65  	mockData := []byte("foo")
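        	// chunk data starts with an 8-byte little-endian span holding the data length;
        	// 0x03 in the first byte encodes len("foo") == 3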
    66  	mockDataLengthBytes := make([]byte, 8)
    67  	mockDataLengthBytes[0] = 0x03
    68  	mockChunk := swarm.NewChunk(mockAddr, append(mockDataLengthBytes, mockData...))
    69  	err := store.Put(ctx, mockChunk)
    70  	if err != nil {
    71  		t.Fatal(err)
    72  	}
    73  
    74  	// read back data and compare
    75  	joinReader, l, err := joiner.New(ctx, store, store, mockAddr)
    76  	if err != nil {
    77  		t.Fatal(err)
    78  	}
    79  	if l != int64(len(mockData)) {
    80  		t.Fatalf("expected join data length %d, got %d", len(mockData), l)
    81  	}
    82  	joinData, err := io.ReadAll(joinReader)
    83  	if err != nil {
    84  		t.Fatal(err)
    85  	}
    86  	if !bytes.Equal(joinData, mockData) {
    87  		t.Fatalf("retrieved data '%x' not like original data '%x'", joinData, mockData)
    88  	}
    89  }
    90  
    91  // TestJoinerDecryptingStore_NormalChunk verifies that a joiner using the
    92  // decrypting store can retrieve a normal chunk which is not encrypted.
    93  func TestJoinerDecryptingStore_NormalChunk(t *testing.T) {
    94  	t.Parallel()
    95  
    96  	st := inmemchunkstore.New()
    97  
    98  	ctx, cancel := context.WithCancel(context.Background())
    99  	defer cancel()
   100  
   101  	// create the chunk to be joined
   102  	mockAddrHex := fmt.Sprintf("%064s", "2a")
   103  	mockAddr := swarm.MustParseHexAddress(mockAddrHex)
   104  	mockData := []byte("foo")
   105  	mockDataLengthBytes := make([]byte, 8)
   106  	mockDataLengthBytes[0] = 0x03
   107  	mockChunk := swarm.NewChunk(mockAddr, append(mockDataLengthBytes, mockData...))
   108  	err := st.Put(ctx, mockChunk)
   109  	if err != nil {
   110  		t.Fatal(err)
   111  	}
   112  
   113  	// read back data and compare
   114  	joinReader, l, err := joiner.New(ctx, st, st, mockAddr)
   115  	if err != nil {
   116  		t.Fatal(err)
   117  	}
   118  	if l != int64(len(mockData)) {
   119  		t.Fatalf("expected join data length %d, got %d", len(mockData), l)
   120  	}
   121  	joinData, err := io.ReadAll(joinReader)
   122  	if err != nil {
   123  		t.Fatal(err)
   124  	}
   125  	if !bytes.Equal(joinData, mockData) {
   126  		t.Fatalf("retrieved data '%x' not like original data '%x'", joinData, mockData)
   127  	}
   128  }
   129  
   130  // TestJoinerWithReference verifies that a chunk reference is correctly resolved
   131  // and the underlying data is returned.
   132  func TestJoinerWithReference(t *testing.T) {
   133  	t.Parallel()
   134  
   135  	st := inmemchunkstore.New()
   136  
   137  	ctx, cancel := context.WithCancel(context.Background())
   138  	defer cancel()
   139  
   140  	// create root chunk and two data chunks referenced in the root chunk
   141  	rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
   142  	err := st.Put(ctx, rootChunk)
   143  	if err != nil {
   144  		t.Fatal(err)
   145  	}
   146  
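        	// the root chunk data is the 8-byte span followed by two SectionSize-long
        	// references pointing to the child data chunks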
   147  	firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
   148  	firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
   149  	err = st.Put(ctx, firstChunk)
   150  	if err != nil {
   151  		t.Fatal(err)
   152  	}
   153  
   154  	secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
   155  	secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
   156  	err = st.Put(ctx, secondChunk)
   157  	if err != nil {
   158  		t.Fatal(err)
   159  	}
   160  
   161  	// read back data and compare
   162  	joinReader, l, err := joiner.New(ctx, st, st, rootChunk.Address())
   163  	if err != nil {
   164  		t.Fatal(err)
   165  	}
   166  	if l != int64(swarm.ChunkSize*2) {
   167  		t.Fatalf("expected join data length %d, got %d", swarm.ChunkSize*2, l)
   168  	}
   169  
   170  	resultBuffer := make([]byte, swarm.ChunkSize)
   171  	n, err := joinReader.Read(resultBuffer)
   172  	if err != nil {
   173  		t.Fatal(err)
   174  	}
   175  	if n != len(resultBuffer) {
   176  		t.Fatalf("expected read count %d, got %d", len(resultBuffer), n)
   177  	}
   178  	if !bytes.Equal(resultBuffer, firstChunk.Data()[8:]) {
   179  		t.Fatalf("expected resultbuffer %v, got %v", firstChunk.Data()[8:], resultBuffer)
   180  	}
   181  }
   182  
   183  func TestJoinerMalformed(t *testing.T) {
   184  	t.Parallel()
   185  
   186  	store := inmemchunkstore.New()
   187  
   188  	ctx, cancel := context.WithCancel(context.Background())
   189  	defer cancel()
   190  
   191  	subTrie := []byte{8085: 1}
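        	// []byte{8085: 1} allocates an 8086-byte slice (larger than one chunk), so
        	// FeedPipeline produces a small subtrie (two data chunks under an
        	// intermediate chunk) rather than a single data chunk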
   192  	pb := builder.NewPipelineBuilder(ctx, store, false, 0)
   193  	c1addr, _ := builder.FeedPipeline(ctx, pb, bytes.NewReader(subTrie))
   194  
   195  	chunk2 := testingc.GenerateTestRandomChunk()
   196  	err := store.Put(ctx, chunk2)
   197  	if err != nil {
   198  		t.Fatal(err)
   199  	}
   200  
   201  	// root chunk
   202  	rootChunkData := make([]byte, 8+64)
   203  	binary.LittleEndian.PutUint64(rootChunkData[:8], uint64(swarm.ChunkSize*2))
   204  
   205  	copy(rootChunkData[8:], c1addr.Bytes())
   206  	copy(rootChunkData[8+32:], chunk2.Address().Bytes())
   207  
   208  	rootChunk, err := cac.NewWithDataSpan(rootChunkData)
   209  	if err != nil {
   210  		t.Fatal(err)
   211  	}
   212  
   213  	err = store.Put(ctx, rootChunk)
   214  	if err != nil {
   215  		t.Fatal(err)
   216  	}
   217  
   218  	joinReader, _, err := joiner.New(ctx, store, store, rootChunk.Address())
   219  	if err != nil {
   220  		t.Fatal(err)
   221  	}
   222  
   223  	resultBuffer := make([]byte, swarm.ChunkSize)
   224  	_, err = joinReader.Read(resultBuffer)
   225  	if !errors.Is(err, joiner.ErrMalformedTrie) {
   226  		t.Fatalf("expected %v, got %v", joiner.ErrMalformedTrie, err)
   227  	}
   228  }
   229  
   230  func TestEncryptDecrypt(t *testing.T) {
   231  	t.Parallel()
   232  
   233  	var tests = []struct {
   234  		chunkLength int
   235  	}{
   236  		{10},
   237  		{100},
   238  		{1000},
   239  		{4095},
   240  		{4096},
   241  		{4097},
   242  		{1000000},
   243  	}
   244  
   245  	for _, tt := range tests {
   246  		tt := tt
   247  		t.Run(fmt.Sprintf("Encrypt %d bytes", tt.chunkLength), func(t *testing.T) {
   248  			t.Parallel()
   249  
   250  			store := inmemchunkstore.New()
   251  
   252  			g := mockbytes.New(0, mockbytes.MockTypeStandard).WithModulus(255)
   253  			testData, err := g.SequentialBytes(tt.chunkLength)
   254  			if err != nil {
   255  				t.Fatal(err)
   256  			}
   257  			ctx, cancel := context.WithCancel(context.Background())
   258  			defer cancel()
   259  			pipe := builder.NewPipelineBuilder(ctx, store, true, 0)
   260  			testDataReader := bytes.NewReader(testData)
   261  			resultAddress, err := builder.FeedPipeline(ctx, pipe, testDataReader)
   262  			if err != nil {
   263  				t.Fatal(err)
   264  			}
   265  			reader, l, err := joiner.New(context.Background(), store, store, resultAddress)
   266  			if err != nil {
   267  				t.Fatal(err)
   268  			}
   269  
   270  			if l != int64(len(testData)) {
   271  				t.Fatalf("expected join data length %d, got %d", len(testData), l)
   272  			}
   273  
   274  			totalGot := make([]byte, tt.chunkLength)
   275  			index := 0
   276  			resultBuffer := make([]byte, swarm.ChunkSize)
   277  
   278  			for index < tt.chunkLength {
   279  				n, err := reader.Read(resultBuffer)
   280  				if err != nil && !errors.Is(err, io.EOF) {
   281  					t.Fatal(err)
   282  				}
   283  				copy(totalGot[index:], resultBuffer[:n])
   284  				index += n
   285  			}
   286  
   287  			if !bytes.Equal(testData, totalGot) {
   288  				t.Fatal("input data and output data do not match")
   289  			}
   290  		})
   291  	}
   292  }
   293  
   294  func TestSeek(t *testing.T) {
   295  	t.Parallel()
   296  
   297  	seed := time.Now().UnixNano()
   298  
   299  	for _, tc := range []struct {
   300  		name string
   301  		size int64
   302  	}{
   303  		{
   304  			name: "one byte",
   305  			size: 1,
   306  		},
   307  		{
   308  			name: "a few bytes",
   309  			size: 10,
   310  		},
   311  		{
   312  			name: "a few bytes more",
   313  			size: 65,
   314  		},
   315  		{
   316  			name: "almost a chunk",
   317  			size: 4095,
   318  		},
   319  		{
   320  			name: "one chunk",
   321  			size: swarm.ChunkSize,
   322  		},
   323  		{
   324  			name: "a few chunks",
   325  			size: 10 * swarm.ChunkSize,
   326  		},
   327  		{
   328  			name: "a few chunks and a change",
   329  			size: 10*swarm.ChunkSize + 84,
   330  		},
   331  		{
   332  			name: "a few chunks more",
   333  			size: 2*swarm.ChunkSize*swarm.ChunkSize + 1000,
   334  		},
   335  	} {
   336  		tc := tc
   337  		t.Run(tc.name, func(t *testing.T) {
   338  			t.Parallel()
   339  
   340  			ctx, cancel := context.WithCancel(context.Background())
   341  			defer cancel()
   342  
   343  			store := inmemchunkstore.New()
   344  			testutil.CleanupCloser(t, store)
   345  
   346  			data := testutil.RandBytesWithSeed(t, int(tc.size), seed)
   347  			s := splitter.NewSimpleSplitter(store)
   348  			addr, err := s.Split(ctx, io.NopCloser(bytes.NewReader(data)), tc.size, false)
   349  			if err != nil {
   350  				t.Fatal(err)
   351  			}
   352  
   353  			j, _, err := joiner.New(ctx, store, store, addr)
   354  			if err != nil {
   355  				t.Fatal(err)
   356  			}
   357  
   358  			validateRead := func(t *testing.T, name string, i int) {
   359  				t.Helper()
   360  
   361  				got := make([]byte, swarm.ChunkSize)
   362  				count, err := j.Read(got)
   363  				if err != nil {
   364  					t.Fatal(err)
   365  				}
   366  				if count == 0 {
   367  					t.Errorf("read with seek from %s to %v: got count 0", name, i)
   368  				}
   369  				got = got[:count]
   370  				want := data[i : i+count]
   371  				if !bytes.Equal(got, want) {
   372  					t.Fatal("data mismatch")
   373  				}
   374  			}
   375  
   376  			// seek to 10 random locations
   377  			for i := int64(0); i < 10 && i < tc.size; i++ {
   378  				exp := mrand.Int63n(tc.size)
   379  				n, err := j.Seek(exp, io.SeekStart)
   380  				if err != nil {
   381  					t.Fatal(err)
   382  				}
   383  				if n != exp {
   384  					t.Errorf("seek to %v from start, want %v", n, exp)
   385  				}
   386  
   387  				validateRead(t, "start", int(n))
   388  			}
   389  			if _, err := j.Seek(0, io.SeekStart); err != nil {
   390  				t.Fatal(err)
   391  			}
   392  
   393  			// seek to all possible locations from current position
   394  			for i := int64(1); i < 10 && i < tc.size; i++ {
   395  				exp := mrand.Int63n(tc.size)
   396  				n, err := j.Seek(exp, io.SeekCurrent)
   397  				if err != nil {
   398  					t.Fatal(err)
   399  				}
   400  				if n != exp {
   401  					t.Errorf("seek to %v from current, want %v", n, exp)
   402  				}
   403  
   404  				validateRead(t, "current", int(n))
   405  				if _, err := j.Seek(0, io.SeekStart); err != nil {
   406  					t.Fatal(err)
   407  				}
   408  
   409  			}
   410  			if _, err := j.Seek(0, io.SeekStart); err != nil {
   411  				t.Fatal(err)
   412  			}
   413  
   414  			// seek to 10 random locations from end
   415  			for i := int64(1); i < 10; i++ {
   416  				exp := mrand.Int63n(tc.size)
   417  				if exp == 0 {
   418  					exp = 1
   419  				}
   420  				n, err := j.Seek(exp, io.SeekEnd)
   421  				if err != nil {
   422  					t.Fatalf("seek from end, exp %d size %d error: %v", exp, tc.size, err)
   423  				}
   424  				want := tc.size - exp
   425  				if n != want {
   426  					t.Errorf("seek to %v from end, want %v, size %v, exp %v", n, want, tc.size, exp)
   427  				}
   428  
   429  				validateRead(t, "end", int(n))
   430  			}
   431  			if _, err := j.Seek(0, io.SeekStart); err != nil {
   432  				t.Fatal(err)
   433  			}
   434  			// seek overflow for a few bytes
   435  			for i := int64(1); i < 5; i++ {
   436  				n, err := j.Seek(tc.size+i, io.SeekStart)
   437  				if !errors.Is(err, io.EOF) {
   438  					t.Errorf("seek overflow to %v: got error %v, want %v", i, err, io.EOF)
   439  				}
   440  
   441  				if n != 0 {
   442  					t.Errorf("seek overflow to %v: got %v, want 0", i, n)
   443  				}
   444  			}
   445  		})
   446  	}
   447  }
   448  
   449  // TestPrefetch tests that chunks are prefetched to fill up the read buffer.
   450  func TestPrefetch(t *testing.T) {
   451  	t.Parallel()
   452  
   453  	seed := time.Now().UnixNano()
   454  
   455  	for _, tc := range []struct {
   456  		name       string
   457  		size       int64
   458  		bufferSize int
   459  		readOffset int64
   460  		expRead    int
   461  	}{
   462  		{
   463  			name:       "one byte",
   464  			size:       1,
   465  			bufferSize: 1,
   466  			readOffset: 0,
   467  			expRead:    1,
   468  		},
   469  		{
   470  			name:       "one byte",
   471  			size:       1,
   472  			bufferSize: 10,
   473  			readOffset: 0,
   474  			expRead:    1,
   475  		},
   476  		{
   477  			name:       "ten bytes",
   478  			size:       10,
   479  			bufferSize: 5,
   480  			readOffset: 0,
   481  			expRead:    5,
   482  		},
   483  		{
   484  			name:       "thousand bytes",
   485  			size:       1000,
   486  			bufferSize: 100,
   487  			readOffset: 0,
   488  			expRead:    100,
   489  		},
   490  		{
   491  			name:       "thousand bytes",
   492  			size:       1000,
   493  			bufferSize: 100,
   494  			readOffset: 900,
   495  			expRead:    100,
   496  		},
   497  		{
   498  			name:       "thousand bytes",
   499  			size:       1000,
   500  			bufferSize: 100,
   501  			readOffset: 800,
   502  			expRead:    100,
   503  		},
   504  		{
   505  			name:       "one chunk",
   506  			size:       4096,
   507  			bufferSize: 4096,
   508  			readOffset: 0,
   509  			expRead:    4096,
   510  		},
   511  		{
   512  			name:       "one chunk minus a few",
   513  			size:       4096,
   514  			bufferSize: 4093,
   515  			readOffset: 0,
   516  			expRead:    4093,
   517  		},
   518  		{
   519  			name:       "one chunk minus a few",
   520  			size:       4096,
   521  			bufferSize: 4093,
   522  			readOffset: 3,
   523  			expRead:    4093,
   524  		},
   525  		{
   526  			name:       "one byte at the end",
   527  			size:       4096,
   528  			bufferSize: 1,
   529  			readOffset: 4095,
   530  			expRead:    1,
   531  		},
   532  		{
   533  			name:       "one byte at the end",
   534  			size:       8192,
   535  			bufferSize: 1,
   536  			readOffset: 8191,
   537  			expRead:    1,
   538  		},
   539  		{
   540  			name:       "one byte at the end",
   541  			size:       8192,
   542  			bufferSize: 1,
   543  			readOffset: 8190,
   544  			expRead:    1,
   545  		},
   546  		{
   547  			name:       "one byte at the end",
   548  			size:       100000,
   549  			bufferSize: 1,
   550  			readOffset: 99999,
   551  			expRead:    1,
   552  		},
   553  		{
   554  			name:       "10kb",
   555  			size:       10000,
   556  			bufferSize: 5,
   557  			readOffset: 5,
   558  			expRead:    5,
   559  		},
   560  
   561  		{
   562  			name:       "10kb",
   563  			size:       10000,
   564  			bufferSize: 1500,
   565  			readOffset: 5,
   566  			expRead:    1500,
   567  		},
   568  
   569  		{
   570  			name:       "100kb",
   571  			size:       100000,
   572  			bufferSize: 8000,
   573  			readOffset: 100,
   574  			expRead:    8000,
   575  		},
   576  
   577  		{
   578  			name:       "100kb",
   579  			size:       100000,
   580  			bufferSize: 80000,
   581  			readOffset: 100,
   582  			expRead:    80000,
   583  		},
   584  
   585  		{
   586  			name:       "10megs",
   587  			size:       10000000,
   588  			bufferSize: 8000,
   589  			readOffset: 990000,
   590  			expRead:    8000,
   591  		},
   592  		{
   593  			name:       "10megs",
   594  			size:       10000000,
   595  			bufferSize: 80000,
   596  			readOffset: 900000,
   597  			expRead:    80000,
   598  		},
   599  		{
   600  			name:       "10megs",
   601  			size:       10000000,
   602  			bufferSize: 8000000,
   603  			readOffset: 900000,
   604  			expRead:    8000000,
   605  		},
   606  		{
   607  			name:       "one meg",
   608  			size:       1000000,
   609  			bufferSize: 2000000,
   610  			readOffset: 900000,
   611  			expRead:    100000,
   612  		},
   613  	} {
   614  		tc := tc
   615  		t.Run(tc.name, func(t *testing.T) {
   616  			t.Parallel()
   617  
   618  			ctx, cancel := context.WithCancel(context.Background())
   619  			defer cancel()
   620  
   621  			store := inmemchunkstore.New()
   622  			testutil.CleanupCloser(t, store)
   623  
   624  			data := testutil.RandBytesWithSeed(t, int(tc.size), seed)
   625  			s := splitter.NewSimpleSplitter(store)
   626  			addr, err := s.Split(ctx, io.NopCloser(bytes.NewReader(data)), tc.size, false)
   627  			if err != nil {
   628  				t.Fatal(err)
   629  			}
   630  
   631  			j, _, err := joiner.New(ctx, store, store, addr)
   632  			if err != nil {
   633  				t.Fatal(err)
   634  			}
   635  			b := make([]byte, tc.bufferSize)
   636  			n, err := j.ReadAt(b, tc.readOffset)
   637  			if err != nil {
   638  				t.Fatal(err)
   639  			}
   640  			if n != tc.expRead {
   641  				t.Errorf("read %d bytes out of %d", n, tc.expRead)
   642  			}
   643  			ro := int(tc.readOffset)
   644  			if !bytes.Equal(b[:n], data[ro:ro+n]) {
   645  				t.Error("buffer does not match generated data")
   646  			}
   647  		})
   648  	}
   649  }
   650  
   651  func TestJoinerReadAt(t *testing.T) {
   652  	t.Parallel()
   653  
   654  	store := inmemchunkstore.New()
   655  
   656  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
   657  	defer cancel()
   658  
   659  	// create root chunk with 2 references and the referenced data chunks
   660  	rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
   661  	err := store.Put(ctx, rootChunk)
   662  	if err != nil {
   663  		t.Fatal(err)
   664  	}
   665  
   666  	firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
   667  	firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
   668  	err = store.Put(ctx, firstChunk)
   669  	if err != nil {
   670  		t.Fatal(err)
   671  	}
   672  
   673  	secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
   674  	secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
   675  	err = store.Put(ctx, secondChunk)
   676  	if err != nil {
   677  		t.Fatal(err)
   678  	}
   679  
   680  	j, _, err := joiner.New(ctx, store, store, rootChunk.Address())
   681  	if err != nil {
   682  		t.Fatal(err)
   683  	}
   684  
   685  	b := make([]byte, swarm.ChunkSize)
   686  	_, err = j.ReadAt(b, swarm.ChunkSize)
   687  	if err != nil {
   688  		t.Fatal(err)
   689  	}
   690  
   691  	if !bytes.Equal(b, secondChunk.Data()[8:]) {
   692  		t.Fatal("data read at offset not equal to expected chunk")
   693  	}
   694  }
   695  
   696  // TestJoinerOneLevel tests the retrieval of two data chunks immediately
   697  // below the root chunk level.
   698  func TestJoinerOneLevel(t *testing.T) {
   699  	t.Parallel()
   700  
   701  	store := inmemchunkstore.New()
   702  
   703  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
   704  	defer cancel()
   705  
   706  	// create root chunk with 2 references and the referenced data chunks
   707  	rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
   708  	err := store.Put(ctx, rootChunk)
   709  	if err != nil {
   710  		t.Fatal(err)
   711  	}
   712  
   713  	firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
   714  	firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
   715  	err = store.Put(ctx, firstChunk)
   716  	if err != nil {
   717  		t.Fatal(err)
   718  	}
   719  
   720  	secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
   721  	secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
   722  	err = store.Put(ctx, secondChunk)
   723  	if err != nil {
   724  		t.Fatal(err)
   725  	}
   726  
   727  	j, _, err := joiner.New(ctx, store, store, rootChunk.Address())
   728  	if err != nil {
   729  		t.Fatal(err)
   730  	}
   731  
   732  	// verify first chunk content
   733  	outBuffer := make([]byte, swarm.ChunkSize)
   734  	c, err := j.Read(outBuffer)
   735  	if err != nil {
   736  		t.Fatal(err)
   737  	}
   738  	if c != swarm.ChunkSize {
   739  		t.Fatalf("expected firstchunk read count %d, got %d", swarm.ChunkSize, c)
   740  	}
   741  	if !bytes.Equal(outBuffer, firstChunk.Data()[8:]) {
   742  		t.Fatalf("firstchunk data mismatch, expected %x, got %x", outBuffer, firstChunk.Data()[8:])
   743  	}
   744  
   745  	// verify second chunk content
   746  	c, err = j.Read(outBuffer)
   747  	if err != nil {
   748  		t.Fatal(err)
   749  	}
   750  	if c != swarm.ChunkSize {
   751  		t.Fatalf("expected secondchunk read count %d, got %d", swarm.ChunkSize, c)
   752  	}
   753  	if !bytes.Equal(outBuffer, secondChunk.Data()[8:]) {
   754  		t.Fatalf("secondchunk data mismatch, expected %x, got %x", outBuffer, secondChunk.Data()[8:])
   755  	}
   756  
   757  	// verify io.EOF is returned again after the first time it is returned
   758  	_, err = j.Read(outBuffer)
   759  	if !errors.Is(err, io.EOF) {
   760  		t.Fatal("expected io.EOF")
   761  	}
   762  
   763  	_, err = j.Read(outBuffer)
   764  	if !errors.Is(err, io.EOF) {
   765  		t.Fatal("expected io.EOF")
   766  	}
   767  }
   768  
   769  // TestJoinerTwoLevelsAcrossChunk tests the retrieval of data chunks below
   770  // the first intermediate level, across two intermediate chunks.
   771  // Last chunk has sub-chunk length.
   772  func TestJoinerTwoLevelsAcrossChunk(t *testing.T) {
   773  	t.Parallel()
   774  
   775  	store := inmemchunkstore.New()
   776  
   777  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
   778  	defer cancel()
   779  
   780  	// create root chunk with 2 references and two intermediate chunks with references
   781  	rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*swarm.Branches+42, swarm.SectionSize*2)
   782  	err := store.Put(ctx, rootChunk)
   783  	if err != nil {
   784  		t.Fatal(err)
   785  	}
   786  
   787  	firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
   788  	firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize*swarm.Branches, swarm.ChunkSize)
   789  	err = store.Put(ctx, firstChunk)
   790  	if err != nil {
   791  		t.Fatal(err)
   792  	}
   793  
   794  	secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
   795  	secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, 42, swarm.SectionSize)
   796  	err = store.Put(ctx, secondChunk)
   797  	if err != nil {
   798  		t.Fatal(err)
   799  	}
   800  
   801  	// create 128+1 chunks for all references in the intermediate chunks
   802  	cursor := 8
   803  	for i := 0; i < swarm.Branches; i++ {
   804  		chunkAddressBytes := firstChunk.Data()[cursor : cursor+swarm.SectionSize]
   805  		chunkAddress := swarm.NewAddress(chunkAddressBytes)
   806  		ch := filetest.GenerateTestRandomFileChunk(chunkAddress, swarm.ChunkSize, swarm.ChunkSize)
   807  		err := store.Put(ctx, ch)
   808  		if err != nil {
   809  			t.Fatal(err)
   810  		}
   811  		cursor += swarm.SectionSize
   812  	}
   813  	chunkAddressBytes := secondChunk.Data()[8:]
   814  	chunkAddress := swarm.NewAddress(chunkAddressBytes)
   815  	ch := filetest.GenerateTestRandomFileChunk(chunkAddress, 42, 42)
   816  	err = store.Put(ctx, ch)
   817  	if err != nil {
   818  		t.Fatal(err)
   819  	}
   820  
   821  	j, _, err := joiner.New(ctx, store, store, rootChunk.Address())
   822  	if err != nil {
   823  		t.Fatal(err)
   824  	}
   825  
   826  	// read back all the chunks and verify
   827  	b := make([]byte, swarm.ChunkSize)
   828  	for i := 0; i < swarm.Branches; i++ {
   829  		c, err := j.Read(b)
   830  		if err != nil {
   831  			t.Fatal(err)
   832  		}
   833  		if c != swarm.ChunkSize {
   834  			t.Fatalf("chunk %d expected read %d bytes; got %d", i, swarm.ChunkSize, c)
   835  		}
   836  	}
   837  	c, err := j.Read(b)
   838  	if err != nil {
   839  		t.Fatal(err)
   840  	}
   841  	if c != 42 {
   842  		t.Fatalf("last chunk expected read %d bytes; got %d", 42, c)
   843  	}
   844  }
   845  
   846  func TestJoinerIterateChunkAddresses(t *testing.T) {
   847  	t.Parallel()
   848  
   849  	store := inmemchunkstore.New()
   850  
   851  	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
   852  	defer cancel()
   853  
   854  	// create root chunk with 2 references and the referenced data chunks
   855  	rootChunk := filetest.GenerateTestRandomFileChunk(swarm.ZeroAddress, swarm.ChunkSize*2, swarm.SectionSize*2)
   856  	err := store.Put(ctx, rootChunk)
   857  	if err != nil {
   858  		t.Fatal(err)
   859  	}
   860  
   861  	firstAddress := swarm.NewAddress(rootChunk.Data()[8 : swarm.SectionSize+8])
   862  	firstChunk := filetest.GenerateTestRandomFileChunk(firstAddress, swarm.ChunkSize, swarm.ChunkSize)
   863  	err = store.Put(ctx, firstChunk)
   864  	if err != nil {
   865  		t.Fatal(err)
   866  	}
   867  
   868  	secondAddress := swarm.NewAddress(rootChunk.Data()[swarm.SectionSize+8:])
   869  	secondChunk := filetest.GenerateTestRandomFileChunk(secondAddress, swarm.ChunkSize, swarm.ChunkSize)
   870  	err = store.Put(ctx, secondChunk)
   871  	if err != nil {
   872  		t.Fatal(err)
   873  	}
   874  
   875  	createdAddresses := []swarm.Address{rootChunk.Address(), firstAddress, secondAddress}
   876  
   877  	j, _, err := joiner.New(ctx, store, store, rootChunk.Address())
   878  	if err != nil {
   879  		t.Fatal(err)
   880  	}
   881  
   882  	foundAddresses := make(map[string]struct{})
   883  	var foundAddressesMu sync.Mutex
   884  
   885  	err = j.IterateChunkAddresses(func(addr swarm.Address) error {
   886  		foundAddressesMu.Lock()
   887  		defer foundAddressesMu.Unlock()
   888  
   889  		foundAddresses[addr.String()] = struct{}{}
   890  		return nil
   891  	})
   892  	if err != nil {
   893  		t.Fatal(err)
   894  	}
   895  
   896  	if len(createdAddresses) != len(foundAddresses) {
   897  		t.Fatalf("expected to find %d addresses, got %d", len(createdAddresses), len(foundAddresses))
   898  	}
   899  
   900  	checkAddressFound := func(t *testing.T, foundAddresses map[string]struct{}, address swarm.Address) {
   901  		t.Helper()
   902  
   903  		if _, ok := foundAddresses[address.String()]; !ok {
   904  			t.Fatalf("expected address %s not found", address.String())
   905  		}
   906  	}
   907  
   908  	for _, createdAddress := range createdAddresses {
   909  		checkAddressFound(t, foundAddresses, createdAddress)
   910  	}
   911  }
   912  
   913  func TestJoinerIterateChunkAddresses_Encrypted(t *testing.T) {
   914  	t.Parallel()
   915  
   916  	store := inmemchunkstore.New()
   917  
   918  	g := mockbytes.New(0, mockbytes.MockTypeStandard).WithModulus(255)
   919  	testData, err := g.SequentialBytes(10000)
   920  	if err != nil {
   921  		t.Fatal(err)
   922  	}
   923  	ctx, cancel := context.WithCancel(context.Background())
   924  	defer cancel()
   925  	pipe := builder.NewPipelineBuilder(ctx, store, true, 0)
   926  	testDataReader := bytes.NewReader(testData)
   927  	resultAddress, err := builder.FeedPipeline(ctx, pipe, testDataReader)
   928  	if err != nil {
   929  		t.Fatal(err)
   930  	}
   931  	j, l, err := joiner.New(context.Background(), store, store, resultAddress)
   932  	if err != nil {
   933  		t.Fatal(err)
   934  	}
   935  
   936  	if l != int64(len(testData)) {
   937  		t.Fatalf("expected join data length %d, got %d", len(testData), l)
   938  	}
   939  
   940  	foundAddresses := make(map[string]struct{})
   941  	var foundAddressesMu sync.Mutex
   942  
   943  	err = j.IterateChunkAddresses(func(addr swarm.Address) error {
   944  		foundAddressesMu.Lock()
   945  		defer foundAddressesMu.Unlock()
   946  
   947  		foundAddresses[addr.String()] = struct{}{}
   948  		return nil
   949  	})
   950  	if err != nil {
   951  		t.Fatal(err)
   952  	}
   953  
   954  	if l := len(foundAddresses); l != 4 {
   955  		t.Fatalf("got %d addresses, want 4", l)
   956  	}
   957  
   958  	for v := range foundAddresses {
   959  		// this is 64 because a 32-byte address in hex is 64 characters
   960  		if len(v) != 64 {
   961  			t.Fatalf("got wrong ref size %d, %s", len(v), v)
   962  		}
   963  	}
   964  }
   965  
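        // mockPutter wraps a ChunkStore and intercepts Put calls: the first shardCnt
        // chunks are buffered as data shards and the next parityCnt as parities instead
        // of being stored, while any later chunks (e.g. the root chunk) are written
        // through to the underlying store. Buffered chunks are only persisted when the
        // test releases them via store(cnt), which lets the tests control exactly how
        // many erasure-coded chunks are retrievable at any given point.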
   966  type mockPutter struct {
   967  	storage.ChunkStore
   968  	shards, parities chan swarm.Chunk
   969  	done             chan struct{}
   970  	mu               sync.Mutex
   971  }
   972  
   973  func newMockPutter(store storage.ChunkStore, shardCnt, parityCnt int) *mockPutter {
   974  	return &mockPutter{
   975  		ChunkStore: store,
   976  		done:       make(chan struct{}, 1),
   977  		shards:     make(chan swarm.Chunk, shardCnt),
   978  		parities:   make(chan swarm.Chunk, parityCnt),
   979  	}
   980  }
   981  
   982  func (m *mockPutter) Put(ctx context.Context, ch swarm.Chunk) error {
   983  	m.mu.Lock()
   984  	defer m.mu.Unlock()
   985  	if len(m.shards) < cap(m.shards) {
   986  		m.shards <- ch
   987  		return nil
   988  	}
   989  	if len(m.parities) < cap(m.parities) {
   990  		m.parities <- ch
   991  		return nil
   992  	}
   993  	err := m.ChunkStore.Put(ctx, ch) // use passed context
   994  	select {
   995  	case m.done <- struct{}{}:
   996  	default:
   997  	}
   998  	return err
   999  }
  1000  
  1001  func (m *mockPutter) wait(ctx context.Context) {
  1002  	select {
  1003  	case <-m.done:
  1004  	case <-ctx.Done():
  1005  	}
  1006  	m.mu.Lock()
  1007  	close(m.parities)
  1008  	close(m.shards)
  1009  	m.mu.Unlock()
  1010  }
  1011  
  1012  func (m *mockPutter) store(cnt int) error {
  1013  	n := 0
  1014  	m.mu.Lock()
  1015  	defer m.mu.Unlock()
  1016  	for ch := range m.parities {
  1017  		if err := m.ChunkStore.Put(context.Background(), ch); err != nil {
  1018  			return err
  1019  		}
  1020  		n++
  1021  		if n == cnt {
  1022  			return nil
  1023  		}
  1024  	}
  1025  	for ch := range m.shards {
  1026  		if err := m.ChunkStore.Put(context.Background(), ch); err != nil {
  1027  			return err
  1028  		}
  1029  		n++
  1030  		if n == cnt {
  1031  			break
  1032  		}
  1033  	}
  1034  	return nil
  1035  }
  1036  
  1037  // nolint:thelper
  1038  func TestJoinerRedundancy(t *testing.T) {
  1039  	t.Parallel()
  1040  	for _, tc := range []struct {
  1041  		rLevel       redundancy.Level
  1042  		encryptChunk bool
  1043  	}{
  1044  		{
  1045  			redundancy.MEDIUM,
  1046  			false,
  1047  		},
  1048  		{
  1049  			redundancy.MEDIUM,
  1050  			true,
  1051  		},
  1052  		{
  1053  			redundancy.STRONG,
  1054  			false,
  1055  		},
  1056  		{
  1057  			redundancy.STRONG,
  1058  			true,
  1059  		},
  1060  		{
  1061  			redundancy.INSANE,
  1062  			false,
  1063  		},
  1064  		{
  1065  			redundancy.INSANE,
  1066  			true,
  1067  		},
  1068  		{
  1069  			redundancy.PARANOID,
  1070  			false,
  1071  		},
  1072  		{
  1073  			redundancy.PARANOID,
  1074  			true,
  1075  		},
  1076  	} {
  1077  		tc := tc
  1078  		t.Run(fmt.Sprintf("redundancy=%d encryption=%t", tc.rLevel, tc.encryptChunk), func(t *testing.T) {
  1079  			ctx, cancel := context.WithCancel(context.Background())
  1080  			defer cancel()
  1081  			shardCnt := tc.rLevel.GetMaxShards()
  1082  			parityCnt := tc.rLevel.GetParities(shardCnt)
  1083  			if tc.encryptChunk {
  1084  				shardCnt = tc.rLevel.GetMaxEncShards()
  1085  				parityCnt = tc.rLevel.GetEncParities(shardCnt)
  1086  			}
  1087  			store := inmemchunkstore.New()
  1088  			putter := newMockPutter(store, shardCnt, parityCnt)
  1089  			pipe := builder.NewPipelineBuilder(ctx, putter, tc.encryptChunk, tc.rLevel)
  1090  			dataChunks := make([]swarm.Chunk, shardCnt)
  1091  			chunkSize := swarm.ChunkSize
  1092  			size := chunkSize
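        			// the last shard is only 5 bytes long so the file ends in a short trailing chunk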
  1093  			for i := 0; i < shardCnt; i++ {
  1094  				if i == shardCnt-1 {
  1095  					size = 5
  1096  				}
  1097  				chunkData := make([]byte, size)
  1098  				_, err := io.ReadFull(rand.Reader, chunkData)
  1099  				if err != nil {
  1100  					t.Fatal(err)
  1101  				}
  1102  				dataChunks[i], err = cac.New(chunkData)
  1103  				if err != nil {
  1104  					t.Fatal(err)
  1105  				}
  1106  				_, err = pipe.Write(chunkData)
  1107  				if err != nil {
  1108  					t.Fatal(err)
  1109  				}
  1110  			}
  1111  
  1112  			// reader init
  1113  			sum, err := pipe.Sum()
  1114  			if err != nil {
  1115  				t.Fatal(err)
  1116  			}
  1117  			swarmAddr := swarm.NewAddress(sum)
  1118  			putter.wait(ctx)
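        			// for encrypted uploads the returned reference is the chunk address followed by
        			// the decryption key, so only the first HashSize bytes address the stored root chunk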
  1119  			_, err = store.Get(ctx, swarm.NewAddress(sum[:swarm.HashSize]))
  1120  			if err != nil {
  1121  				t.Fatal(err)
  1122  			}
  1123  			// all data can be read back
  1124  			readCheck := func(t *testing.T, expErr error) {
  1125  				ctx, cancel := context.WithCancel(context.Background())
  1126  				defer cancel()
  1127  
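        				// configure the redundancy getter via the context: RACE strategy,
        				// fallback enabled, and a one-second decoding timeout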
  1128  				decodeTimeoutStr := time.Second.String()
  1129  				fallback := true
  1130  				s := getter.RACE
  1131  
  1132  				ctx, err := getter.SetConfigInContext(ctx, &s, &fallback, &decodeTimeoutStr, log.Noop)
  1133  				if err != nil {
  1134  					t.Fatal(err)
  1135  				}
  1136  
  1137  				joinReader, rootSpan, err := joiner.New(ctx, store, store, swarmAddr)
  1138  				if err != nil {
  1139  					t.Fatal(err)
  1140  				}
  1141  				// sanity checks
  1142  				expectedRootSpan := chunkSize*(shardCnt-1) + 5
  1143  				if int64(expectedRootSpan) != rootSpan {
  1144  					t.Fatalf("Expected root span %d. Got: %d", expectedRootSpan, rootSpan)
  1145  				}
  1146  				i := 0
  1147  				eg, ectx := errgroup.WithContext(ctx)
  1148  
  1149  			scnt:
  1150  				for ; i < shardCnt; i++ {
  1151  					select {
  1152  					case <-ectx.Done():
  1153  						break scnt
  1154  					default:
  1155  					}
  1156  					i := i
  1157  					eg.Go(func() error {
  1158  						chunkData := make([]byte, chunkSize)
  1159  						n, err := joinReader.ReadAt(chunkData, int64(i*chunkSize))
  1160  						if err != nil {
  1161  							return err
  1162  						}
  1163  						select {
  1164  						case <-ectx.Done():
  1165  							return ectx.Err()
  1166  						default:
  1167  						}
  1168  						expectedChunkData := dataChunks[i].Data()[swarm.SpanSize:]
  1169  						if !bytes.Equal(expectedChunkData, chunkData[:n]) {
  1170  							return fmt.Errorf("data mismatch on chunk position %d", i)
  1171  						}
  1172  						return nil
  1173  					})
  1174  				}
  1175  				err = eg.Wait()
  1176  
  1177  				if !errors.Is(err, expErr) {
  1178  					t.Fatalf("unexpected error reading chunkdata at chunk position %d: expected %v. got %v", i, expErr, err)
  1179  				}
  1180  			}
  1181  			t.Run("no recovery possible with no chunk stored", func(t *testing.T) {
  1182  				readCheck(t, storage.ErrNotFound)
  1183  			})
  1184  
  1185  			if err := putter.store(shardCnt - 1); err != nil {
  1186  				t.Fatal(err)
  1187  			}
  1188  			t.Run("no recovery possible with 1 short of shardCnt chunks stored", func(t *testing.T) {
  1189  				readCheck(t, storage.ErrNotFound)
  1190  			})
  1191  
  1192  			if err := putter.store(1); err != nil {
  1193  				t.Fatal(err)
  1194  			}
  1195  			t.Run("recovery given shardCnt chunks stored", func(t *testing.T) {
  1196  				readCheck(t, nil)
  1197  			})
  1198  
  1199  			if err := putter.store(shardCnt + parityCnt); err != nil {
  1200  				t.Fatal(err)
  1201  			}
  1202  			t.Run("success given shardCnt data chunks stored, no need for recovery", func(t *testing.T) {
  1203  				readCheck(t, nil)
  1204  			})
  1205  			// delete the root chunk; retrieval should still succeed via its replicas given shardCnt data chunks are stored, with no need for erasure recovery
  1206  			if err := store.Delete(ctx, swarm.NewAddress(swarmAddr.Bytes()[:swarm.HashSize])); err != nil {
  1207  				t.Fatal(err)
  1208  			}
  1209  			t.Run("recover from replica if root deleted", func(t *testing.T) {
  1210  				readCheck(t, nil)
  1211  			})
  1212  		})
  1213  	}
  1214  }
  1215  
  1216  // TestJoinerRedundancyMultilevel tests the joiner with all combinations of
  1217  // redundancy level, encryption and size (levels, i.e., the height of the swarm hash tree).
  1218  //
  1219  // The test cases have the following structure:
  1220  //
  1221  //  1. upload a file with a given redundancy level and encryption
  1222  //
  1223  //  2. [positive test] download the file by the reference returned by the upload API response
  1224  //  2. [positive test] download the file by the reference returned by the upload API response.
  1225  //     This uses range queries to target specific (numbers of) chunks of the file structure.
  1226  //     During path traversal in the swarm hash tree, the underlying mockstore (forgetting)
  1227  //     is in 'recording' mode, flagging all the retrieved chunks as chunks to forget.
  1228  //     This simulates the scenario where some of the chunks are lost or not available.
  1229  //  3. [negative test] attempt at downloading the file using once again the same root hash
  1230  //  3. [negative test] attempt to download the file once again using the same root hash
  1231  //     3a. [negative test] download file using NONE without fallback and fail
  1232  //     3b. [negative test] download file using DATA without fallback and fail
  1233  //
  1234  //  4. [positive test] download file using DATA with fallback to allow for
  1235  //     reconstruction via erasure coding and succeed.
  1236  //
  1237  //  5. [positive test] after recovery the reconstructed chunks are saved, so forgetting
  1238  //     no longer applies; repeat 3a/3b, but this time they succeed
  1239  //
  1240  // nolint:thelper
  1241  func TestJoinerRedundancyMultilevel(t *testing.T) {
  1242  	t.Parallel()
  1243  	test := func(t *testing.T, rLevel redundancy.Level, encrypt bool, size int) {
  1244  		t.Helper()
  1245  		store := mockstorer.NewForgettingStore(newChunkStore())
  1246  		seed, err := pseudorand.NewSeed()
  1247  		if err != nil {
  1248  			t.Fatal(err)
  1249  		}
  1250  		dataReader := pseudorand.NewReader(seed, size*swarm.ChunkSize)
  1251  		ctx, cancel := context.WithCancel(context.Background())
  1252  		defer cancel()
  1253  		// ctx = redundancy.SetLevelInContext(ctx, rLevel)
  1254  		ctx = redundancy.SetLevelInContext(ctx, redundancy.NONE)
  1255  		pipe := builder.NewPipelineBuilder(ctx, store, encrypt, rLevel)
  1256  		addr, err := builder.FeedPipeline(ctx, pipe, dataReader)
  1257  		if err != nil {
  1258  			t.Fatal(err)
  1259  		}
  1260  		expRead := swarm.ChunkSize
  1261  		buf := make([]byte, expRead)
  1262  		offset := mrand.Intn(size) * expRead
  1263  		canReadRange := func(t *testing.T, s getter.Strategy, fallback bool, canRead bool) {
  1264  			ctx, cancel := context.WithCancel(context.Background())
  1265  			defer cancel()
  1266  
  1267  			decodingTimeoutStr := time.Second.String()
  1268  
  1269  			ctx, err := getter.SetConfigInContext(ctx, &s, &fallback, &decodingTimeoutStr, log.Noop)
  1270  			if err != nil {
  1271  				t.Fatal(err)
  1272  			}
  1273  
  1274  			j, _, err := joiner.New(ctx, store, store, addr)
  1275  			if err != nil {
  1276  				t.Fatal(err)
  1277  			}
  1278  			n, err := j.ReadAt(buf, int64(offset))
  1279  			if !canRead {
  1280  				if !errors.Is(err, storage.ErrNotFound) && !errors.Is(err, context.DeadlineExceeded) {
  1281  					t.Fatalf("expected error %v or %v. got %v", storage.ErrNotFound, context.DeadlineExceeded, err)
  1282  				}
  1283  				return
  1284  			}
  1285  			if err != nil {
  1286  				t.Fatal(err)
  1287  			}
  1288  			if n != expRead {
  1289  				t.Errorf("read %d bytes out of %d", n, expRead)
  1290  			}
  1291  			_, err = dataReader.Seek(int64(offset), io.SeekStart)
  1292  			if err != nil {
  1293  				t.Fatal(err)
  1294  			}
  1295  			ok, err := dataReader.Match(bytes.NewBuffer(buf), expRead)
  1296  			if err != nil {
  1297  				t.Fatal(err)
  1298  			}
  1299  			if !ok {
  1300  				t.Error("content mismatch")
  1301  			}
  1302  		}
  1303  
  1304  		// first sanity check and recover a range
  1305  		t.Run("NONE w/o fallback CAN retrieve", func(t *testing.T) {
  1306  			store.Record()
  1307  			defer store.Unrecord()
  1308  			canReadRange(t, getter.NONE, false, true)
  1309  		})
  1310  
  1311  		// do not forget the root chunk
  1312  		store.Unmiss(swarm.NewAddress(addr.Bytes()[:swarm.HashSize]))
  1313  		// after we forget the chunks on the way to the range, we should not be able to retrieve
  1314  		t.Run("NONE w/o fallback CANNOT retrieve", func(t *testing.T) {
  1315  			canReadRange(t, getter.NONE, false, false)
  1316  		})
  1317  
  1318  		// we lost a data chunk, we cannot recover using DATA only strategy with no fallback
  1319  		t.Run("DATA w/o fallback CANNOT retrieve", func(t *testing.T) {
  1320  			canReadRange(t, getter.DATA, false, false)
  1321  		})
  1322  
  1323  		if rLevel == 0 {
  1324  			// allowing fallback mode will not help if no redundancy used for upload
  1325  			t.Run("DATA w fallback CANNOT retrieve", func(t *testing.T) {
  1326  				canReadRange(t, getter.DATA, true, false)
  1327  			})
  1328  			return
  1329  		}
  1330  		// allowing fallback mode will make the range retrievable using erasure decoding
  1331  		t.Run("DATA w fallback CAN retrieve", func(t *testing.T) {
  1332  			canReadRange(t, getter.DATA, true, true)
  1333  		})
  1334  		// after the reconstructed data is stored, we can retrieve the range using DATA only mode
  1335  		t.Run("after recovery, NONE w/o fallback CAN retrieve", func(t *testing.T) {
  1336  			canReadRange(t, getter.NONE, false, true)
  1337  		})
  1338  	}
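        	// r2level and encryptChunk select, per redundancy level, the single
        	// (levels, encrypt) combination for which the "incomplete" sub-test below is
        	// actually run; all other incomplete combinations are skipped to save time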
  1339  	r2level := []int{2, 1, 2, 3, 2}
  1340  	encryptChunk := []bool{false, false, true, true, true}
  1341  	for _, rLevel := range []redundancy.Level{0, 1, 2, 3, 4} {
  1342  		rLevel := rLevel
  1343  		// speeding up tests by skipping some of them
  1344  		t.Run(fmt.Sprintf("rLevel=%v", rLevel), func(t *testing.T) {
  1345  			t.Parallel()
  1346  			for _, encrypt := range []bool{false, true} {
  1347  				encrypt := encrypt
  1348  				shardCnt := rLevel.GetMaxShards()
  1349  				if encrypt {
  1350  					shardCnt = rLevel.GetMaxEncShards()
  1351  				}
  1352  				for _, levels := range []int{1, 2, 3} {
  1353  					chunkCnt := 1
  1354  					switch levels {
  1355  					case 1:
  1356  						chunkCnt = 2
  1357  					case 2:
  1358  						chunkCnt = shardCnt + 1
  1359  					case 3:
  1360  						chunkCnt = shardCnt*shardCnt + 1
  1361  					}
  1362  					t.Run(fmt.Sprintf("encrypt=%v levels=%d chunks=%d incomplete", encrypt, levels, chunkCnt), func(t *testing.T) {
  1363  						if r2level[rLevel] != levels || encrypt != encryptChunk[rLevel] {
  1364  							t.Skip("skipping to save time")
  1365  						}
  1366  						test(t, rLevel, encrypt, chunkCnt)
  1367  					})
  1368  					switch levels {
  1369  					case 1:
  1370  						chunkCnt = shardCnt
  1371  					case 2:
  1372  						chunkCnt = shardCnt * shardCnt
  1373  					case 3:
  1374  						continue
  1375  					}
  1376  					t.Run(fmt.Sprintf("encrypt=%v levels=%d chunks=%d full", encrypt, levels, chunkCnt), func(t *testing.T) {
  1377  						test(t, rLevel, encrypt, chunkCnt)
  1378  					})
  1379  				}
  1380  			}
  1381  		})
  1382  	}
  1383  }
  1384  
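        // chunkStore is a minimal in-memory storage.ChunkStore implementation; it backs
        // the forgetting store used in TestJoinerRedundancyMultilevel.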
  1385  type chunkStore struct {
  1386  	mu     sync.Mutex
  1387  	chunks map[string]swarm.Chunk
  1388  }
  1389  
  1390  func newChunkStore() *chunkStore {
  1391  	return &chunkStore{
  1392  		chunks: make(map[string]swarm.Chunk),
  1393  	}
  1394  }
  1395  
  1396  func (c *chunkStore) Get(_ context.Context, addr swarm.Address) (swarm.Chunk, error) {
  1397  	c.mu.Lock()
  1398  	defer c.mu.Unlock()
  1399  
  1400  	chunk, ok := c.chunks[addr.ByteString()]
  1401  	if !ok {
  1402  		return nil, storage.ErrNotFound
  1403  	}
  1404  	return chunk, nil
  1405  }
  1406  
  1407  func (c *chunkStore) Put(_ context.Context, ch swarm.Chunk) error {
  1408  	c.mu.Lock()
  1409  	defer c.mu.Unlock()
  1410  	c.chunks[ch.Address().ByteString()] = swarm.NewChunk(ch.Address(), ch.Data()).WithStamp(ch.Stamp())
  1411  	return nil
  1412  }
  1413  
  1414  func (c *chunkStore) Replace(_ context.Context, ch swarm.Chunk) error {
  1415  	c.mu.Lock()
  1416  	defer c.mu.Unlock()
  1417  	c.chunks[ch.Address().ByteString()] = swarm.NewChunk(ch.Address(), ch.Data()).WithStamp(ch.Stamp())
  1418  	return nil
  1419  
  1420  }
  1421  
  1422  func (c *chunkStore) Has(_ context.Context, addr swarm.Address) (bool, error) {
  1423  	c.mu.Lock()
  1424  	defer c.mu.Unlock()
  1425  
  1426  	_, exists := c.chunks[addr.ByteString()]
  1427  
  1428  	return exists, nil
  1429  }
  1430  
  1431  func (c *chunkStore) Delete(_ context.Context, addr swarm.Address) error {
  1432  	c.mu.Lock()
  1433  	defer c.mu.Unlock()
  1434  
  1435  	delete(c.chunks, addr.ByteString())
  1436  	return nil
  1437  }
  1438  
  1439  func (c *chunkStore) Iterate(_ context.Context, fn storage.IterateChunkFn) error {
  1440  	c.mu.Lock()
  1441  	defer c.mu.Unlock()
  1442  
  1443  	for _, c := range c.chunks {
  1444  		stop, err := fn(c)
  1445  		if err != nil {
  1446  			return err
  1447  		}
  1448  		if stop {
  1449  			return nil
  1450  		}
  1451  	}
  1452  
  1453  	return nil
  1454  }
  1455  
  1456  func (c *chunkStore) Close() error {
  1457  	return nil
  1458  }