github.com/ethersphere/bee/v2@v2.2.0/pkg/file/file_test.go

// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package file_test

import (
	"bytes"
	"context"
	"errors"
	"io"
	"strconv"
	"strings"
	"testing"

	"github.com/ethersphere/bee/v2/pkg/file"
	"github.com/ethersphere/bee/v2/pkg/file/joiner"
	"github.com/ethersphere/bee/v2/pkg/file/pipeline/builder"
	test "github.com/ethersphere/bee/v2/pkg/file/testing"
	"github.com/ethersphere/bee/v2/pkg/storage/inmemchunkstore"
	"github.com/ethersphere/bee/v2/pkg/swarm"
)

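// start and end bound the test vector indices exercised below; the last
// two vectors are skipped, presumably to keep the test runtime down.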
var (
	start = 0
	end   = test.GetVectorCount() - 2
)

// TestSplitThenJoin splits a file with the splitter implementation and
// joins it again with the joiner implementation, verifying that the
// rebuilt data matches the original data that was split.
//
// It uses the same test vectors as the splitter tests to generate the
// necessary data.
func TestSplitThenJoin(t *testing.T) {
	t.Parallel()

	// run one subtest per test vector; the subtest name carries the vector index
	for i := start; i < end; i++ {
		t.Run(strconv.Itoa(i), testSplitThenJoin)
	}
}

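// testSplitThenJoin performs a single split/join roundtrip for the test
// vector whose index is encoded in the subtest name.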
func testSplitThenJoin(t *testing.T) {
	t.Parallel()

	// recover the vector index from the subtest name set by TestSplitThenJoin
	paramstring := strings.Split(t.Name(), "/")
	dataIdx, err := strconv.Atoi(paramstring[1])
	if err != nil {
		t.Fatal(err)
	}

	store := inmemchunkstore.New()
	// build a plain pipeline: no encryption, zero redundancy level
	p := builder.NewPipelineBuilder(context.Background(), store, false, 0)
	// the second return value is the vector's expected root address; it is
	// not needed in this test
	data, _ := test.GetVector(t, dataIdx)

	// first split: feed the data through the pipeline, yielding the root
	// address of the resulting chunk tree
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	dataReader := file.NewSimpleReadCloser(data)
	resultAddress, err := builder.FeedPipeline(ctx, p, dataReader)
	if err != nil {
		t.Fatal(err)
	}

	// then join: reconstruct the data from the chunk store; the joiner
	// also reports the total length of the original data
	r, l, err := joiner.New(ctx, store, store, resultAddress)
	if err != nil {
		t.Fatal(err)
	}
	if l != int64(len(data)) {
		t.Fatalf("expected data length %d, got %d", len(data), l)
	}

	// read from the joiner in chunk-sized steps
	var resultData []byte
	for i := 0; i < len(data); i += swarm.ChunkSize {
		readData := make([]byte, swarm.ChunkSize)
		n, err := r.Read(readData)
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			t.Fatal(err)
		}
		// only the n bytes actually read belong to the file; the rest of
		// the buffer is padding
		resultData = append(resultData, readData[:n]...)
	}

	// compare result
	if !bytes.Equal(resultData, data) {
		t.Fatalf("data mismatch for data length %d", len(data))
	}
}