github.com/ethersphere/bee/v2@v2.2.0/pkg/file/splitter/internal/job_test.go (about)

     1  // Copyright 2020 The Swarm Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package internal_test
     6  
     7  import (
     8  	"context"
     9  	"strconv"
    10  	"strings"
    11  	"testing"
    12  
    13  	"github.com/ethersphere/bee/v2/pkg/file/splitter/internal"
    14  	test "github.com/ethersphere/bee/v2/pkg/file/testing"
    15  	"github.com/ethersphere/bee/v2/pkg/storage/inmemchunkstore"
    16  	"github.com/ethersphere/bee/v2/pkg/swarm"
    17  )
    18  
// Index bounds over the legacy file-hash test vectors exercised by
// TestSplitterJobVector below.
var (
	start = 0
	end   = test.GetVectorCount() // total number of reference vectors available
)
    23  
    24  // TestSplitterJobPartialSingleChunk passes sub-chunk length data to the splitter,
    25  // verifies the correct hash is returned, and that write after Sum/complete Write
    26  // returns error.
    27  func TestSplitterJobPartialSingleChunk(t *testing.T) {
    28  	t.Parallel()
    29  
    30  	store := inmemchunkstore.New()
    31  
    32  	ctx, cancel := context.WithCancel(context.Background())
    33  	defer cancel()
    34  
    35  	data := []byte("foo")
    36  	j := internal.NewSimpleSplitterJob(ctx, store, int64(len(data)), false)
    37  
    38  	c, err := j.Write(data)
    39  	if err != nil {
    40  		t.Fatal(err)
    41  	}
    42  	if c < len(data) {
    43  		t.Fatalf("short write %d", c)
    44  	}
    45  
    46  	hashResult := j.Sum(nil)
    47  	addressResult := swarm.NewAddress(hashResult)
    48  
    49  	bmtHashOfFoo := "2387e8e7d8a48c2a9339c97c1dc3461a9a7aa07e994c5cb8b38fd7c1b3e6ea48"
    50  	address := swarm.MustParseHexAddress(bmtHashOfFoo)
    51  	if !addressResult.Equal(address) {
    52  		t.Fatalf("expected %v, got %v", address, addressResult)
    53  	}
    54  
    55  	_, err = j.Write([]byte("bar"))
    56  	if err == nil {
    57  		t.Fatal("expected error writing after write/sum complete")
    58  	}
    59  }
    60  
    61  // TestSplitterJobVector verifies file hasher results of legacy test vectors
    62  func TestSplitterJobVector(t *testing.T) {
    63  	t.Parallel()
    64  
    65  	for i := start; i < end-2; i++ {
    66  		dataLengthStr := strconv.Itoa(i)
    67  		t.Run(dataLengthStr, testSplitterJobVector)
    68  	}
    69  }
    70  
    71  func testSplitterJobVector(t *testing.T) {
    72  	t.Parallel()
    73  
    74  	var (
    75  		paramstring = strings.Split(t.Name(), "/")
    76  		dataIdx, _  = strconv.ParseInt(paramstring[1], 10, 0)
    77  		store       = inmemchunkstore.New()
    78  	)
    79  
    80  	data, expect := test.GetVector(t, int(dataIdx))
    81  	ctx, cancel := context.WithCancel(context.Background())
    82  	defer cancel()
    83  	j := internal.NewSimpleSplitterJob(ctx, store, int64(len(data)), false)
    84  
    85  	for i := 0; i < len(data); i += swarm.ChunkSize {
    86  		l := swarm.ChunkSize
    87  		if len(data)-i < swarm.ChunkSize {
    88  			l = len(data) - i
    89  		}
    90  		c, err := j.Write(data[i : i+l])
    91  		if err != nil {
    92  			t.Fatal(err)
    93  		}
    94  		if c < l {
    95  			t.Fatalf("short write %d", c)
    96  		}
    97  	}
    98  
    99  	actualBytes := j.Sum(nil)
   100  	actual := swarm.NewAddress(actualBytes)
   101  
   102  	if !expect.Equal(actual) {
   103  		t.Fatalf("expected %v, got %v", expect, actual)
   104  	}
   105  }