github.com/ethersphere/bee/v2@v2.2.0/pkg/file/pipeline/feeder/feeder_test.go

// Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package feeder_test

import (
	"bytes"
	"encoding/binary"
	"errors"
	"testing"

	"github.com/ethersphere/bee/v2/pkg/file/pipeline"
	"github.com/ethersphere/bee/v2/pkg/file/pipeline/feeder"
)

// TestFeeder tests that partial writes work correctly: only full chunks are
// forwarded to the next writer, while incomplete data stays buffered.
func TestFeeder(t *testing.T) {
	t.Parallel()
	var (
		chunkSize = 5
		data      = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}
	)

	for _, tc := range []struct {
		name      string // test case name
		dataSize  []int  // size of each successive write
		expWrites int    // expected number of chunk writes
		writeData []byte // expected payload of the last written chunk
		span      uint64 // expected span (payload length) of the last written chunk
	}{
		{
			name:      "empty write",
			dataSize:  []int{0},
			expWrites: 0,
		},
		{
			name:      "less than chunk, no writes",
			dataSize:  []int{3},
			expWrites: 0,
		},
		{
			name:      "one chunk, one write",
			dataSize:  []int{5},
			expWrites: 1,
			writeData: []byte{1, 2, 3, 4, 5},
			span:      5,
		},
		{
			name:      "two chunks, two writes",
			dataSize:  []int{10},
			expWrites: 2,
			writeData: []byte{6, 7, 8, 9, 10},
			span:      5,
		},
		{
			name:      "half chunk, then full one, one write",
			dataSize:  []int{3, 5},
			expWrites: 1,
			writeData: []byte{1, 2, 3, 4, 5},
			span:      5,
		},
		{
			name:      "half chunk, another two halves, one write",
			dataSize:  []int{3, 2, 3},
			expWrites: 1,
			writeData: []byte{1, 2, 3, 4, 5},
			span:      5,
		},
		{
			name:      "half chunk, another two halves, another full, two writes",
			dataSize:  []int{3, 2, 3, 5},
			expWrites: 2,
			writeData: []byte{6, 7, 8, 9, 10},
			span:      5,
		},
	} {
		tc := tc // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			var results pipeline.PipeWriteArgs
			rr := newMockResultWriter(&results)
			cf := feeder.NewChunkFeederWriter(chunkSize, rr)
			i := 0 // running offset into data
			for _, v := range tc.dataSize {
				d := data[i : i+v]
				n, err := cf.Write(d)
				if err != nil {
					t.Fatal(err)
				}
				if n != v {
					t.Fatalf("wrote %d bytes but expected %d bytes", n, v)
				}
				i += v
			}

			if tc.expWrites == 0 && results.Data != nil {
				t.Fatal("expected no write but got one")
			}

			if rr.count != tc.expWrites {
				t.Fatalf("expected %d writes but got %d", tc.expWrites, rr.count)
			}

			// the first 8 bytes of a written chunk hold the span; the
			// payload follows
			if results.Data != nil && !bytes.Equal(tc.writeData, results.Data[8:]) {
				t.Fatalf("expected write data %v but got %v", tc.writeData, results.Data[8:])
			}

			if tc.span > 0 {
				v := binary.LittleEndian.Uint64(results.Data[:8])
				if v != tc.span {
					t.Fatalf("span mismatch, got %d want %d", v, tc.span)
				}
			}
		})
	}
}
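
// The span assertions in TestFeeder above (and TestFeederFlush below) rely on
// the framing the feeder hands to ChainWrite: an 8-byte little-endian span
// followed by the payload, where for these leaf chunks the span equals the
// payload length. The helpers below are a minimal illustrative sketch of that
// framing; they are not part of the feeder package.

// encodeSpan prefixes payload with its length as an 8-byte little-endian span.
func encodeSpan(payload []byte) []byte {
	buf := make([]byte, 8+len(payload))
	binary.LittleEndian.PutUint64(buf[:8], uint64(len(payload)))
	copy(buf[8:], payload)
	return buf
}

// decodeSpan splits a framed chunk into its span and payload.
func decodeSpan(chunk []byte) (uint64, []byte) {
	return binary.LittleEndian.Uint64(chunk[:8]), chunk[8:]
}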

// TestFeederFlush tests that the feeder flushes the data in the buffer
// correctly on Sum(): the buffered remainder (even when empty) is written
// out as a final, shorter chunk.
func TestFeederFlush(t *testing.T) {
	t.Parallel()
	var (
		chunkSize = 5
		data      = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}
	)

	for _, tc := range []struct {
		name      string // test case name
		dataSize  []int  // size of each successive write
		expWrites int    // expected number of chunk writes
		writeData []byte // expected payload of the last written chunk
		span      uint64 // expected span (payload length) of the last written chunk
	}{
		{
			name:      "empty file",
			dataSize:  []int{0},
			expWrites: 1,
		},
		{
			name:      "less than chunk, one write",
			dataSize:  []int{3},
			expWrites: 1,
			writeData: []byte{1, 2, 3},
		},
		{
			name:      "one chunk, one write",
			dataSize:  []int{5},
			expWrites: 1,
			writeData: []byte{1, 2, 3, 4, 5},
			span:      5,
		},
		{
			name:      "two chunks, two writes",
			dataSize:  []int{10},
			expWrites: 2,
			writeData: []byte{6, 7, 8, 9, 10},
			span:      5,
		},
		{
			name:      "half chunk, then full one, two writes",
			dataSize:  []int{3, 5},
			expWrites: 2,
			writeData: []byte{6, 7, 8},
			span:      3,
		},
		{
			name:      "half chunk, another two halves, two writes",
			dataSize:  []int{3, 2, 3},
			expWrites: 2,
			writeData: []byte{6, 7, 8},
			span:      3,
		},
		{
			name:      "half chunk, another two halves, another full, three writes",
			dataSize:  []int{3, 2, 3, 5},
			expWrites: 3,
			writeData: []byte{11, 12, 13},
			span:      3,
		},
	} {
		tc := tc // capture range variable for the parallel subtest
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()

			var results pipeline.PipeWriteArgs
			rr := newMockResultWriter(&results)
			cf := feeder.NewChunkFeederWriter(chunkSize, rr)
			i := 0 // running offset into data
			for _, v := range tc.dataSize {
				d := data[i : i+v]
				n, err := cf.Write(d)
				if err != nil {
					t.Fatal(err)
				}
				if n != v {
					t.Fatalf("wrote %d bytes but expected %d bytes", n, v)
				}
				i += v
			}

			// flush the tail via Sum; the error is ignored because the mock's
			// Sum always returns one and only the flush side effect is asserted
			_, _ = cf.Sum()

			if tc.expWrites == 0 && results.Data != nil {
				t.Fatal("expected no write but got one")
			}

			if rr.count != tc.expWrites {
				t.Fatalf("expected %d writes but got %d", tc.expWrites, rr.count)
			}

			// the first 8 bytes of a written chunk hold the span; the
			// payload follows
			if results.Data != nil && !bytes.Equal(tc.writeData, results.Data[8:]) {
				t.Fatalf("expected write data %v but got %v", tc.writeData, results.Data[8:])
			}

			if tc.span > 0 {
				v := binary.LittleEndian.Uint64(results.Data[:8])
				if v != tc.span {
					t.Fatalf("span mismatch, got %d want %d", v, tc.span)
				}
			}
		})
	}
}

// countingResultWriter counts how many writes were done to it
// and passes the results to the caller using the pointer provided
// in the constructor.
type countingResultWriter struct {
	target *pipeline.PipeWriteArgs
	count  int
}

func newMockResultWriter(b *pipeline.PipeWriteArgs) *countingResultWriter {
	return &countingResultWriter{target: b}
}

// ChainWrite records the write and copies the arguments into the target for
// later inspection by the test.
func (w *countingResultWriter) ChainWrite(p *pipeline.PipeWriteArgs) error {
	w.count++
	*w.target = *p
	return nil
}

// Sum is never expected to succeed in these tests; it always returns an error.
func (w *countingResultWriter) Sum() ([]byte, error) {
	return nil, errors.New("not implemented")
}