github.com/alexdevranger/node-1.8.27@v0.0.0-20221128213301-aa5841e41d2d/swarm/storage/chunker_test.go

     1  // Copyright 2016 The go-ethereum Authors
     2  // This file is part of the go-dubxcoin library.
     3  //
     4  // The go-dubxcoin library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-dubxcoin library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-dubxcoin library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package storage
    18  
    19  import (
    20  	"bytes"
    21  	"context"
    22  	"encoding/binary"
    23  	"fmt"
    24  	"io"
    25  	"testing"
    26  
    27  	"github.com/alexdevranger/node-1.8.27/swarm/testutil"
    28  	"golang.org/x/crypto/sha3"
    29  )
    30  
    31  /*
    32  Tests the tree and pyramid chunkers: splitting, joining and appending random byte slices, error propagation from broken readers, and split/join benchmarks
    33  */
    34  
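        // The helper below is an illustrative, assumed sketch (it is not called by any
        // test in this file): it shows the minimal split/join round trip that the tests
        // below verify, using only identifiers that already appear in this package.
        func exampleSplitJoin(n int) ([]byte, error) {
        	ctx := context.Background()
        	data := testutil.RandomReader(42, n) // seed 42 is arbitrary
        	putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)

        	// Split the stream into a chunk tree rooted at addr.
        	addr, wait, err := TreeSplit(ctx, data, int64(n), putGetter)
        	if err != nil {
        		return nil, err
        	}
        	// Wait until all chunks have been stored.
        	if err := wait(ctx); err != nil {
        		return nil, err
        	}

        	// Join lazily reassembles the content from the root address.
        	reader := TreeJoin(ctx, addr, putGetter, 0)
        	buf := make([]byte, n)
        	if _, err := reader.Read(buf); err != nil && err != io.EOF {
        		return nil, err
        	}
        	return buf, nil
        }
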
    35  type test interface {
    36  	Fatalf(string, ...interface{})
    37  	Logf(string, ...interface{})
    38  }
    39  
    40  type chunkerTester struct {
    41  	inputs map[uint64][]byte
    42  	t      test
    43  }
    44  
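        // newTestHasherStore returns a hasherStore over the given ChunkStore using the
        // named hash function, with the encryption flag disabled.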
    45  func newTestHasherStore(store ChunkStore, hash string) *hasherStore {
    46  	return NewHasherStore(store, MakeHashFunc(hash), false)
    47  }
    48  
    49  func testRandomBrokenData(n int, tester *chunkerTester) {
    50  	data := testutil.RandomReader(1, n)
    51  	brokendata := brokenLimitReader(data, n, n/2)
    52  
    53  	buf := make([]byte, n)
    54  	_, err := brokendata.Read(buf)
    55  	if err == nil || err.Error() != "Broken reader" {
    56  		tester.t.Fatalf("expected \"Broken reader\" error from the broken reader, got: %v", err)
    57  	}
    58  
    59  	data = testutil.RandomReader(2, n)
    60  	brokendata = brokenLimitReader(data, n, n/2)
    61  
    62  	putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)
    63  
    64  	expectedError := fmt.Errorf("Broken reader")
    65  	ctx := context.Background()
    66  	key, _, err := TreeSplit(ctx, brokendata, int64(n), putGetter)
    67  	if err == nil || err.Error() != expectedError.Error() {
    68  		tester.t.Fatalf("expected error %v, got %v", expectedError, err)
    69  	}
    70  	tester.t.Logf(" Address = %v\n", key)
    71  }
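
        // brokenLimitReader is defined elsewhere in this package. The type below is only
        // an assumed sketch of its behaviour, inferred from the checks above: any read
        // that would cross the error offset fails immediately with the "Broken reader"
        // error, so even the very first full-size Read fails.
        type sketchBrokenReader struct {
        	r     io.Reader
        	errAt int // offset at which reads start to fail
        	off   int
        }

        func (b *sketchBrokenReader) Read(p []byte) (int, error) {
        	if b.off+len(p) > b.errAt {
        		return 0, fmt.Errorf("Broken reader")
        	}
        	n, err := b.r.Read(p)
        	b.off += n
        	return n, err
        }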
    72  
    73  func testRandomData(usePyramid bool, hash string, n int, tester *chunkerTester) Address {
    74  	if tester.inputs == nil {
    75  		tester.inputs = make(map[uint64][]byte)
    76  	}
    77  	input, found := tester.inputs[uint64(n)]
    78  	var data io.Reader
    79  	if !found {
    80  		input = testutil.RandomBytes(1, n)
    81  		data = bytes.NewReader(input)
    82  		tester.inputs[uint64(n)] = input
    83  	} else {
    84  		data = io.LimitReader(bytes.NewReader(input), int64(n))
    85  	}
    86  
    87  	putGetter := newTestHasherStore(NewMapChunkStore(), hash)
    88  
    89  	var addr Address
    90  	var wait func(context.Context) error
    91  	var err error
    92  	ctx := context.TODO()
    93  	if usePyramid {
    94  		addr, wait, err = PyramidSplit(ctx, data, putGetter, putGetter)
    95  	} else {
    96  		addr, wait, err = TreeSplit(ctx, data, int64(n), putGetter)
    97  	}
    98  	if err != nil {
    99  		tester.t.Fatalf(err.Error())
   100  	}
   101  	tester.t.Logf(" Address = %v\n", addr)
   102  	err = wait(ctx)
   103  	if err != nil {
   104  		tester.t.Fatalf(err.Error())
   105  	}
   106  
   107  	reader := TreeJoin(ctx, addr, putGetter, 0)
   108  	output := make([]byte, n)
   109  	r, err := reader.Read(output)
   110  	if r != n || err != io.EOF {
   111  		tester.t.Fatalf("read error  read: %v  n = %v  err = %v\n", r, n, err)
   112  	}
   113  	if input != nil {
   114  		if !bytes.Equal(output, input) {
   115  			tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input, output)
   116  		}
   117  	}
   118  
   119  	// testing partial read
   120  	for i := 1; i < n; i += 10000 {
   121  		readableLength := n - i
   122  		r, err := reader.ReadAt(output, int64(i))
   123  		if r != readableLength || err != io.EOF {
   124  			tester.t.Fatalf("readAt error with offset %v  read: %v  expected: %v  err = %v\n", i, r, readableLength, err)
   125  		}
   126  		if input != nil {
   127  			if !bytes.Equal(output[:readableLength], input[i:]) {
   128  				tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input[i:], output[:readableLength])
   129  			}
   130  		}
   131  	}
   132  
   133  	return addr
   134  }
   135  
   136  func TestSha3ForCorrectness(t *testing.T) {
   137  	tester := &chunkerTester{t: t}
   138  
   139  	size := 4096
   140  	input := make([]byte, size+8)
   141  	binary.LittleEndian.PutUint64(input[:8], uint64(size))
   142  
   143  	// The hashers below consume the byte slices directly, so no reader is needed.
   144  
   145  	rawSha3 := sha3.NewLegacyKeccak256()
   146  	rawSha3.Reset()
   147  	rawSha3.Write(input)
   148  	rawSha3Output := rawSha3.Sum(nil)
   149  
   150  	sha3FromMakeFunc := MakeHashFunc(SHA3Hash)()
   151  	sha3FromMakeFunc.ResetWithLength(input[:8])
   152  	sha3FromMakeFunc.Write(input[8:])
   153  	sha3FromMakeFuncOutput := sha3FromMakeFunc.Sum(nil)
   154  
   155  	if len(rawSha3Output) != len(sha3FromMakeFuncOutput) {
   156  		tester.t.Fatalf("original SHA3 and abstracted SHA3 have different lengths %v:%v\n", len(rawSha3Output), len(sha3FromMakeFuncOutput))
   157  	}
   158  
   159  	if !bytes.Equal(rawSha3Output, sha3FromMakeFuncOutput) {
   160  		tester.t.Fatalf("original SHA3 and abstracted SHA3 mismatch %v:%v\n", rawSha3Output, sha3FromMakeFuncOutput)
   161  	}
   162  
   163  }
   164  
   165  func TestDataAppend(t *testing.T) {
   166  	sizes := []int{1, 1, 1, 4095, 4096, 4097, 1, 1, 1, 123456, 2345678, 2345678}
   167  	appendSizes := []int{4095, 4096, 4097, 1, 1, 1, 8191, 8192, 8193, 9000, 3000, 5000}
   168  
   169  	tester := &chunkerTester{t: t}
   170  	for i := range sizes {
   171  		n := sizes[i]
   172  		m := appendSizes[i]
   173  
   174  		if tester.inputs == nil {
   175  			tester.inputs = make(map[uint64][]byte)
   176  		}
   177  		input, found := tester.inputs[uint64(n)]
   178  		var data io.Reader
   179  		if !found {
   180  			input = testutil.RandomBytes(i, n)
   181  			data = bytes.NewReader(input)
   182  			tester.inputs[uint64(n)] = input
   183  		} else {
   184  			data = io.LimitReader(bytes.NewReader(input), int64(n))
   185  		}
   186  
   187  		store := NewMapChunkStore()
   188  		putGetter := newTestHasherStore(store, SHA3Hash)
   189  
   190  		ctx := context.TODO()
   191  		addr, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
   192  		if err != nil {
   193  			tester.t.Fatalf(err.Error())
   194  		}
   195  		err = wait(ctx)
   196  		if err != nil {
   197  			tester.t.Fatalf(err.Error())
   198  		}
   199  		// create an append data stream
   200  		appendInput, found := tester.inputs[uint64(m)]
   201  		var appendData io.Reader
   202  		if !found {
   203  			appendInput = testutil.RandomBytes(i, m)
   204  			appendData = bytes.NewReader(appendInput)
   205  			tester.inputs[uint64(m)] = appendInput
   206  		} else {
   207  			appendData = io.LimitReader(bytes.NewReader(appendInput), int64(m))
   208  		}
   209  
   210  		putGetter = newTestHasherStore(store, SHA3Hash)
   211  		newAddr, wait, err := PyramidAppend(ctx, addr, appendData, putGetter, putGetter)
   212  		if err != nil {
   213  			tester.t.Fatalf(err.Error())
   214  		}
   215  		err = wait(ctx)
   216  		if err != nil {
   217  			tester.t.Fatalf(err.Error())
   218  		}
   219  
   220  		reader := TreeJoin(ctx, newAddr, putGetter, 0)
   221  		newOutput := make([]byte, n+m)
   222  		r, err := reader.Read(newOutput)
   223  		if r != (n + m) {
   224  			tester.t.Fatalf("read error  read: %v  n = %v  m = %v  err = %v\n", r, n, m, err)
   225  		}
   226  
   227  		newInput := append(input, appendInput...)
   228  		if !bytes.Equal(newOutput, newInput) {
   229  			tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", newInput, newOutput)
   230  		}
   231  	}
   232  }
   233  
   234  func TestRandomData(t *testing.T) {
   235  	// This test only validates files up to a relatively short length, as the tree chunker slows down drastically for larger inputs.
   236  	// Validation of longer files is done by TestLocalStoreAndRetrieve in the swarm package.
   237  	//sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 12287, 12288, 12289, 524288, 524288 + 1, 524288 + 4097, 7 * 524288, 7*524288 + 1, 7*524288 + 4097}
   238  	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4097, 8191, 8192, 12288, 12289, 524288}
   239  	tester := &chunkerTester{t: t}
   240  
   241  	for _, s := range sizes {
   242  		treeChunkerAddress := testRandomData(false, SHA3Hash, s, tester)
   243  		pyramidChunkerAddress := testRandomData(true, SHA3Hash, s, tester)
   244  		if treeChunkerAddress.String() != pyramidChunkerAddress.String() {
   245  			tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerAddress.String(), pyramidChunkerAddress.String())
   246  		}
   247  	}
   248  
   249  	for _, s := range sizes {
   250  		treeChunkerAddress := testRandomData(false, BMTHash, s, tester)
   251  		pyramidChunkerAddress := testRandomData(true, BMTHash, s, tester)
   252  		if treeChunkerAddress.String() != pyramidChunkerAddress.String() {
   253  			tester.t.Fatalf("tree chunker and pyramid chunker key mismatch for size %v\n TC: %v\n PC: %v\n", s, treeChunkerAddress.String(), pyramidChunkerAddress.String())
   254  		}
   255  	}
   256  }
   257  
   258  func TestRandomBrokenData(t *testing.T) {
   259  	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 12287, 12288, 12289, 123456, 2345678}
   260  	tester := &chunkerTester{t: t}
   261  	for _, s := range sizes {
   262  		testRandomBrokenData(s, tester)
   263  	}
   264  }
   265  
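        // benchReadAll drains the reader by issuing ReadAt calls in 1000-byte steps
        // across its full size, discarding the data and any errors.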
   266  func benchReadAll(reader LazySectionReader) {
   267  	size, _ := reader.Size(context.TODO(), nil)
   268  	output := make([]byte, 1000)
   269  	for pos := int64(0); pos < size; pos += 1000 {
   270  		reader.ReadAt(output, pos)
   271  	}
   272  }
   273  
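        // benchmarkSplitJoin measures a full round trip over n random bytes: pyramid
        // split, wait for storage to complete, then join and read the content back.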
   274  func benchmarkSplitJoin(n int, t *testing.B) {
   275  	t.ReportAllocs()
   276  	for i := 0; i < t.N; i++ {
   277  		data := testutil.RandomReader(i, n)
   278  
   279  		putGetter := newTestHasherStore(NewMapChunkStore(), SHA3Hash)
   280  		ctx := context.TODO()
   281  		key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
   282  		if err != nil {
   283  			t.Fatalf(err.Error())
   284  		}
   285  		err = wait(ctx)
   286  		if err != nil {
   287  			t.Fatalf(err.Error())
   288  		}
   289  		reader := TreeJoin(ctx, key, putGetter, 0)
   290  		benchReadAll(reader)
   291  	}
   292  }
   293  
   294  func benchmarkSplitTreeSHA3(n int, t *testing.B) {
   295  	t.ReportAllocs()
   296  	for i := 0; i < t.N; i++ {
   297  		data := testutil.RandomReader(i, n)
   298  		putGetter := newTestHasherStore(&FakeChunkStore{}, SHA3Hash)
   299  
   300  		ctx := context.Background()
   301  		_, wait, err := TreeSplit(ctx, data, int64(n), putGetter)
   302  		if err != nil {
   303  			t.Fatalf(err.Error())
   304  		}
   305  		err = wait(ctx)
   306  		if err != nil {
   307  			t.Fatalf(err.Error())
   308  		}
   309  
   310  	}
   311  }
   312  
   313  func benchmarkSplitTreeBMT(n int, t *testing.B) {
   314  	t.ReportAllocs()
   315  	for i := 0; i < t.N; i++ {
   316  		data := testutil.RandomReader(i, n)
   317  		putGetter := newTestHasherStore(&FakeChunkStore{}, BMTHash)
   318  
   319  		ctx := context.Background()
   320  		_, wait, err := TreeSplit(ctx, data, int64(n), putGetter)
   321  		if err != nil {
   322  			t.Fatalf(err.Error())
   323  		}
   324  		err = wait(ctx)
   325  		if err != nil {
   326  			t.Fatalf(err.Error())
   327  		}
   328  	}
   329  }
   330  
   331  func benchmarkSplitPyramidBMT(n int, t *testing.B) {
   332  	t.ReportAllocs()
   333  	for i := 0; i < t.N; i++ {
   334  		data := testutil.RandomReader(i, n)
   335  		putGetter := newTestHasherStore(&FakeChunkStore{}, BMTHash)
   336  
   337  		ctx := context.Background()
   338  		_, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
   339  		if err != nil {
   340  			t.Fatalf(err.Error())
   341  		}
   342  		err = wait(ctx)
   343  		if err != nil {
   344  			t.Fatalf(err.Error())
   345  		}
   346  	}
   347  }
   348  
   349  func benchmarkSplitPyramidSHA3(n int, t *testing.B) {
   350  	t.ReportAllocs()
   351  	for i := 0; i < t.N; i++ {
   352  		data := testutil.RandomReader(i, n)
   353  		putGetter := newTestHasherStore(&FakeChunkStore{}, SHA3Hash)
   354  
   355  		ctx := context.Background()
   356  		_, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
   357  		if err != nil {
   358  			t.Fatalf(err.Error())
   359  		}
   360  		err = wait(ctx)
   361  		if err != nil {
   362  			t.Fatalf(err.Error())
   363  		}
   364  	}
   365  }
   366  
   367  func benchmarkSplitAppendPyramid(n, m int, t *testing.B) {
   368  	t.ReportAllocs()
   369  	for i := 0; i < t.N; i++ {
   370  		data := testutil.RandomReader(i, n)
   371  		data1 := testutil.RandomReader(t.N+i, m)
   372  
   373  		store := NewMapChunkStore()
   374  		putGetter := newTestHasherStore(store, SHA3Hash)
   375  
   376  		ctx := context.Background()
   377  		key, wait, err := PyramidSplit(ctx, data, putGetter, putGetter)
   378  		if err != nil {
   379  			t.Fatalf(err.Error())
   380  		}
   381  		err = wait(ctx)
   382  		if err != nil {
   383  			t.Fatalf(err.Error())
   384  		}
   385  
   386  		putGetter = newTestHasherStore(store, SHA3Hash)
   387  		_, wait, err = PyramidAppend(ctx, key, data1, putGetter, putGetter)
   388  		if err != nil {
   389  			t.Fatalf(err.Error())
   390  		}
   391  		err = wait(ctx)
   392  		if err != nil {
   393  			t.Fatalf(err.Error())
   394  		}
   395  	}
   396  }
   397  
   398  func BenchmarkSplitJoin_2(t *testing.B) { benchmarkSplitJoin(100, t) }
   399  func BenchmarkSplitJoin_3(t *testing.B) { benchmarkSplitJoin(1000, t) }
   400  func BenchmarkSplitJoin_4(t *testing.B) { benchmarkSplitJoin(10000, t) }
   401  func BenchmarkSplitJoin_5(t *testing.B) { benchmarkSplitJoin(100000, t) }
   402  func BenchmarkSplitJoin_6(t *testing.B) { benchmarkSplitJoin(1000000, t) }
   403  func BenchmarkSplitJoin_7(t *testing.B) { benchmarkSplitJoin(10000000, t) }
   404  
   405  // func BenchmarkSplitJoin_8(t *testing.B) { benchmarkSplitJoin(100000000, t) }
   406  
   407  func BenchmarkSplitTreeSHA3_2(t *testing.B)  { benchmarkSplitTreeSHA3(100, t) }
   408  func BenchmarkSplitTreeSHA3_2h(t *testing.B) { benchmarkSplitTreeSHA3(500, t) }
   409  func BenchmarkSplitTreeSHA3_3(t *testing.B)  { benchmarkSplitTreeSHA3(1000, t) }
   410  func BenchmarkSplitTreeSHA3_3h(t *testing.B) { benchmarkSplitTreeSHA3(5000, t) }
   411  func BenchmarkSplitTreeSHA3_4(t *testing.B)  { benchmarkSplitTreeSHA3(10000, t) }
   412  func BenchmarkSplitTreeSHA3_4h(t *testing.B) { benchmarkSplitTreeSHA3(50000, t) }
   413  func BenchmarkSplitTreeSHA3_5(t *testing.B)  { benchmarkSplitTreeSHA3(100000, t) }
   414  func BenchmarkSplitTreeSHA3_6(t *testing.B)  { benchmarkSplitTreeSHA3(1000000, t) }
   415  func BenchmarkSplitTreeSHA3_7(t *testing.B)  { benchmarkSplitTreeSHA3(10000000, t) }
   416  
   417  // func BenchmarkSplitTreeSHA3_8(t *testing.B)  { benchmarkSplitTreeSHA3(100000000, t) }
   418  
   419  func BenchmarkSplitTreeBMT_2(t *testing.B)  { benchmarkSplitTreeBMT(100, t) }
   420  func BenchmarkSplitTreeBMT_2h(t *testing.B) { benchmarkSplitTreeBMT(500, t) }
   421  func BenchmarkSplitTreeBMT_3(t *testing.B)  { benchmarkSplitTreeBMT(1000, t) }
   422  func BenchmarkSplitTreeBMT_3h(t *testing.B) { benchmarkSplitTreeBMT(5000, t) }
   423  func BenchmarkSplitTreeBMT_4(t *testing.B)  { benchmarkSplitTreeBMT(10000, t) }
   424  func BenchmarkSplitTreeBMT_4h(t *testing.B) { benchmarkSplitTreeBMT(50000, t) }
   425  func BenchmarkSplitTreeBMT_5(t *testing.B)  { benchmarkSplitTreeBMT(100000, t) }
   426  func BenchmarkSplitTreeBMT_6(t *testing.B)  { benchmarkSplitTreeBMT(1000000, t) }
   427  func BenchmarkSplitTreeBMT_7(t *testing.B)  { benchmarkSplitTreeBMT(10000000, t) }
   428  
   429  // func BenchmarkSplitTreeBMT_8(t *testing.B)  { benchmarkSplitTreeBMT(100000000, t) }
   430  
   431  func BenchmarkSplitPyramidSHA3_2(t *testing.B)  { benchmarkSplitPyramidSHA3(100, t) }
   432  func BenchmarkSplitPyramidSHA3_2h(t *testing.B) { benchmarkSplitPyramidSHA3(500, t) }
   433  func BenchmarkSplitPyramidSHA3_3(t *testing.B)  { benchmarkSplitPyramidSHA3(1000, t) }
   434  func BenchmarkSplitPyramidSHA3_3h(t *testing.B) { benchmarkSplitPyramidSHA3(5000, t) }
   435  func BenchmarkSplitPyramidSHA3_4(t *testing.B)  { benchmarkSplitPyramidSHA3(10000, t) }
   436  func BenchmarkSplitPyramidSHA3_4h(t *testing.B) { benchmarkSplitPyramidSHA3(50000, t) }
   437  func BenchmarkSplitPyramidSHA3_5(t *testing.B)  { benchmarkSplitPyramidSHA3(100000, t) }
   438  func BenchmarkSplitPyramidSHA3_6(t *testing.B)  { benchmarkSplitPyramidSHA3(1000000, t) }
   439  func BenchmarkSplitPyramidSHA3_7(t *testing.B)  { benchmarkSplitPyramidSHA3(10000000, t) }
   440  
   441  // func BenchmarkSplitPyramidSHA3_8(t *testing.B)  { benchmarkSplitPyramidSHA3(100000000, t) }
   442  
   443  func BenchmarkSplitPyramidBMT_2(t *testing.B)  { benchmarkSplitPyramidBMT(100, t) }
   444  func BenchmarkSplitPyramidBMT_2h(t *testing.B) { benchmarkSplitPyramidBMT(500, t) }
   445  func BenchmarkSplitPyramidBMT_3(t *testing.B)  { benchmarkSplitPyramidBMT(1000, t) }
   446  func BenchmarkSplitPyramidBMT_3h(t *testing.B) { benchmarkSplitPyramidBMT(5000, t) }
   447  func BenchmarkSplitPyramidBMT_4(t *testing.B)  { benchmarkSplitPyramidBMT(10000, t) }
   448  func BenchmarkSplitPyramidBMT_4h(t *testing.B) { benchmarkSplitPyramidBMT(50000, t) }
   449  func BenchmarkSplitPyramidBMT_5(t *testing.B)  { benchmarkSplitPyramidBMT(100000, t) }
   450  func BenchmarkSplitPyramidBMT_6(t *testing.B)  { benchmarkSplitPyramidBMT(1000000, t) }
   451  func BenchmarkSplitPyramidBMT_7(t *testing.B)  { benchmarkSplitPyramidBMT(10000000, t) }
   452  
   453  // func BenchmarkSplitPyramidBMT_8(t *testing.B)  { benchmarkSplitPyramidBMT(100000000, t) }
   454  
   455  func BenchmarkSplitAppendPyramid_2(t *testing.B)  { benchmarkSplitAppendPyramid(100, 1000, t) }
   456  func BenchmarkSplitAppendPyramid_2h(t *testing.B) { benchmarkSplitAppendPyramid(500, 1000, t) }
   457  func BenchmarkSplitAppendPyramid_3(t *testing.B)  { benchmarkSplitAppendPyramid(1000, 1000, t) }
   458  func BenchmarkSplitAppendPyramid_4(t *testing.B)  { benchmarkSplitAppendPyramid(10000, 1000, t) }
   459  func BenchmarkSplitAppendPyramid_4h(t *testing.B) { benchmarkSplitAppendPyramid(50000, 1000, t) }
   460  func BenchmarkSplitAppendPyramid_5(t *testing.B)  { benchmarkSplitAppendPyramid(100000, 1000, t) }
   461  func BenchmarkSplitAppendPyramid_6(t *testing.B)  { benchmarkSplitAppendPyramid(1000000, 1000, t) }
   462  func BenchmarkSplitAppendPyramid_7(t *testing.B)  { benchmarkSplitAppendPyramid(10000000, 1000, t) }
   463  
   464  // func BenchmarkSplitAppendPyramid_8(t *testing.B)  { benchmarkSplitAppendPyramid(100000000, 1000, t) }
   465  
   466  // go test -timeout 20m -cpu 4 -run=no -bench=. ./swarm/storage
   467  // Without the -timeout argument the benchmarks exceed the default 10m test timeout and the run is aborted with a goroutine dump.
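        //
        // For example, a single benchmark family can be selected with a -bench regexp
        // (run from the repository root):
        //   go test -timeout 20m -run=NONE -bench=BenchmarkSplitPyramidBMT ./swarm/storage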