github.com/murrekatt/go-ethereum@v1.5.8-0.20170123175102-fc52f2c007fb/swarm/storage/chunker_test.go

// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package storage

import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"io"
	"runtime"
	"sync"
	"testing"
	"time"
)

/*
Tests the TreeChunker and PyramidChunker by splitting random byte slices
into chunks and joining them back together.
*/

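// splitAndRejoin is a hypothetical helper, not part of the original tests:
// a minimal sketch of the flow they exercise, with a plain map standing in
// for a chunk store. It assumes the splitter emits no further chunks once
// the supplied WaitGroup is done; the real tests drive the same flow
// through chunkerTester below, which adds timeouts and error checking.
func splitAndRejoin(data io.Reader, size int64) (LazySectionReader, error) {
	chunker := NewTreeChunker(NewChunkerParams())
	store := make(map[string]*Chunk)

	// collect the chunks emitted by Split, acknowledging each one
	splitC := make(chan *Chunk, 1000)
	swg := &sync.WaitGroup{}
	done := make(chan bool)
	go func() {
		for chunk := range splitC {
			store[chunk.Key.String()] = chunk
			if chunk.wg != nil {
				chunk.wg.Done()
			}
		}
		close(done)
	}()

	key, err := chunker.Split(data, size, splitC, swg, nil)
	if err != nil {
		return nil, err
	}
	swg.Wait()    // all emitted chunks have been stored
	close(splitC) // assumption: no more sends after swg.Wait()
	<-done

	// serve Join's chunk requests from the map; the returned reader is
	// lazy, so requests only arrive once it is read from
	joinC := make(chan *Chunk, 1000)
	go func() {
		for chunk := range joinC {
			stored, ok := store[chunk.Key.String()]
			if !ok {
				panic("chunk not found: " + chunk.Key.String())
			}
			chunk.SData = stored.SData
			chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
			close(chunk.C) // signals delivery to the reader
		}
	}()
	return chunker.Join(key, joinC), nil
}
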
// test is the subset of testing.T and testing.B used by the helpers below,
// letting tests and benchmarks share the same chunkerTester.
type test interface {
	Fatalf(string, ...interface{})
	Logf(string, ...interface{})
}

// chunkerTester collects the chunks produced by Split, keyed by chunk key,
// so that Join can serve them back like a chunk store. Inputs are cached
// per size so repeated runs reuse the same data.
type chunkerTester struct {
	inputs map[uint64][]byte
	chunks map[string]*Chunk
	t      test
}

// checkChunks asserts that exactly want chunks were collected by Split.
func (self *chunkerTester) checkChunks(t *testing.T, want int) {
	l := len(self.chunks)
	if l != want {
		t.Errorf("expected %v chunks, got %v", want, l)
	}
}

// Split drives the given Splitter over data, collecting the emitted chunks
// into self.chunks (keyed by chunk key) so that Join can later serve them
// back. It fails the test unless the splitter returns expectedError.
func (self *chunkerTester) Split(chunker Splitter, data io.Reader, size int64, chunkC chan *Chunk, swg *sync.WaitGroup, expectedError error) (key Key) {
	// reset the collected chunks
	self.chunks = make(map[string]*Chunk)

	if self.inputs == nil {
		self.inputs = make(map[uint64][]byte)
	}

	quitC := make(chan bool)
	timeout := time.After(600 * time.Second)
	if chunkC != nil {
		// note: Fatalf from a goroutine other than the test's marks the
		// test failed but does not stop it immediately
		go func() {
			for {
				select {
				case <-timeout:
					self.t.Fatalf("Split timeout error")
				case <-quitC:
					return
				case chunk := <-chunkC:
					self.chunks[chunk.Key.String()] = chunk
					if chunk.wg != nil {
						chunk.wg.Done()
					}
				}
			}
		}()
	}
	key, err := chunker.Split(data, size, chunkC, swg, nil)
	if err != nil && expectedError == nil {
		self.t.Fatalf("Split error: %v", err)
	} else if expectedError != nil && (err == nil || err.Error() != expectedError.Error()) {
		self.t.Fatalf("expected error %v, got %v", expectedError, err)
	}
	if chunkC != nil {
		if swg != nil {
			swg.Wait()
		}
		close(quitC)
	}
	return
}

// Join resolves key through the given Chunker and answers the chunk
// requests arriving on chunkC from the chunks collected by a previous
// Split, mimicking a chunk store. The returned reader is lazy: reading
// from it is what triggers the retrievals.
func (self *chunkerTester) Join(chunker Chunker, key Key, c int, chunkC chan *Chunk, quitC chan bool) LazySectionReader {
	// unlike Split, do not reset self.chunks: they back the mock retrievals below

	reader := chunker.Join(key, chunkC)

	timeout := time.After(600 * time.Second)
	i := 0
	go func() {
		for {
			select {
			case <-timeout:
				self.t.Fatalf("Join timeout error")

			case chunk, ok := <-chunkC:
				if !ok {
					close(quitC)
					return
				}
				// this just mocks the behaviour of a chunk store retrieval
				stored, success := self.chunks[chunk.Key.String()]
				if !success {
					self.t.Fatalf("chunk %v not found", chunk.Key.String())
					return
				}
				chunk.SData = stored.SData
				chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8]))
				// closing the channel signals that the chunk has been delivered
				close(chunk.C)
				i++
			}
		}
	}()
	return reader
}

// testRandomBrokenData feeds the splitter a reader that fails halfway
// through and checks that the error is propagated.
func testRandomBrokenData(splitter Splitter, n int, tester *chunkerTester) {
	data := io.LimitReader(rand.Reader, int64(n))
	brokendata := brokenLimitReader(data, n, n/2)

	buf := make([]byte, n)
	_, err := brokendata.Read(buf)
	if err == nil || err.Error() != "Broken reader" {
		tester.t.Fatalf("expected error %q from the broken reader, got: %v", "Broken reader", err)
	}

	data = io.LimitReader(rand.Reader, int64(n))
	brokendata = brokenLimitReader(data, n, n/2)

	chunkC := make(chan *Chunk, 1000)
	swg := &sync.WaitGroup{}

	key := tester.Split(splitter, brokendata, int64(n), chunkC, swg, fmt.Errorf("Broken reader"))
	tester.t.Logf("Key = %v", key)
}

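// brokenReader is a hypothetical illustration (not the actual
// brokenLimitReader, which is defined elsewhere in this package) of the
// behaviour the assertion above relies on: any Read that would cross the
// errAt offset fails with "Broken reader" instead of returning data.
type brokenReader struct {
	r     io.Reader
	off   int // bytes served so far
	errAt int // offset at which the reader "breaks"
}

func (b *brokenReader) Read(p []byte) (int, error) {
	if b.off+len(p) > b.errAt {
		return 0, fmt.Errorf("Broken reader")
	}
	n, err := b.r.Read(p)
	b.off += n
	return n, err
}
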
// testRandomData splits n random bytes with the given splitter, joins them
// back through a TreeChunker and verifies the round trip is lossless.
// Inputs are cached per size so tree and pyramid runs use the same data.
func testRandomData(splitter Splitter, n int, tester *chunkerTester) {
	if tester.inputs == nil {
		tester.inputs = make(map[uint64][]byte)
	}
	input, found := tester.inputs[uint64(n)]
	var data io.Reader
	if !found {
		data, input = testDataReaderAndSlice(n)
		tester.inputs[uint64(n)] = input
	} else {
		data = io.LimitReader(bytes.NewReader(input), int64(n))
	}

	chunkC := make(chan *Chunk, 1000)
	swg := &sync.WaitGroup{}

	key := tester.Split(splitter, data, int64(n), chunkC, swg, nil)
	tester.t.Logf("Key = %v", key)

	chunkC = make(chan *Chunk, 1000)
	quitC := make(chan bool)

	chunker := NewTreeChunker(NewChunkerParams())
	reader := tester.Join(chunker, key, 0, chunkC, quitC)
	output := make([]byte, n)
	r, err := reader.Read(output)
	if r != n || err != io.EOF {
		tester.t.Fatalf("read error: read %v bytes, expected %v, err = %v", r, n, err)
	}
	if input != nil {
		if !bytes.Equal(output, input) {
			tester.t.Fatalf("input and output mismatch\n IN: %v\nOUT: %v\n", input, output)
		}
	}
	close(chunkC)
	<-quitC
}

func TestRandomData(t *testing.T) {
	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 123456, 2345678}
	tester := &chunkerTester{t: t}
	chunker := NewTreeChunker(NewChunkerParams())
	for _, s := range sizes {
		testRandomData(chunker, s, tester)
	}
	pyramid := NewPyramidChunker(NewChunkerParams())
	for _, s := range sizes {
		testRandomData(pyramid, s, tester)
	}
}

func TestRandomBrokenData(t *testing.T) {
	sizes := []int{1, 60, 83, 179, 253, 1024, 4095, 4096, 4097, 8191, 8192, 8193, 123456, 2345678}
	tester := &chunkerTester{t: t}
	chunker := NewTreeChunker(NewChunkerParams())
	for _, s := range sizes {
		testRandomBrokenData(chunker, s, tester)
		t.Logf("done size: %v", s)
	}
}

// readAll reads the reader's full contents into result in 1000-byte
// windows via ReadAt; errors are ignored.
func readAll(reader LazySectionReader, result []byte) {
	size := int64(len(result))

	var end int64
	for pos := int64(0); pos < size; pos += 1000 {
		if pos+1000 > size {
			end = size
		} else {
			end = pos + 1000
		}
		reader.ReadAt(result[pos:end], pos)
	}
}

// benchReadAll drives the reader end to end in 1000-byte windows,
// reusing one buffer to keep allocations out of the measurement.
func benchReadAll(reader LazySectionReader) {
	size, _ := reader.Size(nil)
	output := make([]byte, 1000)
	for pos := int64(0); pos < size; pos += 1000 {
		reader.ReadAt(output, pos)
	}
}

// benchmarkJoin measures a full split-and-join round trip over n random
// bytes. The split runs inside the timed loop, so its cost is included.
func benchmarkJoin(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		chunker := NewTreeChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)

		chunkC := make(chan *Chunk, 1000)
		swg := &sync.WaitGroup{}

		key := tester.Split(chunker, data, int64(n), chunkC, swg, nil)
		chunkC = make(chan *Chunk, 1000)
		quitC := make(chan bool)
		reader := tester.Join(chunker, key, i, chunkC, quitC)
		benchReadAll(reader)
		close(chunkC)
		<-quitC
	}
	stats := new(runtime.MemStats)
	runtime.ReadMemStats(stats)
	// total bytes of memory obtained from the OS
	fmt.Println(stats.Sys)
}

// benchmarkSplitTree measures splitting n random bytes with the TreeChunker.
func benchmarkSplitTree(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		chunker := NewTreeChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)
		tester.Split(chunker, data, int64(n), nil, nil, nil)
	}
	stats := new(runtime.MemStats)
	runtime.ReadMemStats(stats)
	fmt.Println(stats.Sys)
}

// benchmarkSplitPyramid measures splitting n random bytes with the PyramidChunker.
func benchmarkSplitPyramid(n int, t *testing.B) {
	t.ReportAllocs()
	for i := 0; i < t.N; i++ {
		splitter := NewPyramidChunker(NewChunkerParams())
		tester := &chunkerTester{t: t}
		data := testDataReader(n)
		tester.Split(splitter, data, int64(n), nil, nil, nil)
	}
	stats := new(runtime.MemStats)
	runtime.ReadMemStats(stats)
	fmt.Println(stats.Sys)
}

// In the benchmarks below, the numeric suffix is the power of ten of the
// input size in bytes (2 => 100 bytes ... 8 => 100 MB); an "h" marks the
// half step (e.g. 2h => 500 bytes).
func BenchmarkJoin_2(t *testing.B) { benchmarkJoin(100, t) }
func BenchmarkJoin_3(t *testing.B) { benchmarkJoin(1000, t) }
func BenchmarkJoin_4(t *testing.B) { benchmarkJoin(10000, t) }
func BenchmarkJoin_5(t *testing.B) { benchmarkJoin(100000, t) }
func BenchmarkJoin_6(t *testing.B) { benchmarkJoin(1000000, t) }
func BenchmarkJoin_7(t *testing.B) { benchmarkJoin(10000000, t) }
func BenchmarkJoin_8(t *testing.B) { benchmarkJoin(100000000, t) }

func BenchmarkSplitTree_2(t *testing.B)  { benchmarkSplitTree(100, t) }
func BenchmarkSplitTree_2h(t *testing.B) { benchmarkSplitTree(500, t) }
func BenchmarkSplitTree_3(t *testing.B)  { benchmarkSplitTree(1000, t) }
func BenchmarkSplitTree_3h(t *testing.B) { benchmarkSplitTree(5000, t) }
func BenchmarkSplitTree_4(t *testing.B)  { benchmarkSplitTree(10000, t) }
func BenchmarkSplitTree_4h(t *testing.B) { benchmarkSplitTree(50000, t) }
func BenchmarkSplitTree_5(t *testing.B)  { benchmarkSplitTree(100000, t) }
func BenchmarkSplitTree_6(t *testing.B)  { benchmarkSplitTree(1000000, t) }
func BenchmarkSplitTree_7(t *testing.B)  { benchmarkSplitTree(10000000, t) }
func BenchmarkSplitTree_8(t *testing.B)  { benchmarkSplitTree(100000000, t) }

func BenchmarkSplitPyramid_2(t *testing.B)  { benchmarkSplitPyramid(100, t) }
func BenchmarkSplitPyramid_2h(t *testing.B) { benchmarkSplitPyramid(500, t) }
func BenchmarkSplitPyramid_3(t *testing.B)  { benchmarkSplitPyramid(1000, t) }
func BenchmarkSplitPyramid_3h(t *testing.B) { benchmarkSplitPyramid(5000, t) }
func BenchmarkSplitPyramid_4(t *testing.B)  { benchmarkSplitPyramid(10000, t) }
func BenchmarkSplitPyramid_4h(t *testing.B) { benchmarkSplitPyramid(50000, t) }
func BenchmarkSplitPyramid_5(t *testing.B)  { benchmarkSplitPyramid(100000, t) }
func BenchmarkSplitPyramid_6(t *testing.B)  { benchmarkSplitPyramid(1000000, t) }
func BenchmarkSplitPyramid_7(t *testing.B)  { benchmarkSplitPyramid(10000000, t) }
func BenchmarkSplitPyramid_8(t *testing.B)  { benchmarkSplitPyramid(100000000, t) }

// godep go test -bench ./swarm/storage -cpuprofile cpu.out -memprofile mem.out
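// without godep, the standard invocation would be:
// go test ./swarm/storage -run NONE -bench . -cpuprofile cpu.out -memprofile mem.out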