github.com/codingfuture/orig-energi3@v0.8.4/swarm/storage/localstore/mode_put_test.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package localstore

import (
	"bytes"
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

// TestModePutRequest validates ModePutRequest index values on the provided DB.
func TestModePutRequest(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

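	// ModePutRequest stores chunks that are delivered in response to retrieve
	// requests; the subtests below check that such a chunk gets both store and
	// access timestamps and is counted in the gc index.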
	putter := db.NewPutter(ModePutRequest)

	chunk := generateRandomChunk()

	// remember the timestamp of the first store so that the second put
	// can verify it is preserved
	var storeTimestamp int64

	t.Run("first put", func(t *testing.T) {
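		// pin the package clock so that the stored timestamps are
		// deterministic; the function returned by setNow restores the
		// original clock when this subtest returns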
		wantTimestamp := time.Now().UTC().UnixNano()
		defer setNow(func() (t int64) {
			return wantTimestamp
		})()

		storeTimestamp = wantTimestamp

		err := putter.Put(chunk)
		if err != nil {
			t.Fatal(err)
		}

		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, wantTimestamp, wantTimestamp))

		t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))

		t.Run("gc size", newIndexGCSizeTest(db))
	})

	t.Run("second put", func(t *testing.T) {
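		// putting the same chunk again must keep the store timestamp of the
		// first put, while the access timestamp moves to the new clock value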
		wantTimestamp := time.Now().UTC().UnixNano()
		defer setNow(func() (t int64) {
			return wantTimestamp
		})()

		err := putter.Put(chunk)
		if err != nil {
			t.Fatal(err)
		}

		t.Run("retrieve indexes", newRetrieveIndexesTestWithAccess(db, chunk, storeTimestamp, wantTimestamp))

		t.Run("gc index count", newItemsCountTest(db.gcIndex, 1))

		t.Run("gc size", newIndexGCSizeTest(db))
	})
}

// TestModePutSync validates ModePutSync index values on the provided DB.
func TestModePutSync(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	wantTimestamp := time.Now().UTC().UnixNano()
	defer setNow(func() (t int64) {
		return wantTimestamp
	})()

	chunk := generateRandomChunk()

	err := db.NewPutter(ModePutSync).Put(chunk)
	if err != nil {
		t.Fatal(err)
	}

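	// a synced chunk is expected in the retrieval and pull indexes; its
	// access timestamp is still zero because it has not been requested yet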
	t.Run("retrieve indexes", newRetrieveIndexesTest(db, chunk, wantTimestamp, 0))

	t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))
}

// TestModePutUpload validates ModePutUpload index values on the provided DB.
func TestModePutUpload(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	wantTimestamp := time.Now().UTC().UnixNano()
	defer setNow(func() (t int64) {
		return wantTimestamp
	})()

	chunk := generateRandomChunk()

	err := db.NewPutter(ModePutUpload).Put(chunk)
	if err != nil {
		t.Fatal(err)
	}

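	// an uploaded chunk is expected in the retrieval, pull and push indexes,
	// again with a zero access timestamp until it is requested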
	t.Run("retrieve indexes", newRetrieveIndexesTest(db, chunk, wantTimestamp, 0))

	t.Run("pull index", newPullIndexTest(db, chunk, wantTimestamp, nil))

	t.Run("push index", newPushIndexTest(db, chunk, wantTimestamp, nil))
}

// TestModePutUpload_parallel uploads chunks in parallel
// and validates that all chunks can be retrieved with correct data.
func TestModePutUpload_parallel(t *testing.T) {
	db, cleanupFunc := newTestDB(t, nil)
	defer cleanupFunc()

	chunkCount := 1000
	workerCount := 100

	chunkChan := make(chan storage.Chunk)
	errChan := make(chan error)
	doneChan := make(chan struct{})
	defer close(doneChan)
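	// closing doneChan on return unblocks the uploader and sender goroutines
	// if the test fails before all chunks are processed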

	// start uploader workers
	for i := 0; i < workerCount; i++ {
		go func(i int) {
			uploader := db.NewPutter(ModePutUpload)
			for {
				select {
				case chunk, ok := <-chunkChan:
					if !ok {
						return
					}
					err := uploader.Put(chunk)
					select {
					case errChan <- err:
					case <-doneChan:
					}
				case <-doneChan:
					return
				}
			}
		}(i)
	}

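	// chunks is appended to by the sender goroutine and read back during
	// validation, so access to it is guarded by chunksMu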
	chunks := make([]storage.Chunk, 0)
	var chunksMu sync.Mutex

	// send chunks to workers
	go func() {
		for i := 0; i < chunkCount; i++ {
			chunk := generateRandomChunk()
			select {
			case chunkChan <- chunk:
			case <-doneChan:
				return
			}
			chunksMu.Lock()
			chunks = append(chunks, chunk)
			chunksMu.Unlock()
		}

		close(chunkChan)
	}()

	// validate every error from workers
	for i := 0; i < chunkCount; i++ {
		err := <-errChan
		if err != nil {
			t.Fatal(err)
		}
	}

	// get every chunk and validate its data
	getter := db.NewGetter(ModeGetRequest)

	chunksMu.Lock()
	defer chunksMu.Unlock()
	for _, chunk := range chunks {
		got, err := getter.Get(chunk.Address())
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(got.Data(), chunk.Data()) {
			t.Fatalf("got chunk %s data %x, want %x", chunk.Address().Hex(), got.Data(), chunk.Data())
		}
	}
}

// BenchmarkPutUpload runs a series of benchmarks that upload
// a specific number of chunks in parallel.
//
// Measurements on MacBook Pro (Retina, 15-inch, Mid 2014)
//
// # go test -benchmem -run=none github.com/ethereum/go-ethereum/swarm/storage/localstore -bench BenchmarkPutUpload -v
//
// goos: darwin
// goarch: amd64
// pkg: github.com/ethereum/go-ethereum/swarm/storage/localstore
// BenchmarkPutUpload/count_100_parallel_1-8         	     300	   5107704 ns/op	 2081461 B/op	    2374 allocs/op
// BenchmarkPutUpload/count_100_parallel_2-8         	     300	   5411742 ns/op	 2081608 B/op	    2364 allocs/op
// BenchmarkPutUpload/count_100_parallel_4-8         	     500	   3704964 ns/op	 2081696 B/op	    2324 allocs/op
// BenchmarkPutUpload/count_100_parallel_8-8         	     500	   2932663 ns/op	 2082594 B/op	    2295 allocs/op
// BenchmarkPutUpload/count_100_parallel_16-8        	     500	   3117157 ns/op	 2085438 B/op	    2282 allocs/op
// BenchmarkPutUpload/count_100_parallel_32-8        	     500	   3449122 ns/op	 2089721 B/op	    2286 allocs/op
// BenchmarkPutUpload/count_1000_parallel_1-8        	      20	  79784470 ns/op	25211240 B/op	   23225 allocs/op
// BenchmarkPutUpload/count_1000_parallel_2-8        	      20	  75422164 ns/op	25210730 B/op	   23187 allocs/op
// BenchmarkPutUpload/count_1000_parallel_4-8        	      20	  70698378 ns/op	25206522 B/op	   22692 allocs/op
// BenchmarkPutUpload/count_1000_parallel_8-8        	      20	  71285528 ns/op	25213436 B/op	   22345 allocs/op
// BenchmarkPutUpload/count_1000_parallel_16-8       	      20	  71301826 ns/op	25205040 B/op	   22090 allocs/op
// BenchmarkPutUpload/count_1000_parallel_32-8       	      30	  57713506 ns/op	25219781 B/op	   21848 allocs/op
// BenchmarkPutUpload/count_10000_parallel_1-8       	       2	 656719345 ns/op	216792908 B/op	  248940 allocs/op
// BenchmarkPutUpload/count_10000_parallel_2-8       	       2	 646301962 ns/op	216730800 B/op	  248270 allocs/op
// BenchmarkPutUpload/count_10000_parallel_4-8       	       2	 532784228 ns/op	216667080 B/op	  241910 allocs/op
// BenchmarkPutUpload/count_10000_parallel_8-8       	       3	 494290188 ns/op	216297749 B/op	  236247 allocs/op
// BenchmarkPutUpload/count_10000_parallel_16-8      	       3	 483485315 ns/op	216060384 B/op	  231090 allocs/op
// BenchmarkPutUpload/count_10000_parallel_32-8      	       3	 434461294 ns/op	215371280 B/op	  224800 allocs/op
// BenchmarkPutUpload/count_100000_parallel_1-8      	       1	22767894338 ns/op	2331372088 B/op	 4049876 allocs/op
// BenchmarkPutUpload/count_100000_parallel_2-8      	       1	25347872677 ns/op	2344140160 B/op	 4106763 allocs/op
// BenchmarkPutUpload/count_100000_parallel_4-8      	       1	23580460174 ns/op	2338582576 B/op	 4027452 allocs/op
// BenchmarkPutUpload/count_100000_parallel_8-8      	       1	22197559193 ns/op	2321803496 B/op	 3877553 allocs/op
// BenchmarkPutUpload/count_100000_parallel_16-8     	       1	22527046476 ns/op	2327854800 B/op	 3885455 allocs/op
// BenchmarkPutUpload/count_100000_parallel_32-8     	       1	21332243613 ns/op	2299654568 B/op	 3697181 allocs/op
// PASS
func BenchmarkPutUpload(b *testing.B) {
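	// run one sub-benchmark for every (chunk count, parallelism) combination;
	// the b.N loop lets the testing package repeat each run as needed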
	for _, count := range []int{
		100,
		1000,
		10000,
		100000,
	} {
		for _, maxParallelUploads := range []int{
			1,
			2,
			4,
			8,
			16,
			32,
		} {
			name := fmt.Sprintf("count %v parallel %v", count, maxParallelUploads)
			b.Run(name, func(b *testing.B) {
				for n := 0; n < b.N; n++ {
					benchmarkPutUpload(b, nil, count, maxParallelUploads)
				}
			})
		}
	}
}

// benchmarkPutUpload runs a benchmark by uploading a specific number
// of chunks with the specified maximum number of parallel uploads.
func benchmarkPutUpload(b *testing.B, o *Options, count, maxParallelUploads int) {
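	// exclude the database setup and chunk generation from the measured time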
	b.StopTimer()
	db, cleanupFunc := newTestDB(b, o)
	defer cleanupFunc()

	uploader := db.NewPutter(ModePutUpload)
	chunks := make([]storage.Chunk, count)
	for i := 0; i < count; i++ {
		chunks[i] = generateFakeRandomChunk()
	}
	errs := make(chan error)
	b.StartTimer()

	go func() {
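		// sem is a counting semaphore that caps the number of concurrent
		// Put calls at maxParallelUploads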
		sem := make(chan struct{}, maxParallelUploads)
		for i := 0; i < count; i++ {
			sem <- struct{}{}

			go func(i int) {
				defer func() { <-sem }()

				errs <- uploader.Put(chunks[i])
			}(i)
		}
	}()

	for i := 0; i < count; i++ {
		err := <-errs
		if err != nil {
			b.Fatal(err)
		}
	}
}