github.com/klaytn/klaytn@v1.12.1/blockchain/bloombits/scheduler_test.go

// Modifications Copyright 2018 The klaytn Authors
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from core/bloombits/scheduler_test.go (2018/06/04).
// Modified and improved for the klaytn development.

package bloombits

import (
	"bytes"
	"math/big"
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

// Tests that the scheduler can deduplicate and forward retrieval requests to
// underlying fetchers and serve responses back, regardless of the concurrency
// of the requesting clients or the serving data fetchers.
func TestSchedulerSingleClientSingleFetcher(t *testing.T) { testScheduler(t, 1, 1, 5000) }
func TestSchedulerSingleClientMultiFetcher(t *testing.T)  { testScheduler(t, 1, 10, 5000) }
func TestSchedulerMultiClientSingleFetcher(t *testing.T)  { testScheduler(t, 10, 1, 5000) }
func TestSchedulerMultiClientMultiFetcher(t *testing.T)   { testScheduler(t, 10, 10, 5000) }

func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
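	// Create a retrieval scheduler for a single bloom bit (index 0).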
	f := newScheduler(0)

	// Create a batch of handler goroutines that respond to bloom bit requests and
	// deliver them to the scheduler.
	var fetchPend sync.WaitGroup
	fetchPend.Add(fetchers)
	defer fetchPend.Wait()

	fetch := make(chan *request, 16)
	defer close(fetch)

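	// delivered counts the retrieval requests served by the fetcher goroutines.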
	var delivered uint32
	for i := 0; i < fetchers; i++ {
		go func() {
			defer fetchPend.Done()

			for req := range fetch {
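				// Simulate a short random fetch latency (up to 100µs) before delivering.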
				time.Sleep(time.Duration(rand.Intn(int(100 * time.Microsecond))))
				atomic.AddUint32(&delivered, 1)

				f.deliver([]uint64{
					req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
					req.section,                    // Requested data
					req.section,                    // Duplicated data (ensure it doesn't double close anything)
				}, [][]byte{
					{},
					new(big.Int).SetUint64(req.section).Bytes(),
					new(big.Int).SetUint64(req.section).Bytes(),
				})
			}
		}()
	}
	// Start a batch of goroutines to concurrently run scheduling tasks.
	quit := make(chan struct{})

	var pend sync.WaitGroup
	pend.Add(clients)

	for i := 0; i < clients; i++ {
		go func() {
			defer pend.Done()

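			// Per-client channels: section numbers go in, retrieved bloom bit vectors come out.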
			in := make(chan uint64, 16)
			out := make(chan []byte, 16)

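			// Attach this client to the scheduler: sections read from in are deduplicated
			// across clients, forwarded to the fetchers via fetch, and the corresponding
			// results are returned on out in request order.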
			f.run(in, fetch, out, quit, &pend)

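			// Feed all the section numbers into the scheduler, then close the input channel.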
			go func() {
				for j := 0; j < requests; j++ {
					in <- uint64(j)
				}
				close(in)
			}()

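			// Read back the results and verify they arrive in request order with the expected payloads.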
			for j := 0; j < requests; j++ {
				bits := <-out
				if want := new(big.Int).SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) {
					t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want)
				}
			}
		}()
	}
	pend.Wait()

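	// Requests are deduplicated across clients, so every section should have been fetched exactly once.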
	if have := atomic.LoadUint32(&delivered); int(have) != requests {
		t.Errorf("request count mismatch: have %v, want %v", have, requests)
	}
}