github.com/ethereum/go-ethereum@v1.14.3/core/bloombits/scheduler_test.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"bytes"
	"math/big"
	"sync"
	"sync/atomic"
	"testing"
)

// Tests that the scheduler can deduplicate and forward retrieval requests to
// the underlying fetchers and serve responses back to the requesters, regardless
// of the concurrency of the requesting clients or the serving data fetchers.
func TestSchedulerSingleClientSingleFetcher(t *testing.T) { testScheduler(t, 1, 1, 5000) }
func TestSchedulerSingleClientMultiFetcher(t *testing.T)  { testScheduler(t, 1, 10, 5000) }
func TestSchedulerMultiClientSingleFetcher(t *testing.T)  { testScheduler(t, 10, 1, 5000) }
func TestSchedulerMultiClientMultiFetcher(t *testing.T)   { testScheduler(t, 10, 10, 5000) }

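// testScheduler runs the scheduler with the given number of concurrent clients
// and data fetchers, pushing the given number of section retrievals through it
// and verifying the returned results.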
func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
	t.Parallel()
	f := newScheduler(0)

	// Create a batch of handler goroutines that respond to bloom bit requests and
	// deliver them to the scheduler.
	var fetchPend sync.WaitGroup
	fetchPend.Add(fetchers)
	defer fetchPend.Wait()

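	// Shared request channel between the scheduler and the fetchers; closing it
	// on exit terminates the fetcher goroutines.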
	fetch := make(chan *request, 16)
	defer close(fetch)

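	// delivered counts the requests that actually reach the fetchers; since the
	// scheduler deduplicates concurrent retrievals, it should end up at exactly
	// one delivery per unique section.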
	var delivered atomic.Uint32
	for i := 0; i < fetchers; i++ {
		go func() {
			defer fetchPend.Done()

			for req := range fetch {
				delivered.Add(1)

				f.deliver([]uint64{
					req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
					req.section,                    // Requested data
					req.section,                    // Duplicated data (ensure it doesn't double close anything)
				}, [][]byte{
					{},
					new(big.Int).SetUint64(req.section).Bytes(),
					new(big.Int).SetUint64(req.section).Bytes(),
				})
			}
		}()
	}
	// Start a batch of goroutines to concurrently run scheduling tasks
	quit := make(chan struct{})

	var pend sync.WaitGroup
	pend.Add(clients)

	for i := 0; i < clients; i++ {
		go func() {
			defer pend.Done()

			in := make(chan uint64, 16)
			out := make(chan []byte, 16)

			// Start a retrieval pipeline through the scheduler; it registers its
			// internal goroutines on the same wait group.
			f.run(in, fetch, out, quit, &pend)

			// Queue all the section retrievals, then read back the responses,
			// which arrive in request order, and verify each against the
			// section number it encodes.
			go func() {
				for j := 0; j < requests; j++ {
					in <- uint64(j)
				}
				close(in)
			}()
			b := new(big.Int)
			for j := 0; j < requests; j++ {
				bits := <-out
				if want := b.SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) {
					t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want)
				}
			}
		}()
	}
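	// Wait for all the clients, as well as the scheduler's internal goroutines,
	// to terminate.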
	pend.Wait()

	// With every client requesting the same sections, deduplication should have
	// collapsed all the retrievals into a single fetch per section.
	if have := delivered.Load(); int(have) != requests {
		t.Errorf("request count mismatch: have %v, want %v", have, requests)
	}
}