// Copyright 2017 The aquachain Authors
// This file is part of the aquachain library.
//
// The aquachain library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The aquachain library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the aquachain library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"bytes"
	"math/big"
	"math/rand"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

// Tests that the scheduler can deduplicate and forward retrieval requests to
// underlying fetchers and serve responses back, irrelevant of the concurrency
// of the requesting clients or serving data fetchers.
func TestSchedulerSingleClientSingleFetcher(t *testing.T) { testScheduler(t, 1, 1, 5000) }
func TestSchedulerSingleClientMultiFetcher(t *testing.T)  { testScheduler(t, 1, 10, 5000) }
func TestSchedulerMultiClientSingleFetcher(t *testing.T)  { testScheduler(t, 10, 1, 5000) }
func TestSchedulerMultiClientMultiFetcher(t *testing.T)   { testScheduler(t, 10, 10, 5000) }

// testScheduler spins up `fetchers` goroutines that service bloom-bit
// retrieval requests and `clients` goroutines that each request sections
// 0..requests-1, then verifies that every client receives the correct
// payload for every section and that the scheduler deduplicated concurrent
// client requests down to exactly `requests` underlying fetches.
func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
	// newScheduler is project-internal; presumably 0 selects the bloom bit
	// index under test — confirm against scheduler.go.
	f := newScheduler(0)

	// Create a batch of handler goroutines that respond to bloom bit requests and
	// deliver them to the scheduler.
	var fetchPend sync.WaitGroup
	fetchPend.Add(fetchers)
	defer fetchPend.Wait()

	fetch := make(chan *request, 16)
	// Deferred LIFO order matters: close(fetch) runs before fetchPend.Wait(),
	// so the fetcher goroutines' range loops terminate and the Wait returns.
	defer close(fetch)

	// Total number of deliver calls made by fetchers; read atomically at the
	// end to check the scheduler's request deduplication.
	var delivered uint32
	for i := 0; i < fetchers; i++ {
		go func() {
			defer fetchPend.Done()

			for req := range fetch {
				// Random jitter (up to 100µs, expressed in nanoseconds) to
				// shake out ordering assumptions between fetchers.
				time.Sleep(time.Duration(rand.Intn(int(100 * time.Microsecond))))
				atomic.AddUint32(&delivered, 1)

				// Deliver a deliberately messy response batch: one section
				// that was never requested and the requested section twice.
				f.deliver([]uint64{
					req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
					req.section,                    // Requested data
					req.section,                    // Duplicated data (ensure it doesn't double close anything)
				}, [][]byte{
					{},
					new(big.Int).SetUint64(req.section).Bytes(),
					new(big.Int).SetUint64(req.section).Bytes(),
				})
			}
		}()
	}
	// Start a batch of goroutines to concurrently run scheduling tasks
	quit := make(chan struct{})

	var pend sync.WaitGroup
	pend.Add(clients)

	for i := 0; i < clients; i++ {
		go func() {
			defer pend.Done()

			in := make(chan uint64, 16)
			out := make(chan []byte, 16)

			// run is project-internal; it wires in -> fetch -> out and
			// presumably registers its own goroutines on pend — confirm
			// against scheduler.go before assuming pend.Wait() covers them.
			f.run(in, fetch, out, quit, &pend)

			// Feed all section numbers, then close the input so the
			// scheduler pipeline can drain and shut down.
			go func() {
				for j := 0; j < requests; j++ {
					in <- uint64(j)
				}
				close(in)
			}()

			// Responses arrive in request order; each section's payload must
			// be the big-endian bytes of the section number.
			for j := 0; j < requests; j++ {
				bits := <-out
				if want := new(big.Int).SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) {
					t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want)
				}
			}
		}()
	}
	pend.Wait()

	// Every section must have been fetched exactly once regardless of how
	// many clients requested it — this is the deduplication guarantee.
	if have := atomic.LoadUint32(&delivered); int(have) != requests {
		t.Errorf("request count mismatch: have %v, want %v", have, requests)
	}
}