github.com/neatio-net/neatio@v1.7.3-0.20231114194659-f4d7a2226baa/chain/core/bloombits/scheduler.go

// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package bloombits

import (
	"sync"
)

// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
	section uint64 // Section index to retrieve a bit-vector from
	bit     uint   // Bit index within the section to retrieve the vector of
}

// response represents the state of a requested bit-vector through a scheduler.
type response struct {
	cached []byte        // Cached bits to dedup multiple requests
	done   chan struct{} // Channel to allow waiting for completion
}

// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Besides scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
	bit       uint                 // Index of the bit in the bloom filter this scheduler is responsible for
	responses map[uint64]*response // Currently pending retrieval requests or already cached responses
	lock      sync.Mutex           // Lock protecting the responses from concurrent access
}

// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
	return &scheduler{
		bit:       idx,
		responses: make(map[uint64]*response),
	}
}

// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
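//
// The pipeline wired up by run:
//
//	sections -> scheduleRequests -> dist (unique requests, served by a distributor via deliver)
//	                  |
//	                 pend -> scheduleDeliveries -> done (cached results, in input order)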
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Create a forwarder channel between requests and responses of the same size as
	// the distribution channel (since that will block the pipeline anyway).
	pend := make(chan uint64, cap(dist))

	// Start the pipeline schedulers to forward between user -> distributor -> user
	wg.Add(2)
	go s.scheduleRequests(sections, dist, pend, quit, wg)
	go s.scheduleDeliveries(pend, done, quit, wg)
}

// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure that no previously requested but never delivered state will
// cause a lockup.
func (s *scheduler) reset() {
	s.lock.Lock()
	defer s.lock.Unlock()

	for section, res := range s.responses {
		if res.cached == nil {
			delete(s.responses, section)
		}
	}
}

// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(pend)

	// Keep reading and scheduling section requests
	for {
		select {
		case <-quit:
			return

		case section, ok := <-reqs:
			// New section retrieval requested
			if !ok {
				return
			}
			// Deduplicate retrieval requests
			unique := false

			s.lock.Lock()
			if s.responses[section] == nil {
				s.responses[section] = &response{
					done: make(chan struct{}),
				}
				unique = true
			}
			s.lock.Unlock()

			// Schedule the section for retrieval and notify the deliverer to expect this section
			if unique {
				select {
				case <-quit:
					return
				case dist <- &request{bit: s.bit, section: section}:
				}
			}
			select {
			case <-quit:
				return
			case pend <- section:
			}
		}
	}
}

// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(done)

	// Keep reading notifications and scheduling deliveries
	for {
		select {
		case <-quit:
			return

		case idx, ok := <-pend:
			// New section retrieval pending
			if !ok {
				return
			}
			// Wait until the request is honoured
			s.lock.Lock()
			res := s.responses[idx]
			s.lock.Unlock()

			select {
			case <-quit:
				return
			case <-res.done:
			}
			// Deliver the result
			select {
			case <-quit:
				return
			case done <- res.cached:
			}
		}
	}
}

// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for i, section := range sections {
		if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
			res.cached = data[i]
			close(res.done)
		}
	}
}
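
// Minimal usage sketch: one possible way a driver could wire a scheduler's
// pipeline together. The retrieve helper below is hypothetical and stands in
// for whatever database or network lookup returns the bit-vector of a given
// (bit, section) pair.
//
//	s := newScheduler(0)
//
//	sections := make(chan uint64)   // section indexes to fetch
//	dist := make(chan *request, 16) // unique requests for the distributor
//	done := make(chan []byte, 16)   // results, in the order sections were sent
//	quit := make(chan struct{})
//
//	var wg sync.WaitGroup
//	s.run(sections, dist, done, quit, &wg)
//
//	// Distributor: serve each unique request and deliver the bit-vector back.
//	go func() {
//		for {
//			select {
//			case <-quit:
//				return
//			case req := <-dist:
//				s.deliver([]uint64{req.section}, [][]byte{retrieve(req.bit, req.section)})
//			}
//		}
//	}()
//
//	// Feed a few section indexes, then close the input to drain the pipeline.
//	go func() {
//		for i := uint64(0); i < 4; i++ {
//			sections <- i
//		}
//		close(sections)
//	}()
//
//	for bits := range done { // done is closed once every section is delivered
//		_ = bits // consume the bit-vectors in request order
//	}
//	wg.Wait()
//	close(quit) // stop the distributor goroutine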