github.com/arieschain/arieschain@v0.0.0-20191023063405-37c074544356/core/bloombits/scheduler.go

package bloombits

import (
	"sync"
)

// request represents a bloom retrieval task to prioritize and pull from the local
// database or remotely from the network.
type request struct {
	section uint64 // Section index to retrieve the bit-vector from
	bit     uint   // Bit index within the section to retrieve the vector of
}

// response represents the state of a requested bit-vector through a scheduler.
type response struct {
	cached []byte        // Cached bits to dedup multiple requests
	done   chan struct{} // Channel to allow waiting for completion
}

// scheduler handles the scheduling of bloom-filter retrieval operations for
// entire section-batches belonging to a single bloom bit. Besides scheduling the
// retrieval operations, this struct also deduplicates the requests and caches
// the results to minimize network/database overhead even in complex filtering
// scenarios.
type scheduler struct {
	bit       uint                 // Index of the bit in the bloom filter this scheduler is responsible for
	responses map[uint64]*response // Currently pending retrieval requests or already cached responses
	lock      sync.Mutex           // Lock protecting the responses from concurrent access
}

// newScheduler creates a new bloom-filter retrieval scheduler for a specific
// bit index.
func newScheduler(idx uint) *scheduler {
	return &scheduler{
		bit:       idx,
		responses: make(map[uint64]*response),
	}
}
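
// Illustrative sketch, not part of the original file: since each scheduler is
// responsible for a single bloom bit, a caller filtering on several bits would
// typically keep one scheduler per bit index. The bits argument here is a
// hypothetical list of bloom bit indexes the caller cares about.
func exampleNewSchedulers(bits []uint) map[uint]*scheduler {
	schedulers := make(map[uint]*scheduler, len(bits))
	for _, bit := range bits {
		schedulers[bit] = newScheduler(bit)
	}
	return schedulers
}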

// run creates a retrieval pipeline, receiving section indexes from sections and
// returning the results in the same order through the done channel. Concurrent
// runs of the same scheduler are allowed, leading to retrieval task deduplication.
func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Create a forwarder channel between requests and responses of the same size as
	// the distribution channel (since that will block the pipeline anyway).
	pend := make(chan uint64, cap(dist))

	// Start the pipeline schedulers to forward between user -> distributor -> user
	wg.Add(2)
	go s.scheduleRequests(sections, dist, pend, quit, wg)
	go s.scheduleDeliveries(pend, done, quit, wg)
}
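
// The sketch below is illustrative only and not part of the original package:
// it shows one way a caller might wire up a single run of the pipeline. The
// channel capacities are assumptions, not values mandated by this file; the
// quit channel and the returned WaitGroup are owned by the caller, which later
// closes sections (or quit) and waits on the group for a clean shutdown.
func exampleRunPipeline(s *scheduler, quit chan struct{}) (chan uint64, chan *request, chan []byte, *sync.WaitGroup) {
	wg := new(sync.WaitGroup)

	sections := make(chan uint64)    // Section indexes fed in by the caller
	dist := make(chan *request, 16)  // Unique retrieval tasks for a distributor to service
	results := make(chan []byte, 16) // Bit-vectors handed back in request order

	// Start the request/delivery forwarders for this run
	s.run(sections, dist, results, quit, wg)
	return sections, dist, results, wg
}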

// reset cleans up any leftovers from previous runs. This is required before a
// restart to ensure that no previously requested but never delivered state will
// cause a lockup.
func (s *scheduler) reset() {
	s.lock.Lock()
	defer s.lock.Unlock()

	for section, res := range s.responses {
		if res.cached == nil {
			delete(s.responses, section)
		}
	}
}

// scheduleRequests reads section retrieval requests from the input channel,
// deduplicates the stream and pushes unique retrieval tasks into the distribution
// channel for a database or network layer to honour.
func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(pend)

	// Keep reading and scheduling section requests
	for {
		select {
		case <-quit:
			return

		case section, ok := <-reqs:
			// New section retrieval requested
			if !ok {
				return
			}
			// Deduplicate retrieval requests
			unique := false

			s.lock.Lock()
			if s.responses[section] == nil {
				s.responses[section] = &response{
					done: make(chan struct{}),
				}
				unique = true
			}
			s.lock.Unlock()

			// Schedule the section for retrieval and notify the deliverer to expect this section
			if unique {
				select {
				case <-quit:
					return
				case dist <- &request{bit: s.bit, section: section}:
				}
			}
			select {
			case <-quit:
				return
			case pend <- section:
			}
		}
	}
}

// scheduleDeliveries reads section acceptance notifications and waits for them
// to be delivered, pushing them into the output data buffer.
func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
	// Clean up the goroutine and pipeline when done
	defer wg.Done()
	defer close(done)

	// Keep reading notifications and scheduling deliveries
	for {
		select {
		case <-quit:
			return

		case idx, ok := <-pend:
			// New section retrieval pending
			if !ok {
				return
			}
			// Wait until the request is honoured
			s.lock.Lock()
			res := s.responses[idx]
			s.lock.Unlock()

			select {
			case <-quit:
				return
			case <-res.done:
			}
			// Deliver the result
			select {
			case <-quit:
				return
			case done <- res.cached:
			}
		}
	}
}

// deliver is called by the request distributor when a reply to a request arrives.
func (s *scheduler) deliver(sections []uint64, data [][]byte) {
	s.lock.Lock()
	defer s.lock.Unlock()

	for i, section := range sections {
		if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
			res.cached = data[i]
			close(res.done)
		}
	}
}
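
// Illustrative sketch, not part of the original file: a minimal distributor
// loop that services retrieval tasks from the dist channel and feeds the
// answers back through deliver. The fetch callback is a hypothetical stand-in
// for the database or network lookup of the bit-vector for (bit, section).
func exampleDistributor(s *scheduler, dist chan *request, quit chan struct{}, fetch func(bit uint, section uint64) []byte) {
	for {
		select {
		case <-quit:
			return
		case req, ok := <-dist:
			if !ok {
				return
			}
			// Hand the retrieved bit-vector back to the scheduler; deliver
			// caches it and closes the response's done channel, which unblocks
			// scheduleDeliveries for every run waiting on this section.
			s.deliver([]uint64{req.section}, [][]byte{fetch(req.bit, req.section)})
		}
	}
}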