github.com/jeffallen/go-ethereum@v1.1.4-0.20150910155051-571d3236c49c/eth/downloader/queue.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Contains the block download scheduler to collect download tasks and schedule
    18  // them in an ordered and throttled way.
    19  
    20  package downloader
    21  
    22  import (
    23  	"errors"
    24  	"fmt"
    25  	"sync"
    26  	"time"
    27  
    28  	"github.com/ethereum/go-ethereum/common"
    29  	"github.com/ethereum/go-ethereum/core/types"
    30  	"github.com/ethereum/go-ethereum/logger"
    31  	"github.com/ethereum/go-ethereum/logger/glog"
    32  	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
    33  )
    34  
    35  var (
    36  	blockCacheLimit = 8 * MaxBlockFetch // Maximum number of blocks to cache before throttling the download
    37  )
    38  
    39  var (
    40  	errNoFetchesPending = errors.New("no fetches pending")
    41  	errStaleDelivery    = errors.New("stale delivery")
    42  )
    43  
    44  // fetchRequest is a currently running block retrieval operation.
    45  type fetchRequest struct {
    46  	Peer   *peer               // Peer to which the request was sent
    47  	Hashes map[common.Hash]int // Requested hashes with their insertion index (priority)
    48  	Time   time.Time           // Time when the request was made
    49  }
    50  
    51  // queue represents hashes that either need fetching or are being fetched.
    52  type queue struct {
    53  	hashPool    map[common.Hash]int // Pending hashes, mapping to their insertion index (priority)
    54  	hashQueue   *prque.Prque        // Priority queue of the block hashes to fetch
    55  	hashCounter int                 // Counter indexing the added hashes to ensure retrieval order
    56  
    57  	pendPool map[string]*fetchRequest // Currently pending block retrieval operations
    58  
    59  	blockPool   map[common.Hash]uint64 // Hash-set of the downloaded blocks, mapping to their block numbers (cache index relative to blockOffset)
    60  	blockCache  []*Block               // Downloaded but not yet delivered blocks
    61  	blockOffset uint64                 // Offset of the first cached block in the block-chain
    62  
    63  	lock sync.RWMutex
    64  }
    65  
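        // Typical call pattern (an illustrative sketch only; hashes, p, blocks and
        // firstNumber are hypothetical placeholders, not identifiers from this file):
        // the downloader schedules announced hashes, reserves a batch per peer, feeds
        // back the peer's response and drains completed blocks in chain order.
        //
        //	q := newQueue()
        //	q.Prepare(firstNumber)  // expected number of the first scheduled block
        //	q.Insert(hashes, false) // schedule the announced hashes
        //	if req := q.Reserve(p, MaxBlockFetch); req != nil {
        //		// request req.Hashes from the peer, then:
        //		if err := q.Deliver(p.id, blocks); err != nil {
        //			// penalize or drop the peer
        //		}
        //	}
        //	ready := q.TakeBlocks() // contiguous blocks ready for import
        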
    66  // newQueue creates a new download queue for scheduling block retrieval.
    67  func newQueue() *queue {
    68  	return &queue{
    69  		hashPool:   make(map[common.Hash]int),
    70  		hashQueue:  prque.New(),
    71  		pendPool:   make(map[string]*fetchRequest),
    72  		blockPool:  make(map[common.Hash]uint64),
    73  		blockCache: make([]*Block, blockCacheLimit),
    74  	}
    75  }
    76  
    77  // Reset clears out the queue contents.
    78  func (q *queue) Reset() {
    79  	q.lock.Lock()
    80  	defer q.lock.Unlock()
    81  
    82  	q.hashPool = make(map[common.Hash]int)
    83  	q.hashQueue.Reset()
    84  	q.hashCounter = 0
    85  
    86  	q.pendPool = make(map[string]*fetchRequest)
    87  
    88  	q.blockPool = make(map[common.Hash]uint64)
    89  	q.blockOffset = 0
    90  	q.blockCache = make([]*Block, blockCacheLimit)
    91  }
    92  
    93  // Size retrieves the number of hashes in the queue, returning the pending and
    94  // already downloaded counts separately.
    95  func (q *queue) Size() (int, int) {
    96  	q.lock.RLock()
    97  	defer q.lock.RUnlock()
    98  
    99  	return len(q.hashPool), len(q.blockPool)
   100  }
   101  
   102  // Pending retrieves the number of hashes pending for retrieval.
   103  func (q *queue) Pending() int {
   104  	q.lock.RLock()
   105  	defer q.lock.RUnlock()
   106  
   107  	return q.hashQueue.Size()
   108  }
   109  
   110  // InFlight retrieves the number of fetch requests currently in flight.
   111  func (q *queue) InFlight() int {
   112  	q.lock.RLock()
   113  	defer q.lock.RUnlock()
   114  
   115  	return len(q.pendPool)
   116  }
   117  
   118  // Throttle checks if the download should be throttled (active block fetches
   119  // exceed the free space in the block cache).
   120  func (q *queue) Throttle() bool {
   121  	q.lock.RLock()
   122  	defer q.lock.RUnlock()
   123  
   124  	// Calculate the currently in-flight block requests
   125  	pending := 0
   126  	for _, request := range q.pendPool {
   127  		pending += len(request.Hashes)
   128  	}
   129  	// Throttle if more blocks are in-flight than there is free space in the cache
   130  	return pending >= len(q.blockCache)-len(q.blockPool)
   131  }
   132  
   133  // Has checks if a hash is within the download queue or not.
   134  func (q *queue) Has(hash common.Hash) bool {
   135  	q.lock.RLock()
   136  	defer q.lock.RUnlock()
   137  
   138  	if _, ok := q.hashPool[hash]; ok {
   139  		return true
   140  	}
   141  	if _, ok := q.blockPool[hash]; ok {
   142  		return true
   143  	}
   144  	return false
   145  }
   146  
   147  // Insert adds a set of hashes to the download queue for scheduling, returning
   148  // the new hashes encountered.
   149  func (q *queue) Insert(hashes []common.Hash, fifo bool) []common.Hash {
   150  	q.lock.Lock()
   151  	defer q.lock.Unlock()
   152  
   153  	// Insert all the hashes prioritized in the arrival order
   154  	inserts := make([]common.Hash, 0, len(hashes))
   155  	for _, hash := range hashes {
   156  		// Skip anything we already have
   157  		if old, ok := q.hashPool[hash]; ok {
   158  			glog.V(logger.Warn).Infof("Hash %x already scheduled at index %v", hash, old)
   159  			continue
   160  		}
   161  		// Update the counters and insert the hash
   162  		q.hashCounter = q.hashCounter + 1
   163  		inserts = append(inserts, hash)
   164  
   165  		q.hashPool[hash] = q.hashCounter
   166  		if fifo {
   167  			q.hashQueue.Push(hash, -float32(q.hashCounter)) // Lowest gets scheduled first
   168  		} else {
   169  			q.hashQueue.Push(hash, float32(q.hashCounter)) // Highest gets scheduled first
   170  		}
   171  	}
   172  	return inserts
   173  }
   174  
   175  // GetHeadBlock retrieves the first block from the cache, or nil if it hasn't
   176  // been downloaded yet (or simply non-existent).
   177  func (q *queue) GetHeadBlock() *Block {
   178  	q.lock.RLock()
   179  	defer q.lock.RUnlock()
   180  
   181  	if len(q.blockCache) == 0 {
   182  		return nil
   183  	}
   184  	return q.blockCache[0]
   185  }
   186  
   187  // GetBlock retrieves a downloaded block, or nil if non-existent.
   188  func (q *queue) GetBlock(hash common.Hash) *Block {
   189  	q.lock.RLock()
   190  	defer q.lock.RUnlock()
   191  
   192  	// Short circuit if the block hasn't been downloaded yet
   193  	index, ok := q.blockPool[hash]
   194  	if !ok {
   195  		return nil
   196  	}
   197  	// Return the block if it's still available in the cache
   198  	if q.blockOffset <= index && index < q.blockOffset+uint64(len(q.blockCache)) {
   199  		return q.blockCache[index-q.blockOffset]
   200  	}
   201  	return nil
   202  }
   203  
   204  // TakeBlocks retrieves and permanently removes a batch of blocks from the cache.
   205  func (q *queue) TakeBlocks() []*Block {
   206  	q.lock.Lock()
   207  	defer q.lock.Unlock()
   208  
   209  	// Accumulate all available blocks
   210  	blocks := []*Block{}
   211  	for _, block := range q.blockCache {
   212  		if block == nil {
   213  			break
   214  		}
   215  		blocks = append(blocks, block)
   216  		delete(q.blockPool, block.RawBlock.Hash())
   217  	}
   218  	// Delete the blocks from the slice and let them be garbage collected:
   219  	// without this slice trick the blocks would stay in memory until nil
   220  	// was assigned to the whole q.blockCache slice
   221  	copy(q.blockCache, q.blockCache[len(blocks):])
   222  	for k, n := len(q.blockCache)-len(blocks), len(q.blockCache); k < n; k++ {
   223  		q.blockCache[k] = nil
   224  	}
   225  	q.blockOffset += uint64(len(blocks))
   226  
   227  	return blocks
   228  }
   229  
   230  // Reserve reserves a set of hashes for the given peer, skipping any previously
   231  // failed downloads.
   232  func (q *queue) Reserve(p *peer, count int) *fetchRequest {
   233  	q.lock.Lock()
   234  	defer q.lock.Unlock()
   235  
   236  	// Short circuit if the pool has been depleted, or if the peer's already
   237  // downloading something (sanity check to avoid corrupting state)
   238  	if q.hashQueue.Empty() {
   239  		return nil
   240  	}
   241  	if _, ok := q.pendPool[p.id]; ok {
   242  		return nil
   243  	}
   244  	// Calculate an upper limit on the hashes we might fetch (i.e. throttling)
   245  	space := len(q.blockCache) - len(q.blockPool)
   246  	for _, request := range q.pendPool {
   247  		space -= len(request.Hashes)
   248  	}
   249  	// Retrieve a batch of hashes, skipping previously failed ones
   250  	send := make(map[common.Hash]int)
   251  	skip := make(map[common.Hash]int)
   252  
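        	// Note that proc counts every popped hash, including ones skipped for this
        	// peer, so a single reservation never pops more entries than there is free
        	// space left in the block cache.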
   253  	for proc := 0; proc < space && len(send) < count && !q.hashQueue.Empty(); proc++ {
   254  		hash, priority := q.hashQueue.Pop()
   255  		if p.ignored.Has(hash) {
   256  			skip[hash.(common.Hash)] = int(priority)
   257  		} else {
   258  			send[hash.(common.Hash)] = int(priority)
   259  		}
   260  	}
   261  	// Merge all the skipped hashes back
   262  	for hash, index := range skip {
   263  		q.hashQueue.Push(hash, float32(index))
   264  	}
   265  	// Assemble and return the block download request
   266  	if len(send) == 0 {
   267  		return nil
   268  	}
   269  	request := &fetchRequest{
   270  		Peer:   p,
   271  		Hashes: send,
   272  		Time:   time.Now(),
   273  	}
   274  	q.pendPool[p.id] = request
   275  
   276  	return request
   277  }
   278  
   279  // Cancel aborts a fetch request, returning all pending hashes to the queue.
   280  func (q *queue) Cancel(request *fetchRequest) {
   281  	q.lock.Lock()
   282  	defer q.lock.Unlock()
   283  
   284  	for hash, index := range request.Hashes {
   285  		q.hashQueue.Push(hash, float32(index))
   286  	}
   287  	delete(q.pendPool, request.Peer.id)
   288  }
   289  
   290  // Expire checks for in-flight requests that exceeded a timeout allowance,
   291  // canceling them and returning the responsible peers for penalization.
   292  func (q *queue) Expire(timeout time.Duration) []string {
   293  	q.lock.Lock()
   294  	defer q.lock.Unlock()
   295  
   296  	// Iterate over the expired requests and return each to the queue
   297  	peers := []string{}
   298  	for id, request := range q.pendPool {
   299  		if time.Since(request.Time) > timeout {
   300  			for hash, index := range request.Hashes {
   301  				q.hashQueue.Push(hash, float32(index))
   302  			}
   303  			peers = append(peers, id)
   304  		}
   305  	}
   306  	// Remove the expired requests from the pending pool
   307  	for _, id := range peers {
   308  		delete(q.pendPool, id)
   309  	}
   310  	return peers
   311  }
   312  
   313  // Deliver injects a block retrieval response into the download queue.
   314  func (q *queue) Deliver(id string, blocks []*types.Block) (err error) {
   315  	q.lock.Lock()
   316  	defer q.lock.Unlock()
   317  
   318  	// Short circuit if the blocks were never requested
   319  	request := q.pendPool[id]
   320  	if request == nil {
   321  		return errNoFetchesPending
   322  	}
   323  	delete(q.pendPool, id)
   324  
   325  	// If no blocks were retrieved, mark the requested hashes as unavailable for the origin peer
   326  	if len(blocks) == 0 {
   327  		for hash := range request.Hashes {
   328  			request.Peer.ignored.Add(hash)
   329  		}
   330  	}
   331  	// Iterate over the downloaded blocks and add each of them
   332  	errs := make([]error, 0)
   333  	for _, block := range blocks {
   334  		// Skip any blocks that were not requested
   335  		hash := block.Hash()
   336  		if _, ok := request.Hashes[hash]; !ok {
   337  			errs = append(errs, fmt.Errorf("non-requested block %x", hash))
   338  			continue
   339  		}
   340  		// If a requested block falls out of the range, the hash chain is invalid
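        		// The cache slot is the block's chain number minus blockOffset; anything
        		// outside the cache window cannot belong to the scheduled batch.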
   341  		index := int(int64(block.NumberU64()) - int64(q.blockOffset))
   342  		if index >= len(q.blockCache) || index < 0 {
   343  			return errInvalidChain
   344  		}
   345  		// Otherwise merge the block into the cache and mark the hash as delivered
   346  		q.blockCache[index] = &Block{
   347  			RawBlock:   block,
   348  			OriginPeer: id,
   349  		}
   350  		delete(request.Hashes, hash)
   351  		delete(q.hashPool, hash)
   352  		q.blockPool[hash] = block.NumberU64()
   353  	}
   354  	// Return all failed or missing fetches to the queue
   355  	for hash, index := range request.Hashes {
   356  		q.hashQueue.Push(hash, float32(index))
   357  	}
   358  	// If none of the blocks were good, it's a stale delivery
   359  	if len(errs) != 0 {
   360  		if len(errs) == len(blocks) {
   361  			return errStaleDelivery
   362  		}
   363  		return fmt.Errorf("multiple failures: %v", errs)
   364  	}
   365  	return nil
   366  }
   367  
   368  // Prepare configures the block cache offset to allow accepting inbound blocks.
   369  func (q *queue) Prepare(offset uint64) {
   370  	q.lock.Lock()
   371  	defer q.lock.Unlock()
   372  
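        	// Only ever advance the offset forward, never move it back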
   373  	if q.blockOffset < offset {
   374  		q.blockOffset = offset
   375  	}
   376  }