github.com/uber/kraken@v0.1.4/lib/torrent/scheduler/dispatch/piecerequest/manager.go

// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package piecerequest

import (
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/uber/kraken/core"
	"github.com/uber/kraken/utils/syncutil"

	"github.com/andres-erbsen/clock"
	"github.com/willf/bitset"
)

// Status enumerates possible statuses of a Request.
type Status int

const (
	// StatusPending denotes a valid request which is still in-flight.
	StatusPending Status = iota

	// StatusExpired denotes an in-flight request which has timed out on our end.
	StatusExpired

	// StatusUnsent denotes an unsent request that is safe to retry to the same peer.
	StatusUnsent

	// StatusInvalid denotes a completed request that resulted in an invalid payload.
	StatusInvalid
)
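
// A request starts as StatusPending and either completes (and is removed via
// Clear) or fails: MarkUnsent and MarkInvalid transition it explicitly, while
// StatusExpired is never stored and is instead derived lazily by comparing
// sentAt against the timeout (see GetFailedRequests below).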

// Request represents a piece request to a peer.
type Request struct {
	Piece  int
	PeerID core.PeerID
	Status Status

	sentAt time.Time
}

// Manager encapsulates thread-safe piece request bookkeeping. It is not
// responsible for sending or receiving pieces in any way.
type Manager struct {
	sync.RWMutex

	// requests and requestsByPeer hold the same data, just indexed differently.
	requests       map[int][]*Request
	requestsByPeer map[core.PeerID]map[int]*Request

	clock   clock.Clock
	timeout time.Duration

	policy        pieceSelectionPolicy
	pipelineLimit int
}

// NewManager creates a new Manager.
func NewManager(
	clk clock.Clock,
	timeout time.Duration,
	policy string,
	pipelineLimit int) (*Manager, error) {

	m := &Manager{
		requests:       make(map[int][]*Request),
		requestsByPeer: make(map[core.PeerID]map[int]*Request),
		clock:          clk,
		timeout:        timeout,
		pipelineLimit:  pipelineLimit,
	}

	switch policy {
	case DefaultPolicy:
		m.policy = newDefaultPolicy()
	case RarestFirstPolicy:
		m.policy = newRarestFirstPolicy()
	default:
		return nil, fmt.Errorf("invalid piece selection policy: %s", policy)
	}
	return m, nil
}
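
// Usage sketch (illustrative, not part of the original file): constructing a
// Manager with the rarest-first policy. The timeout and pipeline limit are
// arbitrary example values; clock.New() is the wall clock from the clock
// package.
//
//	m, err := NewManager(clock.New(), 8*time.Second, RarestFirstPolicy, 3)
//	if err != nil {
//		return err
//	}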

// ReservePieces selects the next piece(s) to be requested from the given peer,
// choosing pieces according to the configured selection policy (the
// rarest-first policy uses numPeersByPiece to prefer less replicated pieces).
// If allowDuplicates is set, it may return pieces which have already been
// reserved under other peers.
func (m *Manager) ReservePieces(
	peerID core.PeerID,
	candidates *bitset.BitSet,
	numPeersByPiece syncutil.Counters,
	allowDuplicates bool) ([]int, error) {

	m.Lock()
	defer m.Unlock()

	quota := m.requestQuota(peerID)
	if quota <= 0 {
		return nil, nil
	}

	valid := func(i int) bool { return m.validRequest(peerID, i, allowDuplicates) }
	pieces, err := m.policy.selectPieces(quota, valid, candidates, numPeersByPiece)
	if err != nil {
		return nil, err
	}

	// Mark each selected piece as pending in both indexes.
	for _, i := range pieces {
		r := &Request{
			Piece:  i,
			PeerID: peerID,
			Status: StatusPending,
			sentAt: m.clock.Now(),
		}
		m.requests[i] = append(m.requests[i], r)
		if _, ok := m.requestsByPeer[peerID]; !ok {
			m.requestsByPeer[peerID] = make(map[int]*Request)
		}
		m.requestsByPeer[peerID][i] = r
	}

	return pieces, nil
}
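
// Usage sketch (illustrative, not part of the original file): reserving pieces
// for a peer advertising pieces 0-3 of a hypothetical 4-piece torrent. The
// syncutil.NewCounters call is an assumption about how the counters are built.
//
//	candidates := bitset.New(4).Set(0).Set(1).Set(2).Set(3)
//	numPeersByPiece := syncutil.NewCounters(4)
//	pieces, err := m.ReservePieces(peerID, candidates, numPeersByPiece, false)
//	// On success, each returned piece is now StatusPending under peerID, and
//	// no more than pipelineLimit requests are live against the peer.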

// MarkUnsent marks the piece request for piece i as unsent.
func (m *Manager) MarkUnsent(peerID core.PeerID, i int) {
	m.markStatus(peerID, i, StatusUnsent)
}

// MarkInvalid marks the piece request for piece i as invalid.
func (m *Manager) MarkInvalid(peerID core.PeerID, i int) {
	m.markStatus(peerID, i, StatusInvalid)
}
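
// Usage sketch (illustrative, not part of the original file): a dispatcher
// might mark a reservation unsent when the write to the peer fails, keeping
// the piece eligible for retry against the same peer. sendPieceRequest is a
// hypothetical helper.
//
//	if err := sendPieceRequest(peerID, i); err != nil {
//		m.MarkUnsent(peerID, i)
//	}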

// Clear deletes all piece requests for piece i, across every peer. Should be
// used for freeing up unneeded request bookkeeping once the piece is complete.
func (m *Manager) Clear(i int) {
	m.Lock()
	defer m.Unlock()

	delete(m.requests, i)

	for peerID, pm := range m.requestsByPeer {
		delete(pm, i)
		if len(pm) == 0 {
			delete(m.requestsByPeer, peerID)
		}
	}
}

// PendingPieces returns the pieces for all pending requests to peerID in sorted
// order. Intended primarily for testing purposes.
func (m *Manager) PendingPieces(peerID core.PeerID) []int {
	m.RLock()
	defer m.RUnlock()

	var pieces []int
	for i, r := range m.requestsByPeer[peerID] {
		if r.Status == StatusPending {
			pieces = append(pieces, i)
		}
	}
	sort.Ints(pieces)
	return pieces
}
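
// Usage sketch (illustrative, not part of the original file): a test can
// assert a reservation against the sorted pending set, e.g. with testify:
//
//	require.Equal(t, []int{0, 1, 2}, m.PendingPieces(peerID))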

// ClearPeer deletes all piece requests for peerID.
func (m *Manager) ClearPeer(peerID core.PeerID) {
	m.Lock()
	defer m.Unlock()

	delete(m.requestsByPeer, peerID)

	for i, rs := range m.requests {
		for j, r := range rs {
			if r.PeerID == peerID {
				// Eject request by swapping in the last element and truncating.
				rs[j] = rs[len(rs)-1]
				m.requests[i] = rs[:len(rs)-1]
				break
			}
		}
	}
}
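
// Usage sketch (illustrative, not part of the original file): when a peer
// connection closes, dropping its reservations frees those pieces for other
// peers while leaving duplicate reservations under other peers intact:
//
//	m.ClearPeer(peerID)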

// GetFailedRequests returns a copy of all failed piece requests.
func (m *Manager) GetFailedRequests() []Request {
	m.RLock()
	defer m.RUnlock()

	var failed []Request
	for _, rs := range m.requests {
		for _, r := range rs {
			status := r.Status
			// Pending requests which have timed out are reported as expired,
			// without mutating the stored request.
			if status == StatusPending && m.expired(r) {
				status = StatusExpired
			}
			if status != StatusPending {
				failed = append(failed, Request{
					Piece:  r.Piece,
					PeerID: r.PeerID,
					Status: status,
				})
			}
		}
	}
	return failed
}
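
// Usage sketch (illustrative, not part of the original file): a dispatcher's
// retry loop might periodically collect failures and act on each status.
// resendTo and reReserve are hypothetical helpers.
//
//	for _, r := range m.GetFailedRequests() {
//		switch r.Status {
//		case StatusUnsent:
//			resendTo(r.PeerID, r.Piece) // safe to retry against the same peer
//		case StatusExpired, StatusInvalid:
//			reReserve(r.Piece) // try a different peer
//		}
//	}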

// validRequest reports whether piece i may be reserved for peerID. A piece is
// invalid if peerID already has a live (pending, unexpired) request for it, or
// if any other peer does and duplicates are disallowed.
func (m *Manager) validRequest(peerID core.PeerID, i int, allowDuplicates bool) bool {
	for _, r := range m.requests[i] {
		if r.Status == StatusPending && !m.expired(r) {
			if r.PeerID == peerID {
				return false
			}
			if !allowDuplicates {
				return false
			}
		}
	}
	return true
}

// requestQuota returns how many new requests may be opened against peerID,
// namely pipelineLimit minus the peer's live (pending, unexpired) requests.
func (m *Manager) requestQuota(peerID core.PeerID) int {
	quota := m.pipelineLimit
	pm, ok := m.requestsByPeer[peerID]
	if !ok {
		return quota
	}

	for _, r := range pm {
		if r.Status == StatusPending && !m.expired(r) {
			quota--
			if quota == 0 {
				break
			}
		}
	}

	return quota
}
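
// For example, with pipelineLimit 3 and two live requests already out to a
// peer, the quota is 1, so ReservePieces will reserve at most one more piece
// for that peer.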

// expired reports whether r has been in-flight longer than the configured timeout.
func (m *Manager) expired(r *Request) bool {
	expiresAt := r.sentAt.Add(m.timeout)
	return m.clock.Now().After(expiresAt)
}

// markStatus sets the status of peerID's request for piece i, if one exists.
// Since requests and requestsByPeer share *Request pointers, the update is
// visible through both indexes.
func (m *Manager) markStatus(peerID core.PeerID, i int, s Status) {
	m.Lock()
	defer m.Unlock()

	for _, r := range m.requests[i] {
		if r.PeerID == peerID {
			r.Status = s
		}
	}
}