github.com/keltia/go-ipfs@v0.3.8-0.20150909044612-210793031c63/exchange/bitswap/decision/engine.go

// Package decision implements the decision engine for the bitswap service.
package decision

import (
	"sync"

	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	blocks "github.com/ipfs/go-ipfs/blocks"
	bstore "github.com/ipfs/go-ipfs/blocks/blockstore"
	bsmsg "github.com/ipfs/go-ipfs/exchange/bitswap/message"
	wl "github.com/ipfs/go-ipfs/exchange/bitswap/wantlist"
	peer "github.com/ipfs/go-ipfs/p2p/peer"
	eventlog "github.com/ipfs/go-ipfs/thirdparty/eventlog"
)

// TODO consider taking responsibility for other types of requests. For
// example, there could be a |cancelQueue| for all of the cancellation
// messages that need to go out. There could also be a |wantlistQueue| for
// the local peer's wantlists. Alternatively, these could all be bundled
// into a single, intelligent global queue that efficiently
// batches/combines and takes all of these into consideration.
//
// Right now, messages go onto the network for four reasons:
// 1. an initial `sendwantlist` message to a provider of the first key in a request
// 2. a periodic full sweep of `sendwantlist` messages to all providers
// 3. upon receipt of blocks, a `cancel` message to all peers
// 4. draining the priority queue of `blockrequests` from peers
//
// Presently, only `blockrequests` are handled by the decision engine.
// However, there is an opportunity to give it more responsibility! If the
// decision engine is given responsibility for all of the others, it can
// intelligently decide how to combine requests efficiently.
//
// Some examples of what would be possible:
//
// * when sending out the wantlists, include `cancel` requests
// * when handling `blockrequests`, include `sendwantlist` and `cancel` as appropriate
// * when handling `cancel`, if we recently received a wanted block from a
//   peer, include a partial wantlist that contains a few other high priority
//   blocks
//
// In a sense, if we treat the decision engine as a black box, it could do
// whatever it sees fit to produce desired outcomes (get wanted keys
// quickly, maintain good relationships with peers, etc).

var log = eventlog.Logger("engine")

const (
	// outboxChanBuffer must be 0 to prevent stale messages from being sent
	outboxChanBuffer = 0
)

// Envelope contains a message for a Peer
type Envelope struct {
	// Peer is the intended recipient
	Peer peer.ID

	// Block is the payload
	Block *blocks.Block

	// A callback to notify the decision queue that the task is complete
	Sent func()
}

type Engine struct {
	// peerRequestQueue is a priority queue of requests received from peers.
	// Requests are popped from the queue, packaged up, and placed in the
	// outbox.
	peerRequestQueue peerRequestQueue

	// FIXME it's a bit odd for the client and the worker to both share memory
	// (both modify the peerRequestQueue) and also to communicate over the
	// workSignal channel. consider sending requests over the channel and
	// allowing the worker to have exclusive access to the peerRequestQueue. In
	// that case, no lock would be required.
	workSignal chan struct{}

	// outbox contains outgoing messages to peers. This is owned by the
	// taskWorker goroutine
	outbox chan (<-chan *Envelope)

	bs bstore.Blockstore

	lock sync.RWMutex // protects the fields immediately below
	// ledgerMap lists Ledgers by their Partner key.
	ledgerMap map[peer.ID]*ledger
}

func NewEngine(ctx context.Context, bs bstore.Blockstore) *Engine {
	e := &Engine{
		ledgerMap:        make(map[peer.ID]*ledger),
		bs:               bs,
		peerRequestQueue: newPRQ(),
		outbox:           make(chan (<-chan *Envelope), outboxChanBuffer),
		workSignal:       make(chan struct{}, 1),
	}
	go e.taskWorker(ctx)
	return e
}

func (e *Engine) WantlistForPeer(p peer.ID) (out []wl.Entry) {
	e.lock.Lock()
	partner, ok := e.ledgerMap[p]
	if ok {
		out = partner.wantList.SortedEntries()
	}
	e.lock.Unlock()
	return out
}

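// taskWorker offers a fresh one-time-use Envelope channel on the outbox and
// only then prepares an envelope to send down it. Because outboxChanBuffer is
// 0, a channel is handed out only once a receiver is actually ready, so an
// envelope is never prepared for (or left sitting in a buffer behind) a
// receiver that has gone away.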
func (e *Engine) taskWorker(ctx context.Context) {
	defer close(e.outbox) // because taskWorker uses the channel exclusively
	for {
		oneTimeUse := make(chan *Envelope, 1) // buffer to prevent blocking
		select {
		case <-ctx.Done():
			return
		case e.outbox <- oneTimeUse:
		}
		// receiver is ready for an outgoing envelope. let's prepare one. first,
		// we must acquire a task from the PQ...
		envelope, err := e.nextEnvelope(ctx)
		if err != nil {
			close(oneTimeUse)
			return // ctx cancelled
		}
		oneTimeUse <- envelope // buffered. won't block
		close(oneTimeUse)
	}
}

// nextEnvelope runs in the taskWorker goroutine. Returns an error if the
// context is cancelled before the next Envelope can be created.
func (e *Engine) nextEnvelope(ctx context.Context) (*Envelope, error) {
	for {
		nextTask := e.peerRequestQueue.Pop()
		for nextTask == nil {
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-e.workSignal:
				nextTask = e.peerRequestQueue.Pop()
			}
		}

		// with a task in hand, we're ready to prepare the envelope...

		block, err := e.bs.Get(nextTask.Entry.Key)
		if err != nil {
			// If we don't have the block, don't hold that against the peer
			// make sure to update that the task has been 'completed'
			nextTask.Done()
			continue
		}

		return &Envelope{
			Peer:  nextTask.Target,
			Block: block,
			Sent: func() {
				nextTask.Done()
				select {
				case e.workSignal <- struct{}{}:
					// work completing may mean that our queue will provide new
					// work to be done.
				default:
				}
			},
		}, nil
	}
}

// Outbox returns a channel of one-time use Envelope channels.
func (e *Engine) Outbox() <-chan (<-chan *Envelope) {
	return e.outbox
}
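
// drainOutbox is a minimal, illustrative sketch and not part of this package's
// API: it shows how a consumer is expected to use Outbox. Receive the next
// one-time-use channel, wait for its single Envelope, deliver the block to the
// target peer, and call Sent so the engine can mark the task complete and look
// for more work. The `send` callback is a hypothetical stand-in for whatever
// transport the caller uses to ship a block to a peer.
func drainOutbox(ctx context.Context, e *Engine, send func(peer.ID, *blocks.Block) error) {
	for {
		select {
		case <-ctx.Done():
			return
		case next, ok := <-e.Outbox():
			if !ok {
				// the engine's context was cancelled and taskWorker closed the outbox
				return
			}
			envelope, ok := <-next
			if !ok {
				// taskWorker was cancelled before an envelope could be prepared
				continue
			}
			if err := send(envelope.Peer, envelope.Block); err != nil {
				log.Debugf("failed to send block to %s: %s", envelope.Peer, err)
			}
			// in this sketch we always notify the engine, even on a failed send,
			// so the task is accounted for and the worker can move on
			envelope.Sent()
		}
	}
}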

// Peers returns a slice of peers with whom the local node has active sessions.
func (e *Engine) Peers() []peer.ID {
	e.lock.RLock()
	defer e.lock.RUnlock()

	response := make([]peer.ID, 0)
	for _, ledger := range e.ledgerMap {
		response = append(response, ledger.Partner)
	}
	return response
}

// MessageReceived performs book-keeping. Returns error if passed invalid
// arguments.
func (e *Engine) MessageReceived(p peer.ID, m bsmsg.BitSwapMessage) error {
	e.lock.Lock()
	defer e.lock.Unlock()

	if len(m.Wantlist()) == 0 && len(m.Blocks()) == 0 {
		log.Debugf("received empty message from %s", p)
	}

	newWorkExists := false
	defer func() {
		if newWorkExists {
			e.signalNewWork()
		}
	}()

	l := e.findOrCreate(p)
	if m.Full() {
		l.wantList = wl.New()
	}

	for _, entry := range m.Wantlist() {
		if entry.Cancel {
			log.Debugf("cancel %s", entry.Key)
			l.CancelWant(entry.Key)
			e.peerRequestQueue.Remove(entry.Key, p)
		} else {
			log.Debugf("wants %s - %d", entry.Key, entry.Priority)
			l.Wants(entry.Key, entry.Priority)
			if exists, err := e.bs.Has(entry.Key); err == nil && exists {
				e.peerRequestQueue.Push(entry.Entry, p)
				newWorkExists = true
			}
		}
	}

	for _, block := range m.Blocks() {
		log.Debugf("got block %s %d bytes", block.Key(), len(block.Data))
		l.ReceivedBytes(len(block.Data))
		for _, l := range e.ledgerMap {
			if entry, ok := l.WantListContains(block.Key()); ok {
				e.peerRequestQueue.Push(entry, l.Partner)
				newWorkExists = true
			}
		}
	}
	return nil
}

// TODO add contents of m.WantList() to my local wantlist? NB: could introduce
// race conditions where I send a message, but MessageSent gets handled after
// MessageReceived. The information in the local wantlist could become
// inconsistent. Would need to ensure that Sends and acknowledgement of the
// send happen atomically

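// MessageSent performs book-keeping for a message sent to the given peer: the
// size of each sent block is credited to the peer's ledger, and the sent
// blocks are removed from that peer's wantlist and from the request queue.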
func (e *Engine) MessageSent(p peer.ID, m bsmsg.BitSwapMessage) error {
	e.lock.Lock()
	defer e.lock.Unlock()

	l := e.findOrCreate(p)
	for _, block := range m.Blocks() {
		l.SentBytes(len(block.Data))
		l.wantList.Remove(block.Key())
		e.peerRequestQueue.Remove(block.Key(), p)
	}

	return nil
}

func (e *Engine) PeerDisconnected(p peer.ID) {
	// TODO: release ledger
}

func (e *Engine) numBytesSentTo(p peer.ID) uint64 {
	// NB not threadsafe
	return e.findOrCreate(p).Accounting.BytesSent
}

func (e *Engine) numBytesReceivedFrom(p peer.ID) uint64 {
	// NB not threadsafe
	return e.findOrCreate(p).Accounting.BytesRecv
}

// findOrCreate lazily instantiates a ledger for the given peer.
func (e *Engine) findOrCreate(p peer.ID) *ledger {
	l, ok := e.ledgerMap[p]
	if !ok {
		l = newLedger(p)
		e.ledgerMap[p] = l
	}
	return l
}

func (e *Engine) signalNewWork() {
	// Signal task generation to restart (if stopped!)
	select {
	case e.workSignal <- struct{}{}:
	default:
	}
}