github.com/uber/kraken@v0.1.4/lib/torrent/scheduler/dispatch/dispatcher_test.go (about)

     1  // Copyright (c) 2016-2019 Uber Technologies, Inc.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  package dispatch
    15  
    16  import (
    17  	"errors"
    18  	"sync"
    19  	"testing"
    20  	"time"
    21  
    22  	"github.com/uber/kraken/core"
    23  	"github.com/uber/kraken/gen/go/proto/p2p"
    24  	"github.com/uber/kraken/lib/torrent/networkevent"
    25  	"github.com/uber/kraken/lib/torrent/scheduler/conn"
    26  	"github.com/uber/kraken/lib/torrent/scheduler/torrentlog"
    27  	"github.com/uber/kraken/lib/torrent/storage"
    28  	"github.com/uber/kraken/lib/torrent/storage/agentstorage"
    29  	"github.com/uber/kraken/lib/torrent/storage/piecereader"
    30  	"github.com/uber/kraken/utils/bitsetutil"
    31  	"github.com/uber/kraken/utils/memsize"
    32  	"go.uber.org/zap"
    33  
    34  	"github.com/andres-erbsen/clock"
    35  	"github.com/stretchr/testify/require"
    36  	"github.com/uber-go/tally"
    37  	"github.com/willf/bitset"
    38  )
    39  
// mockMessages is an in-memory Messages implementation which records every
// message sent through it so tests can inspect outbound traffic.
type mockMessages struct {
	sent     []*conn.Message // log of all successfully sent messages, in order
	receiver chan *conn.Message
	closed   bool // set by Close; causes subsequent Sends to fail
}
    45  
    46  func newMockMessages() *mockMessages {
    47  	return &mockMessages{receiver: make(chan *conn.Message)}
    48  }
    49  
    50  func (m *mockMessages) Send(msg *conn.Message) error {
    51  	if m.closed {
    52  		return errors.New("messages closed")
    53  	}
    54  	m.sent = append(m.sent, msg)
    55  	return nil
    56  }
    57  
    58  func (m *mockMessages) Receiver() <-chan *conn.Message { return m.receiver }
    59  
    60  func (m *mockMessages) Close() {
    61  	if m.closed {
    62  		return
    63  	}
    64  	close(m.receiver)
    65  	m.closed = true
    66  }
    67  
    68  func numRequestsPerPiece(messages Messages) map[int]int {
    69  	requests := make(map[int]int)
    70  	for _, msg := range messages.(*mockMessages).sent {
    71  		if msg.Message.Type == p2p.Message_PIECE_REQUEST {
    72  			requests[int(msg.Message.PieceRequest.Index)]++
    73  		}
    74  	}
    75  	return requests
    76  }
    77  
    78  func announcedPieces(messages Messages) []int {
    79  	var ps []int
    80  	for _, msg := range messages.(*mockMessages).sent {
    81  		if msg.Message.Type == p2p.Message_ANNOUCE_PIECE {
    82  			ps = append(ps, int(msg.Message.AnnouncePiece.Index))
    83  		}
    84  	}
    85  	return ps
    86  }
    87  
    88  func hasComplete(messages Messages) bool {
    89  	for _, m := range messages.(*mockMessages).sent {
    90  		if m.Message.Type == p2p.Message_COMPLETE {
    91  			return true
    92  		}
    93  	}
    94  	return false
    95  }
    96  
    97  func closed(messages Messages) bool {
    98  	return messages.(*mockMessages).closed
    99  }
   100  
// noopEvents is an Events implementation that ignores all dispatcher callbacks.
type noopEvents struct{}
   102  
   103  func (e noopEvents) DispatcherComplete(*Dispatcher) {}
   104  
   105  func (e noopEvents) PeerRemoved(core.PeerID, core.InfoHash) {}
   106  
   107  func testDispatcher(config Config, clk clock.Clock, t storage.Torrent) *Dispatcher {
   108  	d, err := newDispatcher(
   109  		config,
   110  		tally.NoopScope,
   111  		clk,
   112  		networkevent.NewTestProducer(),
   113  		noopEvents{},
   114  		core.PeerIDFixture(),
   115  		t,
   116  		zap.NewNop().Sugar(),
   117  		torrentlog.NewNopLogger())
   118  	if err != nil {
   119  		panic(err)
   120  	}
   121  	return d
   122  }
   123  
// TestDispatcherSendUniquePieceRequestsWithinLimit verifies that when many
// fully-seeded peers are added concurrently, each piece is requested from at
// most one peer, and no single peer receives more than PipelineLimit
// requests.
func TestDispatcherSendUniquePieceRequestsWithinLimit(t *testing.T) {
	require := require.New(t)

	config := Config{
		PipelineLimit: 3,
	}
	clk := clock.NewMock()

	torrent, cleanup := agentstorage.TorrentFixture(core.SizedBlobFixture(100, 1).MetaInfo)
	defer cleanup()

	d := testDispatcher(config, clk, torrent)

	// mu guards the three aggregates below, which are written from the
	// goroutines spawned in the loop.
	var mu sync.Mutex
	var requestCount int
	totalRequestsPerPiece := make(map[int]int)
	totalRequestPerPeer := make(map[core.PeerID]int)

	// Add a bunch of peers concurrently which are saturated with pieces d needs.
	// We should send exactly <pipelineLimit> piece requests per peer.
	peerBitfield := bitset.New(uint(torrent.NumPieces())).Complement()
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			p, err := d.addPeer(core.PeerIDFixture(), peerBitfield, newMockMessages())
			require.NoError(err)
			d.maybeRequestMorePieces(p)
			for i, n := range numRequestsPerPiece(p.messages) {
				// Each piece may be requested at most once globally, and each
				// peer may hold at most PipelineLimit outstanding requests.
				require.True(n <= 1)
				mu.Lock()
				requestCount += n
				totalRequestsPerPiece[i] += n
				require.True(totalRequestsPerPiece[i] <= 1)
				totalRequestPerPeer[p.id] += n
				require.True(totalRequestPerPeer[p.id] <= config.PipelineLimit)
				mu.Unlock()
			}
		}()
	}
	wg.Wait()

	// Every peer used its full pipeline: 10 peers * PipelineLimit requests.
	require.Equal(config.PipelineLimit*10, requestCount)

	// All 10 peers advertised every piece, so each piece's peer count is 10.
	buffer := make([]uint, peerBitfield.Len())
	_, buffer = peerBitfield.NextSetMany(uint(0), buffer)
	for _, i := range buffer {
		count := d.numPeersByPiece.Get(int(i))
		require.Equal(10, count)
	}
}
   176  
// TestDispatcherResendFailedPieceRequests verifies that when piece requests
// time out, they are re-sent to other peers which have the piece, and not
// duplicated to the peer that already holds a pending request.
func TestDispatcherResendFailedPieceRequests(t *testing.T) {
	require := require.New(t)

	config := Config{
		DisableEndgame: true,
	}
	clk := clock.NewMock()

	torrent, cleanup := agentstorage.TorrentFixture(core.SizedBlobFixture(2, 1).MetaInfo)
	defer cleanup()

	d := testDispatcher(config, clk, torrent)

	// p1 has both pieces and sends requests for both.
	p1, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(true, true), newMockMessages())
	require.NoError(err)
	d.maybeRequestMorePieces(p1)
	require.Equal(map[int]int{
		0: 1,
		1: 1,
	}, numRequestsPerPiece(p1.messages))

	// p2 has piece 0 and sends no piece requests.
	p2, err := d.addPeer(
		core.PeerIDFixture(), bitsetutil.FromBools(true, false), newMockMessages())
	require.NoError(err)
	d.maybeRequestMorePieces(p2)
	require.Equal(map[int]int{}, numRequestsPerPiece(p2.messages))

	// p3 has piece 1 and sends no piece requests.
	p3, err := d.addPeer(
		core.PeerIDFixture(), bitsetutil.FromBools(false, true), newMockMessages())
	require.NoError(err)
	d.maybeRequestMorePieces(p3)
	require.Equal(map[int]int{}, numRequestsPerPiece(p3.messages))

	// Advance the mock clock past the request timeout so p1's pending
	// requests are considered failed.
	clk.Add(d.pieceRequestTimeout + 1)

	d.resendFailedPieceRequests()

	// p1 was not sent any new piece requests.
	require.Equal(map[int]int{
		0: 1,
		1: 1,
	}, numRequestsPerPiece(p1.messages))

	// p2 was sent a piece request for piece 0.
	require.Equal(map[int]int{
		0: 1,
	}, numRequestsPerPiece(p2.messages))

	// p3 was sent a piece request for piece 1.
	require.Equal(map[int]int{
		1: 1,
	}, numRequestsPerPiece(p3.messages))
}
   233  
   234  func TestDispatcherSendErrorsMarksPieceRequestsUnsent(t *testing.T) {
   235  	require := require.New(t)
   236  
   237  	config := Config{
   238  		DisableEndgame: true,
   239  	}
   240  	clk := clock.NewMock()
   241  
   242  	torrent, cleanup := agentstorage.TorrentFixture(core.SizedBlobFixture(1, 1).MetaInfo)
   243  	defer cleanup()
   244  
   245  	d := testDispatcher(config, clk, torrent)
   246  
   247  	p1, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(true), newMockMessages())
   248  	require.NoError(err)
   249  
   250  	p1.messages.Close()
   251  
   252  	// Send should fail since p1 messages are closed.
   253  	d.maybeRequestMorePieces(p1)
   254  
   255  	require.Equal(map[int]int{}, numRequestsPerPiece(p1.messages))
   256  
   257  	p2, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(true), newMockMessages())
   258  	require.NoError(err)
   259  
   260  	// Send should succeed since pending requests were marked unsent.
   261  	d.maybeRequestMorePieces(p2)
   262  
   263  	require.Equal(map[int]int{
   264  		0: 1,
   265  	}, numRequestsPerPiece(p2.messages))
   266  }
   267  
   268  func TestDispatcherCalcPieceRequestTimeout(t *testing.T) {
   269  	config := Config{
   270  		PieceRequestMinTimeout:   5 * time.Second,
   271  		PieceRequestTimeoutPerMb: 2 * time.Second,
   272  	}
   273  
   274  	tests := []struct {
   275  		maxPieceLength uint64
   276  		expected       time.Duration
   277  	}{
   278  		{512 * memsize.KB, 5 * time.Second},
   279  		{memsize.MB, 5 * time.Second},
   280  		{4 * memsize.MB, 8 * time.Second},
   281  		{8 * memsize.MB, 16 * time.Second},
   282  	}
   283  	for _, test := range tests {
   284  		t.Run(memsize.Format(test.maxPieceLength), func(t *testing.T) {
   285  			timeout := config.calcPieceRequestTimeout(int64(test.maxPieceLength))
   286  			require.Equal(t, test.expected, timeout)
   287  		})
   288  	}
   289  }
   290  
   291  func TestDispatcherEndgame(t *testing.T) {
   292  	require := require.New(t)
   293  
   294  	config := Config{
   295  		PipelineLimit:    1,
   296  		EndgameThreshold: 1,
   297  	}
   298  	clk := clock.NewMock()
   299  
   300  	torrent, cleanup := agentstorage.TorrentFixture(core.SizedBlobFixture(1, 1).MetaInfo)
   301  	defer cleanup()
   302  
   303  	d := testDispatcher(config, clk, torrent)
   304  
   305  	p1, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(true), newMockMessages())
   306  	require.NoError(err)
   307  
   308  	d.maybeRequestMorePieces(p1)
   309  	require.Equal(map[int]int{0: 1}, numRequestsPerPiece(p1.messages))
   310  
   311  	p2, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(true), newMockMessages())
   312  	require.NoError(err)
   313  
   314  	// Should send duplicate request for piece 0 since we're in endgame.
   315  	d.maybeRequestMorePieces(p2)
   316  	require.Equal(map[int]int{0: 1}, numRequestsPerPiece(p2.messages))
   317  }
   318  
   319  func TestDispatcherHandlePiecePayloadAnnouncesPiece(t *testing.T) {
   320  	require := require.New(t)
   321  
   322  	blob := core.SizedBlobFixture(2, 1)
   323  
   324  	torrent, cleanup := agentstorage.TorrentFixture(blob.MetaInfo)
   325  	defer cleanup()
   326  
   327  	d := testDispatcher(Config{}, clock.NewMock(), torrent)
   328  
   329  	p1, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(false, false), newMockMessages())
   330  	require.NoError(err)
   331  
   332  	p2, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(false, false), newMockMessages())
   333  	require.NoError(err)
   334  
   335  	msg := conn.NewPiecePayloadMessage(0, piecereader.NewBuffer(blob.Content[0:1]))
   336  
   337  	require.NoError(d.dispatch(p1, msg))
   338  
   339  	// Should not announce to the peer who sent the payload.
   340  	require.Empty(announcedPieces(p1.messages))
   341  
   342  	// Should announce to other peers.
   343  	require.Equal([]int{0}, announcedPieces(p2.messages))
   344  }
   345  
   346  func TestDispatcherHandlePiecePayloadSendsCompleteMessage(t *testing.T) {
   347  	require := require.New(t)
   348  
   349  	blob := core.SizedBlobFixture(1, 1)
   350  
   351  	torrent, cleanup := agentstorage.TorrentFixture(blob.MetaInfo)
   352  	defer cleanup()
   353  
   354  	d := testDispatcher(Config{}, clock.NewMock(), torrent)
   355  
   356  	p1, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(false), newMockMessages())
   357  	require.NoError(err)
   358  
   359  	p2, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(false), newMockMessages())
   360  	require.NoError(err)
   361  
   362  	msg := conn.NewPiecePayloadMessage(0, piecereader.NewBuffer(blob.Content[0:1]))
   363  
   364  	require.NoError(d.dispatch(p1, msg))
   365  
   366  	require.True(hasComplete(p1.messages))
   367  	require.True(hasComplete(p2.messages))
   368  }
   369  
   370  func TestDispatcherClosesCompletedPeersWhenComplete(t *testing.T) {
   371  	require := require.New(t)
   372  
   373  	blob := core.SizedBlobFixture(1, 1)
   374  
   375  	torrent, cleanup := agentstorage.TorrentFixture(blob.MetaInfo)
   376  	defer cleanup()
   377  
   378  	d := testDispatcher(Config{}, clock.NewMock(), torrent)
   379  
   380  	completedPeer, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(true), newMockMessages())
   381  	require.NoError(err)
   382  
   383  	incompletePeer, err := d.addPeer(
   384  		core.PeerIDFixture(), bitsetutil.FromBools(false), newMockMessages())
   385  	require.NoError(err)
   386  
   387  	msg := conn.NewPiecePayloadMessage(0, piecereader.NewBuffer(blob.Content[0:1]))
   388  
   389  	// Completed peers are closed when the dispatcher completes.
   390  	require.NoError(d.dispatch(completedPeer, msg))
   391  	require.True(closed(completedPeer.messages))
   392  	require.False(closed(incompletePeer.messages))
   393  
   394  	// Peers which send complete messages are closed if the dispatcher is complete.
   395  	require.NoError(d.dispatch(incompletePeer, conn.NewCompleteMessage()))
   396  	require.True(closed(incompletePeer.messages))
   397  }
   398  
   399  func TestDispatcherHandleCompleteRequestsPieces(t *testing.T) {
   400  	require := require.New(t)
   401  
   402  	blob := core.SizedBlobFixture(1, 1)
   403  
   404  	torrent, cleanup := agentstorage.TorrentFixture(blob.MetaInfo)
   405  	defer cleanup()
   406  
   407  	d := testDispatcher(Config{}, clock.NewMock(), torrent)
   408  
   409  	p, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(false), newMockMessages())
   410  	require.NoError(err)
   411  
   412  	require.Empty(numRequestsPerPiece(p.messages))
   413  
   414  	require.NoError(d.dispatch(p, conn.NewCompleteMessage()))
   415  
   416  	require.Equal(map[int]int{0: 1}, numRequestsPerPiece(p.messages))
   417  	require.False(closed(p.messages))
   418  }
   419  
   420  func TestDispatcherPeerPieceCounts(t *testing.T) {
   421  	require := require.New(t)
   422  
   423  	blob := core.SizedBlobFixture(3, 1)
   424  
   425  	torrent, cleanup := agentstorage.TorrentFixture(blob.MetaInfo)
   426  	defer cleanup()
   427  
   428  	d := testDispatcher(Config{}, clock.NewMock(), torrent)
   429  
   430  	var err error
   431  
   432  	p, err := d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(false, false, false), newMockMessages())
   433  	require.NoError(err)
   434  
   435  	require.Equal(0, d.numPeersByPiece.Get(0))
   436  	require.Equal(0, d.numPeersByPiece.Get(1))
   437  	require.Equal(0, d.numPeersByPiece.Get(2))
   438  
   439  	d.dispatch(p, conn.NewAnnouncePieceMessage(2))
   440  
   441  	require.Equal(1, d.numPeersByPiece.Get(2))
   442  
   443  	d.dispatch(p, conn.NewAnnouncePieceMessage(0))
   444  	d.dispatch(p, conn.NewAnnouncePieceMessage(0))
   445  
   446  	require.Equal(2, d.numPeersByPiece.Get(0))
   447  
   448  	_, err = d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(true, true, true), newMockMessages())
   449  	require.NoError(err)
   450  
   451  	require.Equal(3, d.numPeersByPiece.Get(0))
   452  	require.Equal(1, d.numPeersByPiece.Get(1))
   453  	require.Equal(2, d.numPeersByPiece.Get(2))
   454  
   455  	_, err = d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(true, false, true), newMockMessages())
   456  	require.NoError(err)
   457  
   458  	require.Equal(4, d.numPeersByPiece.Get(0))
   459  	require.Equal(1, d.numPeersByPiece.Get(1))
   460  	require.Equal(3, d.numPeersByPiece.Get(2))
   461  
   462  	_, err = d.addPeer(core.PeerIDFixture(), bitsetutil.FromBools(false, false, false), newMockMessages())
   463  	require.NoError(err)
   464  
   465  	require.Equal(4, d.numPeersByPiece.Get(0))
   466  	require.Equal(1, d.numPeersByPiece.Get(1))
   467  	require.Equal(3, d.numPeersByPiece.Get(2))
   468  
   469  	d.removePeer(p)
   470  
   471  	require.Equal(3, d.numPeersByPiece.Get(0))
   472  	require.Equal(1, d.numPeersByPiece.Get(1))
   473  	require.Equal(2, d.numPeersByPiece.Get(2))
   474  }