// github.com/anacrolix/torrent@v1.61.0/peerconn_test.go

package torrent

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"net"
	"net/netip"
	"sync"
	"testing"

	g "github.com/anacrolix/generics"
	"github.com/go-quicktest/qt"
	"github.com/stretchr/testify/require"
	"golang.org/x/time/rate"

	"github.com/anacrolix/torrent/metainfo"
	pp "github.com/anacrolix/torrent/peer_protocol"
	"github.com/anacrolix/torrent/storage"
)

// Ensure that no race exists between sending a bitfield, and a subsequent
// Have that would potentially alter it.
func TestSendBitfieldThenHave(t *testing.T) {
	cl := newTestingClient(t)
	c := cl.newConnection(nil, newConnectionOpts{network: "io.Pipe"})
	c.setTorrent(cl.newTorrentForTesting())
	// I think code to handle zero size, no-name torrents is missing. It should be fine to a point.
	err := c.t.setInfoUnlocked(&metainfo.Info{
		Pieces:      make([]byte, metainfo.HashSize*3),
		Name:        "dummy",
		PieceLength: 1,
		Length:      3,
	})
	qt.Assert(t, qt.IsNil(err))
	// Capture the connection's outgoing wire bytes through an in-process pipe.
	r, w := io.Pipe()
	// c.r = r
	c.w = w
	c.startMessageWriter()
	// Mark piece 1 complete and send the bitfield while holding the client lock,
	// then send a Have for piece 2 in a separate critical section.
	c.locker().Lock()
	c.t._completedPieces.Add(1)
	c.postBitfield( /*[]bool{false, true, false}*/ )
	c.locker().Unlock()
	c.locker().Lock()
	c.have(2)
	c.locker().Unlock()
	// 15 bytes: a 5-byte-payload bitfield message plus a 5-byte-payload Have
	// message, each with a 4-byte length prefix (see the expected literal below).
	b := make([]byte, 15)
	n, err := io.ReadFull(r, b)
	c.locker().Lock()
	// This will cause connection.writer to terminate.
	c.closed.Set()
	c.locker().Unlock()
	require.NoError(t, err)
	require.EqualValues(t, 15, n)
	// Here we see that the bitfield doesn't have piece 2 set, as that should
	// arrive in the following Have message.
	require.EqualValues(t, "\x00\x00\x00\x02\x05@\x00\x00\x00\x05\x04\x00\x00\x00\x02", string(b))
}

// torrentStorage is a minimal storage implementation used by the read-loop
// benchmark. It acts as its own PieceImpl; writes are counted down on
// allChunksWritten so the benchmark can wait for all chunks to land.
type torrentStorage struct {
	allChunksWritten sync.WaitGroup
}

func (me *torrentStorage) Close() error { return nil }

// Piece returns the storage itself: every piece shares this one implementation.
func (me *torrentStorage) Piece(mp metainfo.Piece) storage.PieceImpl {
	return me
}

// Completion reports the zero Completion value (incomplete, unknown).
func (me *torrentStorage) Completion() storage.Completion {
	return storage.Completion{}
}

func (me *torrentStorage) MarkComplete() error {
	return nil
}

func (me *torrentStorage) MarkNotComplete() error {
	return nil
}

// ReadAt always fails: the benchmark only writes chunks, it never reads them back.
func (me *torrentStorage) ReadAt([]byte, int64) (int, error) {
	return 0, errors.New("not implemented")
}

// WriteAt asserts every write is exactly one default-sized chunk, then signals
// the benchmark's wait group. Panics on unexpected sizes to fail loudly.
func (me *torrentStorage) WriteAt(b []byte, _ int64) (int, error) {
	if len(b) != defaultChunkSize {
		panic(len(b))
	}
	me.allChunksWritten.Done()
	return len(b), nil
}

// torrentStorageClient adapts a *torrentStorage to the storage client interface.
type torrentStorageClient struct {
	ts *torrentStorage
}

func (t torrentStorageClient) OpenTorrent(ctx context.Context, info *metainfo.Info, infoHash metainfo.Hash) (storage.TorrentImpl, error) {
	ts := t.ts
	return storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}, nil
}

// BenchmarkConnectionMainReadLoop measures PeerConn.mainReadLoop throughput:
// one goroutine runs the read loop while another feeds it pre-marshalled Piece
// messages for every chunk of piece 0, over and over, via a net.Pipe.
func BenchmarkConnectionMainReadLoop(b *testing.B) {
	var cl Client
	cfg := TestingConfig(b)
	ts := &torrentStorage{}
	cfg.DefaultStorage = &torrentStorageClient{ts}
	cl.init(cfg)
	t, _ := cl.AddTorrentOpt(AddTorrentOpts{
		InfoHash:                 testingTorrentInfoHash,
		Storage:                  &torrentStorageClient{ts},
		DisableInitialPieceCheck: true,
	})
	// Single 1 MiB piece.
	require.NoError(b, t.setInfoUnlocked(&metainfo.Info{
		Pieces:      make([]byte, 20),
		Length:      1 << 20,
		PieceLength: 1 << 20,
	}))
	//t.storage = &storage.Torrent{TorrentImpl: storage.TorrentImpl{Piece: ts.Piece, Close: ts.Close}}
	//t.onSetInfo()
	t._pendingPieces.Add(0)
	r, w := net.Pipe()
	b.Logf("pipe reader remote addr: %v", r.RemoteAddr())
	cn := cl.newConnection(r, newConnectionOpts{
		outgoing: true,
		// TODO: This is a hack to give the pipe a bannable remote address.
		remoteAddr: netip.AddrPortFrom(netip.AddrFrom4([4]byte{1, 2, 3, 4}), 1234),
		network:    r.RemoteAddr().Network(),
		connString: regularNetConnPeerConnConnString(r),
	})
	qt.Assert(b, qt.IsTrue(cn.bannableAddr.Ok))
	cn.setTorrent(t)
	requestIndexBegin := t.pieceRequestIndexBegin(0)
	requestIndexEnd := t.pieceRequestIndexBegin(1)
	eachRequestIndex := func(f func(ri RequestIndex)) {
		for ri := requestIndexBegin; ri < requestIndexEnd; ri++ {
			f(ri)
		}
	}
	const chunkSize = defaultChunkSize
	numRequests := requestIndexEnd - requestIndexBegin
	// Marshal one Piece message per chunk of piece 0 up front, outside the
	// timed loop.
	msgBufs := make([][]byte, 0, numRequests)
	eachRequestIndex(func(ri RequestIndex) {
		msgBufs = append(msgBufs, pp.Message{
			Type:  pp.Piece,
			Piece: make([]byte, chunkSize),
			Begin: pp.Integer(chunkSize) * pp.Integer(ri),
		}.MustMarshalBinary())
	})
	// errgroup can't handle this pattern...
	allErrors := make(chan error, 2)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// NOTE(review): mainReadLoop appears to be entered with the client
		// lock held — confirm against mainReadLoop's locking contract.
		cl.lock()
		err := cn.mainReadLoop()
		if errors.Is(err, io.EOF) {
			err = nil
		}
		allErrors <- err
	}()
	b.SetBytes(chunkSize * int64(numRequests))
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < b.N; i += 1 {
			cl.lock()
			// The chunk must be written to storage everytime, to ensure the
			// writeSem is unlocked.
			t.pendAllChunkSpecs(0)
			g.MakeMapIfNil(&cn.validReceiveChunks)
			eachRequestIndex(func(ri RequestIndex) {
				cn.validReceiveChunks[ri] = 1
			})
			cl.unlock()
			ts.allChunksWritten.Add(int(numRequests))
			for _, wb := range msgBufs {
				n, err := w.Write(wb)
				require.NoError(b, err)
				require.EqualValues(b, len(wb), n)
			}
			// This is unlocked by a successful write to storage. So this unblocks when that is
			// done.
			ts.allChunksWritten.Wait()
		}
		// Closing the writer makes the read loop see EOF and exit.
		if err := w.Close(); err != nil {
			panic(err)
		}
	}()
	go func() {
		wg.Wait()
		close(allErrors)
	}()
	var err error
	for err = range allErrors {
		if err != nil {
			break
		}
	}
	qt.Assert(b, qt.IsNil(err))
	qt.Assert(b, qt.Equals(cn._stats.ChunksReadUseful.Int64(), int64(b.N)*int64(numRequests)))
	qt.Assert(b, qt.IsTrue(t.smartBanCache.HasBlocks()))
}

// TestConnPexPeerFlags checks that pexPeerFlags reflects connection direction,
// encryption preference, and transport (uTP vs TCP).
func TestConnPexPeerFlags(t *testing.T) {
	var (
		tcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
		udpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
	)
	testcases := []struct {
		conn *PeerConn
		f    pp.PexPeerFlags
	}{
		{&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: false}}, 0},
		{&PeerConn{Peer: Peer{outgoing: false, PeerPrefersEncryption: true}}, pp.PexPrefersEncryption},
		{&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: false}}, pp.PexOutgoingConn},
		{&PeerConn{Peer: Peer{outgoing: true, PeerPrefersEncryption: true}}, pp.PexOutgoingConn | pp.PexPrefersEncryption},
		{&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}}, pp.PexSupportsUtp},
		{&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn | pp.PexSupportsUtp},
		{&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true}}, pp.PexOutgoingConn},
		{&PeerConn{Peer: Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()}}, 0},
	}
	for i, tc := range testcases {
		f := tc.conn.pexPeerFlags()
		require.EqualValues(t, tc.f, f, i)
	}
}

// TestConnPexEvent checks pexEvent construction: the advertised address is the
// remote address for incoming/drop cases, or the peer's listen port when known.
func TestConnPexEvent(t *testing.T) {
	var (
		udpAddr     = &net.UDPAddr{IP: net.IPv6loopback, Port: 4848}
		tcpAddr     = &net.TCPAddr{IP: net.IPv6loopback, Port: 4848}
		dialTcpAddr = &net.TCPAddr{IP: net.IPv6loopback, Port: 4747}
		dialUdpAddr = &net.UDPAddr{IP: net.IPv6loopback, Port: 4747}
	)
	testcases := []struct {
		t pexEventType
		c *PeerConn
		e pexEvent
	}{
		{
			pexAdd,
			&PeerConn{Peer: Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()}},
			pexEvent{pexAdd, udpAddr.AddrPort(), pp.PexSupportsUtp, nil},
		},
		{
			pexDrop,
			&PeerConn{
				Peer:           Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network(), outgoing: true},
				PeerListenPort: dialTcpAddr.Port,
			},
			pexEvent{pexDrop, tcpAddr.AddrPort(), pp.PexOutgoingConn, nil},
		},
		{
			pexAdd,
			&PeerConn{
				Peer:           Peer{RemoteAddr: tcpAddr, Network: tcpAddr.Network()},
				PeerListenPort: dialTcpAddr.Port,
			},
			pexEvent{pexAdd, dialTcpAddr.AddrPort(), 0, nil},
		},
		{
			pexDrop,
			&PeerConn{
				Peer:           Peer{RemoteAddr: udpAddr, Network: udpAddr.Network()},
				PeerListenPort: dialUdpAddr.Port,
			},
			pexEvent{pexDrop, dialUdpAddr.AddrPort(), pp.PexSupportsUtp, nil},
		},
	}
	for i, tc := range testcases {
		t.Run(fmt.Sprintf("%v", i), func(t *testing.T) {
			e, err := tc.c.pexEvent(tc.t)
			qt.Assert(t, qt.IsNil(err))
			qt.Check(t, qt.Equals(e, tc.e))
		})
	}
}

// TestHaveAllThenBitfield checks that a HaveAll followed by a Bitfield replaces
// the peer's piece availability, and that availability runs are computed
// correctly once the torrent's info arrives.
func TestHaveAllThenBitfield(t *testing.T) {
	cl := newTestingClient(t)
	tt := cl.newTorrentForTesting()
	//pc := cl.newConnection(nil, newConnectionOpts{})
	pc := PeerConn{
		Peer: Peer{t: tt},
	}
	pc.initRequestState()
	pc.legacyPeerImpl = &pc
	tt.conns[&pc] = struct{}{}
	g.InitNew(&pc.callbacks)
	// HaveAll: the peer is recorded as having all pieces.
	qt.Assert(t, qt.IsNil(pc.onPeerSentHaveAll()))
	qt.Check(t, qt.DeepEquals(pc.t.connsWithAllPieces, map[*Peer]struct{}{&pc.Peer: {}}))
	// A subsequent bitfield overrides the have-all state.
	pc.peerSentBitfield([]bool{false, false, true, false, true, true, false, false})
	qt.Check(t, qt.Equals(pc.peerMinPieces, 6))
	qt.Check(t, qt.HasLen(pc.t.connsWithAllPieces, 0))
	qt.Assert(t, qt.IsNil(pc.t.setInfoUnlocked(&metainfo.Info{
		Name:        "herp",
		Length:      7,
		PieceLength: 1,
		Pieces:      make([]byte, pieceHash.Size()*7),
	})))
	qt.Check(t, qt.Equals(tt.numPieces(), 7))
	qt.Check(t, qt.DeepEquals(tt.pieceAvailabilityRuns(), []pieceAvailabilityRun{
		// The last element of the bitfield is irrelevant, as the Torrent actually only has 7
		// pieces.
		{2, 0}, {1, 1}, {1, 0}, {2, 1}, {1, 0},
	}))
}

// TestApplyRequestStateWriteBufferConstraints pins the wire-message size
// constants that the request-state write-buffer logic depends on.
func TestApplyRequestStateWriteBufferConstraints(t *testing.T) {
	qt.Check(t, qt.Equals(interestedMsgLen, 5))
	qt.Check(t, qt.Equals(requestMsgLen, 17))
	qt.Check(t, qt.IsTrue(maxLocalToRemoteRequests >= 8))
	t.Logf("max local to remote requests: %v", maxLocalToRemoteRequests)
}

// peerConnForPreferredNetworkDirection builds a minimal PeerConn for
// hasPreferredNetworkOver tests, with the given peer IDs (encoded big-endian
// into the ID bytes), direction, and transport properties.
func peerConnForPreferredNetworkDirection(
	localPeerId, remotePeerId int,
	outgoing, utp, ipv6 bool,
) *PeerConn {
	pc := PeerConn{}
	pc.outgoing = outgoing
	if utp {
		pc.Network = "udp"
	}
	if ipv6 {
		pc.RemoteAddr = &net.TCPAddr{IP: net.ParseIP("::420")}
	} else {
		pc.RemoteAddr = &net.TCPAddr{IP: net.IPv4(1, 2, 3, 4)}
	}
	binary.BigEndian.PutUint64(pc.PeerID[:], uint64(remotePeerId))
	cl := Client{}
	binary.BigEndian.PutUint64(cl.peerID[:], uint64(localPeerId))
	pc.t = &Torrent{cl: &cl}
	return &pc
}

// TestPreferredNetworkDirection exercises hasPreferredNetworkOver across
// direction, transport, and address-family differences.
func TestPreferredNetworkDirection(t *testing.T) {
	pc := peerConnForPreferredNetworkDirection

	// Prefer outgoing to lower peer ID

	qt.Check(t,
		qt.IsFalse(pc(1, 2, true, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false))),
	)
	qt.Check(t,
		qt.IsTrue(pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, true, false, false))),
	)
	qt.Check(t,
		qt.IsFalse(pc(2, 1, false, false, false).hasPreferredNetworkOver(pc(2, 1, true, false, false))),
	)

	// Don't prefer uTP
	qt.Check(t,
		qt.IsFalse(pc(1, 2, false, true, false).hasPreferredNetworkOver(pc(1, 2, false, false, false))),
	)
	// Prefer IPv6
	qt.Check(t,
		qt.IsFalse(pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, true))),
	)
	// No difference
	qt.Check(t,
		qt.IsFalse(pc(1, 2, false, false, false).hasPreferredNetworkOver(pc(1, 2, false, false, false))),
	)
}

// TestReceiveLargeRequest checks onReadRequest with oversized request lengths,
// both with and without an upload rate limiter whose burst is smaller than the
// requested length.
func TestReceiveLargeRequest(t *testing.T) {
	cl := newTestingClient(t)
	pc := cl.newConnection(nil, newConnectionOpts{network: "test"})
	tor := cl.newTorrentForTesting()
	tor.info = &metainfo.Info{PieceLength: 3 << 20}
	pc.setTorrent(tor)
	tor._completedPieces.Add(0)
	pc.PeerExtensionBytes.SetBit(pp.ExtensionBitFast, true)
	pc.choking = false
	pc.initMessageWriter()
	req := Request{}
	req.Length = defaultChunkSize
	qt.Assert(t, qt.IsTrue(pc.fastEnabled()))
	qt.Check(t, qt.IsNil(pc.onReadRequest(req, false)))
	qt.Check(t, qt.HasLen(pc.unreadPeerRequests, 1))
	// A request larger than the chunk size is still accepted here.
	req.Length = 2 << 20
	qt.Check(t, qt.IsNil(pc.onReadRequest(req, false)))
	qt.Check(t, qt.HasLen(pc.unreadPeerRequests, 2))
	pc.unreadPeerRequests = nil
	// Now constrain the upload rate limiter's burst to a single chunk.
	pc.t.cl.config.UploadRateLimiter = rate.NewLimiter(1, defaultChunkSize)
	req.Length = defaultChunkSize
	qt.Check(t, qt.IsNil(pc.onReadRequest(req, false)))
	qt.Check(t, qt.HasLen(pc.unreadPeerRequests, 1))
	// With the limited burst, the oversized request is not queued; instead a
	// single 17-byte message (requestMsgLen, asserted above) is buffered.
	req.Length = 2 << 20
	qt.Check(t, qt.IsNil(pc.onReadRequest(req, false)))
	qt.Check(t, qt.Equals(pc.messageWriter.writeBuffer.Len(), 17))
}

// TestChunkOverflowsPiece checks chunkOverflowsPiece boundary behavior,
// including integer-overflow cases near pp.IntegerMax.
func TestChunkOverflowsPiece(t *testing.T) {
	check := func(begin, length, limit pp.Integer, expected bool) {
		qt.Check(t, qt.Equals(chunkOverflowsPiece(ChunkSpec{begin, length}, limit), expected))
	}
	check(2, 3, 1, true)
	check(2, pp.IntegerMax, 1, true)
	check(2, pp.IntegerMax, 3, true)
	check(2, pp.IntegerMax, pp.IntegerMax, true)
	check(2, pp.IntegerMax-2, pp.IntegerMax, false)
}