github.com/anacrolix/torrent@v1.61.0/tracker/server/server.go (about)

     1  package trackerServer
     2  
     3  import (
     4  	"context"
     5  	"encoding/hex"
     6  	"fmt"
     7  	"net/netip"
     8  	"sync"
     9  	"time"
    10  
    11  	"github.com/anacrolix/generics"
    12  	"github.com/anacrolix/log"
    13  	"github.com/anacrolix/torrent/types/infohash"
    14  	"go.opentelemetry.io/otel"
    15  	"go.opentelemetry.io/otel/attribute"
    16  	"go.opentelemetry.io/otel/codes"
    17  	"go.opentelemetry.io/otel/trace"
    18  
    19  	"github.com/anacrolix/torrent/tracker"
    20  	"github.com/anacrolix/torrent/tracker/udp"
    21  )
    22  
// GetPeersOpts carries options for AnnounceTracker.GetPeers. This is reserved
// for stuff like filtering by IP version, avoiding an announcer's IP or key,
// limiting return count, etc.
type GetPeersOpts struct {
	// MaxCount, when set, caps how many peers may be returned. Negative
	// numbers are not allowed.
	MaxCount generics.Option[uint]
}
    29  
// InfoHash identifies a torrent swarm.
type InfoHash = infohash.T

// PeerInfo is a peer as stored and returned by an AnnounceTracker. Currently
// just its announce address.
type PeerInfo struct {
	AnnounceAddr
}

// AnnounceAddr is the address a peer announced from (possibly overridden by
// the port it claims to listen on; see AnnounceHandler.Serve).
type AnnounceAddr = netip.AddrPort
    37  
// AnnounceTracker is the peer-store backend behind AnnounceHandler.
type AnnounceTracker interface {
	// TrackAnnounce records an announce from addr so the peer can later be
	// returned by GetPeers.
	TrackAnnounce(ctx context.Context, req udp.AnnounceRequest, addr AnnounceAddr) error
	// Scrape returns per-infohash swarm statistics.
	Scrape(ctx context.Context, infoHashes []InfoHash) ([]udp.ScrapeInfohashResult, error)
	// GetPeers returns peers for infoHash subject to opts. remote is the
	// announcing peer's address — presumably so implementations can exclude
	// or deprioritize it; confirm against implementations.
	GetPeers(
		ctx context.Context,
		infoHash InfoHash,
		opts GetPeersOpts,
		remote AnnounceAddr,
	) ServerAnnounceResult
}
    48  
// ServerAnnounceResult is the outcome of handling an announce, used to build
// the response sent back to the announcing peer.
type ServerAnnounceResult struct {
	Err error
	// Peers to include in the announce response.
	Peers []PeerInfo
	// Interval to suggest before the client re-announces, if known.
	Interval generics.Option[int32]
	// Leechers/Seeders counts for the swarm, if known.
	Leechers generics.Option[int32]
	Seeders  generics.Option[int32]
}
    56  
// AnnounceHandler services announces from a local peer store, and can augment
// sparse swarms by announcing the same infohash to upstream trackers.
type AnnounceHandler struct {
	AnnounceTracker AnnounceTracker

	// UpstreamTrackers is parallel to UpstreamTrackerUrls: index i's client
	// announces to index i's URL.
	UpstreamTrackers       []Client
	UpstreamTrackerUrls    []string
	// Peer ID presented when announcing upstream.
	UpstreamAnnouncePeerId [20]byte
	// Gates upstream announces (Start reserves one, Completed records it) —
	// see augmentPeersFromUpstream.
	UpstreamAnnounceGate   UpstreamAnnounceGater

	// mu guards ongoingUpstreamAugmentations.
	mu sync.Mutex
	// Operations are only removed when all the upstream peers have been tracked.
	ongoingUpstreamAugmentations map[InfoHash]augmentationOperation
}
    69  
// peerSet is a set of distinct peers.
type peerSet = map[PeerInfo]struct{}

// augmentationOperation is a handle on one in-flight round of upstream
// announces for a single infohash.
type augmentationOperation struct {
	// Closed when no more announce responses are pending. finalPeers will contain all the peers
	// seen.
	doneAnnouncing chan struct{}
	// This receives the latest peerSet until doneAnnouncing is closed.
	curPeers chan peerSet
	// This contains the final peerSet after doneAnnouncing is closed.
	finalPeers peerSet
}
    81  
    82  func (me augmentationOperation) getCurPeers() (ret peerSet) {
    83  	ret, _ = me.getCurPeersAndDone()
    84  	return
    85  }
    86  
    87  func (me augmentationOperation) getCurPeersAndDone() (ret peerSet, done bool) {
    88  	select {
    89  	case ret = <-me.curPeers:
    90  	case <-me.doneAnnouncing:
    91  		ret = copyPeerSet(me.finalPeers)
    92  		done = true
    93  	}
    94  	return
    95  }
    96  
    97  // Adds peers from new that aren't in orig. Modifies both arguments.
    98  func addMissing(orig []PeerInfo, new peerSet) {
    99  	for _, peer := range orig {
   100  		delete(new, peer)
   101  	}
   102  	for peer := range new {
   103  		orig = append(orig, peer)
   104  	}
   105  }
   106  
// tracer instruments announce handling. The instrumentation name predates
// this package serving more than UDP trackers.
var tracer = otel.Tracer("torrent.tracker.udp")
   108  
   109  func (me *AnnounceHandler) Serve(
   110  	ctx context.Context, req AnnounceRequest, addr AnnounceAddr, opts GetPeersOpts,
   111  ) (ret ServerAnnounceResult) {
   112  	ctx, span := tracer.Start(
   113  		ctx,
   114  		"AnnounceHandler.Serve",
   115  		trace.WithAttributes(
   116  			attribute.Int64("announce.request.num_want", int64(req.NumWant)),
   117  			attribute.Int("announce.request.port", int(req.Port)),
   118  			attribute.String("announce.request.info_hash", hex.EncodeToString(req.InfoHash[:])),
   119  			attribute.String("announce.request.event", req.Event.String()),
   120  			attribute.Int64("announce.get_peers.opts.max_count_value", int64(opts.MaxCount.Value)),
   121  			attribute.Bool("announce.get_peers.opts.max_count_ok", opts.MaxCount.Ok),
   122  			attribute.String("announce.source.addr.ip", addr.Addr().String()),
   123  			attribute.Int("announce.source.addr.port", int(addr.Port())),
   124  		),
   125  	)
   126  	defer span.End()
   127  	defer func() {
   128  		span.SetAttributes(attribute.Int("announce.get_peers.len", len(ret.Peers)))
   129  		if ret.Err != nil {
   130  			span.SetStatus(codes.Error, ret.Err.Error())
   131  		}
   132  	}()
   133  
   134  	if req.Port != 0 {
   135  		addr = netip.AddrPortFrom(addr.Addr(), req.Port)
   136  	}
   137  	ret.Err = me.AnnounceTracker.TrackAnnounce(ctx, req, addr)
   138  	if ret.Err != nil {
   139  		ret.Err = fmt.Errorf("tracking announce: %w", ret.Err)
   140  		return
   141  	}
   142  	infoHash := req.InfoHash
   143  	var op generics.Option[augmentationOperation]
   144  	// Grab a handle to any augmentations that are already running.
   145  	me.mu.Lock()
   146  	op.Value, op.Ok = me.ongoingUpstreamAugmentations[infoHash]
   147  	me.mu.Unlock()
   148  	// Apply num_want limit to max count. I really can't tell if this is the right place to do it,
   149  	// but it seems the most flexible.
   150  	if req.NumWant != -1 {
   151  		newCount := uint(req.NumWant)
   152  		if opts.MaxCount.Ok {
   153  			if newCount < opts.MaxCount.Value {
   154  				opts.MaxCount.Value = newCount
   155  			}
   156  		} else {
   157  			opts.MaxCount = generics.Some(newCount)
   158  		}
   159  	}
   160  	ret = me.AnnounceTracker.GetPeers(ctx, infoHash, opts, addr)
   161  	if ret.Err != nil {
   162  		return
   163  	}
   164  	// Take whatever peers it has ready. If it's finished, it doesn't matter if we do this inside
   165  	// the mutex or not.
   166  	if op.Ok {
   167  		curPeers, done := op.Value.getCurPeersAndDone()
   168  		addMissing(ret.Peers, curPeers)
   169  		if done {
   170  			// It doesn't get any better with this operation. Forget it.
   171  			op.Ok = false
   172  		}
   173  	}
   174  	me.mu.Lock()
   175  	// If we didn't have an operation, and don't have enough peers, start one. Allowing 1 is
   176  	// assuming the announcing peer might be that one. Really we should record a value to prevent
   177  	// duplicate announces. Also don't announce upstream if we got no peers because the caller asked
   178  	// for none.
   179  	if !op.Ok && len(ret.Peers) <= 1 && opts.MaxCount.UnwrapOr(1) > 0 {
   180  		op.Value, op.Ok = me.ongoingUpstreamAugmentations[infoHash]
   181  		if !op.Ok {
   182  			op.Set(me.augmentPeersFromUpstream(req.InfoHash))
   183  			generics.MakeMapIfNilAndSet(&me.ongoingUpstreamAugmentations, infoHash, op.Value)
   184  		}
   185  	}
   186  	me.mu.Unlock()
   187  	// Wait a while for the current operation.
   188  	if op.Ok {
   189  		// Force the augmentation to return with whatever it has if it hasn't completed in a
   190  		// reasonable time.
   191  		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
   192  		select {
   193  		case <-ctx.Done():
   194  		case <-op.Value.doneAnnouncing:
   195  		}
   196  		cancel()
   197  		addMissing(ret.Peers, op.Value.getCurPeers())
   198  	}
   199  	return
   200  }
   201  
// augmentPeersFromUpstream announces infoHash to every configured upstream
// tracker in parallel and returns a handle through which callers can observe
// the peers discovered so far (curPeers) or the final set (finalPeers, after
// doneAnnouncing is closed). Discovered peers are also recorded in the local
// AnnounceTracker once all upstream announces have resolved.
func (me *AnnounceHandler) augmentPeersFromUpstream(infoHash [20]byte) augmentationOperation {
	const announceTimeout = time.Minute
	announceCtx, cancel := context.WithTimeout(context.Background(), announceTimeout)
	// Announce upstream as a leech-like client with no listening port.
	subReq := AnnounceRequest{
		InfoHash: infoHash,
		PeerId:   me.UpstreamAnnouncePeerId,
		Event:    tracker.None,
		Key:      0,
		NumWant:  -1,
		Port:     0,
	}
	// Unbuffered: each upstream goroutine sends exactly one result (possibly
	// nil), consumed by the collector goroutine below.
	peersChan := make(chan []Peer)
	var pendingUpstreams sync.WaitGroup
	for i := range me.UpstreamTrackers {
		// Copy per-iteration values so the goroutine doesn't capture the loop
		// variable (matters pre-Go 1.22).
		client := me.UpstreamTrackers[i]
		url := me.UpstreamTrackerUrls[i]
		pendingUpstreams.Add(1)
		go func() {
			started, err := me.UpstreamAnnounceGate.Start(announceCtx, url, infoHash, announceTimeout)
			if err != nil {
				log.Printf("error reserving announce for %x to %v: %v", infoHash, url, err)
			}
			if err != nil || !started {
				// Still send so the collector balances the WaitGroup.
				peersChan <- nil
				return
			}
			log.Printf("announcing %x upstream to %v", infoHash, url)
			resp, err := client.Announce(announceCtx, subReq, tracker.AnnounceOpt{
				UserAgent: "aragorn",
			})
			// NOTE(review): resp.Interval is read and Completed is recorded
			// below even when Announce returned an error — confirm this
			// best-effort behavior is intended.
			interval := resp.Interval
			go func() {
				if interval < 5*60 {
					// This is as much to reduce load on upstream trackers in the event of errors,
					// as it is to reduce load on our peer store.
					interval = 5 * 60
				}
				err := me.UpstreamAnnounceGate.Completed(context.Background(), url, infoHash, interval)
				if err != nil {
					log.Printf("error recording completed announce for %x to %v: %v", infoHash, url, err)
				}
			}()
			peersChan <- resp.Peers
			if err != nil {
				log.Levelf(log.Warning, "error announcing to upstream %q: %v", url, err)
			}
		}()
	}
	// Written only by the collector goroutine until the WaitGroup drains,
	// then read here — the Wait() is the synchronization point.
	peersToTrack := make(map[string]Peer)
	go func() {
		pendingUpstreams.Wait()
		cancel()
		close(peersChan)
		log.Levelf(log.Debug, "adding %v distinct peers from upstream trackers", len(peersToTrack))
		for _, peer := range peersToTrack {
			addrPort, ok := peer.ToNetipAddrPort()
			if !ok {
				continue
			}
			trackReq := AnnounceRequest{
				InfoHash: infoHash,
				Event:    tracker.Started,
				Port:     uint16(peer.Port),
				// Let's assume upstream peers are leechers without knowing better.
				Left: -1,
			}
			copy(trackReq.PeerId[:], peer.ID)
			// TODO: How do we know if these peers are leechers or seeders?
			err := me.AnnounceTracker.TrackAnnounce(context.TODO(), trackReq, addrPort)
			if err != nil {
				log.Levelf(log.Error, "error tracking upstream peer: %v", err)
			}
		}
		// Only now is the operation forgotten, so concurrent Serve calls keep
		// joining it until all upstream peers are tracked.
		me.mu.Lock()
		delete(me.ongoingUpstreamAugmentations, infoHash)
		me.mu.Unlock()
	}()
	curPeersChan := make(chan map[PeerInfo]struct{})
	doneChan := make(chan struct{})
	retPeers := make(map[PeerInfo]struct{})
	// Collector: single goroutine that owns retPeers/peersToTrack mutation
	// and serves snapshot requests, so no mutex is needed on those maps.
	go func() {
		defer close(doneChan)
		for {
			select {
			case peers, ok := <-peersChan:
				if !ok {
					return
				}
				voldemort(peers, peersToTrack, retPeers)
				// Done is deliberately called here, not in the upstream
				// goroutines, so Wait() can't fire before the result is merged.
				pendingUpstreams.Done()
			case curPeersChan <- copyPeerSet(retPeers):
			}
		}
	}()
	// Take return references.
	return augmentationOperation{
		curPeers:       curPeersChan,
		finalPeers:     retPeers,
		doneAnnouncing: doneChan,
	}
}
   303  
   304  func copyPeerSet(orig peerSet) (ret peerSet) {
   305  	ret = make(peerSet, len(orig))
   306  	for k, v := range orig {
   307  		ret[k] = v
   308  	}
   309  	return
   310  }
   311  
   312  // Adds peers to trailing containers.
   313  func voldemort(peers []Peer, toTrack map[string]Peer, sets ...map[PeerInfo]struct{}) {
   314  	for _, protoPeer := range peers {
   315  		toTrack[protoPeer.String()] = protoPeer
   316  		addr, ok := netip.AddrFromSlice(protoPeer.IP)
   317  		if !ok {
   318  			continue
   319  		}
   320  		handlerPeer := PeerInfo{netip.AddrPortFrom(addr, uint16(protoPeer.Port))}
   321  		for _, set := range sets {
   322  			set[handlerPeer] = struct{}{}
   323  		}
   324  	}
   325  }