github.com/anacrolix/torrent@v1.61.0/storage/piece-resource.go (about)

     1  package storage
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"encoding/hex"
     7  	"fmt"
     8  	"io"
     9  	"path"
    10  	"sort"
    11  	"strconv"
    12  	"sync"
    13  
    14  	g "github.com/anacrolix/generics"
    15  	"github.com/anacrolix/missinggo/v2/resource"
    16  
    17  	"github.com/anacrolix/torrent/metainfo"
    18  )
    19  
// piecePerResource is a ClientImpl that stores each torrent piece as one or
// more resources obtained from a resource provider.
type piecePerResource struct {
	rp   PieceProvider      // backing provider for piece blobs
	opts ResourcePiecesOpts // behavioural options (capacity, puts, cleanup)
}
    24  
// ResourcePiecesOpts configures the behaviour of a piece-per-resource client
// created by NewResourcePiecesOpts.
type ResourcePiecesOpts struct {
	// Capacity is passed through to TorrentImpl.Capacity in OpenTorrent.
	Capacity TorrentCapacity
	// After marking a piece complete, don't bother deleting its incomplete blobs.
	LeaveIncompleteChunks bool
	// Sized puts require being able to stream from a statement executed on another connection.
	// Without them, we buffer the entire read and then put that.
	NoSizedPuts bool
}
    33  
    34  func NewResourcePieces(p PieceProvider) ClientImpl {
    35  	return NewResourcePiecesOpts(p, ResourcePiecesOpts{})
    36  }
    37  
    38  func NewResourcePiecesOpts(p PieceProvider, opts ResourcePiecesOpts) ClientImpl {
    39  	return &piecePerResource{
    40  		rp:   p,
    41  		opts: opts,
    42  	}
    43  }
    44  
// piecePerResourceTorrentImpl is the per-torrent state: the shared client
// configuration plus one lock per piece.
type piecePerResourceTorrentImpl struct {
	piecePerResource
	// One RWMutex per piece, shared by every PieceImpl handed out for that
	// piece; guards complete/incomplete moves against concurrent reads.
	locks []sync.RWMutex
}
    49  
// Close implements TorrentImpl.Close. There are no per-torrent resources to
// release.
func (piecePerResourceTorrentImpl) Close() error {
	return nil
}
    53  
    54  func (s piecePerResource) OpenTorrent(
    55  	ctx context.Context,
    56  	info *metainfo.Info,
    57  	infoHash metainfo.Hash,
    58  ) (TorrentImpl, error) {
    59  	t := piecePerResourceTorrentImpl{
    60  		s,
    61  		make([]sync.RWMutex, info.NumPieces()),
    62  	}
    63  	ret := TorrentImpl{
    64  		PieceWithHash: t.Piece,
    65  		Close:         t.Close,
    66  		Capacity:      s.opts.Capacity,
    67  	}
    68  	return ret, nil
    69  }
    70  
    71  func (s piecePerResourceTorrentImpl) Piece(p metainfo.Piece, pieceHash g.Option[[]byte]) PieceImpl {
    72  	return piecePerResourcePiece{
    73  		mp:               p,
    74  		pieceHash:        pieceHash,
    75  		piecePerResource: s.piecePerResource,
    76  		mu:               &s.locks[p.Index()],
    77  	}
    78  }
    79  
// PieceProvider supplies the resource instances that back pieces. Providers
// may additionally implement the optional interfaces below (MovePrefixer,
// ConsecutiveChunkReader, ChunksReaderer, PrefixDeleter) to enable fast paths.
type PieceProvider interface {
	resource.Provider
}
    83  
// MovePrefixer is an optional PieceProvider extension that renames every
// resource under one prefix to another in a single operation.
type MovePrefixer interface {
	MovePrefix(old, new string) error
}
    87  
// ConsecutiveChunkReader is an optional PieceProvider extension that streams
// the chunks under a prefix as one consecutive reader.
type ConsecutiveChunkReader interface {
	ReadConsecutiveChunks(prefix string) (io.ReadCloser, error)
}
    91  
// ChunksReaderer is an optional PieceProvider extension that returns a
// random-access PieceReader over the chunks in a directory.
type ChunksReaderer interface {
	ChunksReader(dir string) (PieceReader, error)
}
    95  
// PrefixDeleter is an optional PieceProvider extension that deletes every
// resource under a prefix in one call.
type PrefixDeleter interface {
	DeletePrefix(prefix string) error
}
    99  
// piecePerResourcePiece is the PieceImpl for a single piece of a torrent.
type piecePerResourcePiece struct {
	// The piece this value represents.
	mp metainfo.Piece
	// The piece hash if we have it. It could be 20 or 32 bytes depending on the info version.
	pieceHash g.Option[[]byte]
	piecePerResource
	// This protects operations that move complete/incomplete pieces around, which can trigger read
	// errors that may cause callers to do more drastic things.
	mu *sync.RWMutex
}
   109  
// Compile-time check that piecePerResourcePiece provides the streaming and
// reader fast paths consumed elsewhere.
var _ interface {
	io.WriterTo
	PieceReaderer
} = piecePerResourcePiece{}
   114  
   115  func (s piecePerResourcePiece) WriteTo(w io.Writer) (int64, error) {
   116  	s.mu.RLock()
   117  	defer s.mu.RUnlock()
   118  	if s.mustIsComplete() {
   119  		if s.hasMovePrefix() {
   120  			if ccr, ok := s.rp.(ConsecutiveChunkReader); ok {
   121  				return s.writeConsecutiveChunks(ccr, s.completedDirPath(), w)
   122  			}
   123  		}
   124  		r, err := s.completedInstance().Get()
   125  		if err != nil {
   126  			return 0, fmt.Errorf("getting complete instance: %w", err)
   127  		}
   128  		defer r.Close()
   129  		return io.Copy(w, r)
   130  	}
   131  	if ccr, ok := s.rp.(ConsecutiveChunkReader); ok {
   132  		return s.writeConsecutiveChunks(ccr, s.incompleteDirPath(), w)
   133  	}
   134  	return io.Copy(w, io.NewSectionReader(s, 0, s.mp.Length()))
   135  }
   136  
   137  func (s piecePerResourcePiece) writeConsecutiveChunks(
   138  	ccw ConsecutiveChunkReader,
   139  	dir string,
   140  	w io.Writer,
   141  ) (int64, error) {
   142  	r, err := ccw.ReadConsecutiveChunks(dir + "/")
   143  	if err != nil {
   144  		return 0, err
   145  	}
   146  	defer r.Close()
   147  	return io.Copy(w, r)
   148  }
   149  
   150  // Returns if the piece is complete. Ok should be true, because we are the definitive source of
   151  // truth here.
   152  func (s piecePerResourcePiece) mustIsComplete() bool {
   153  	completion := s.completionLocked()
   154  	if !completion.Ok {
   155  		panic("must know complete definitively")
   156  	}
   157  	return completion.Complete
   158  }
   159  
   160  func (s piecePerResourcePiece) Completion() (_ Completion) {
   161  	s.mu.RLock()
   162  	defer s.mu.RUnlock()
   163  	return s.completionLocked()
   164  }
   165  
   166  func (s piecePerResourcePiece) completionLocked() (_ Completion) {
   167  	if !s.pieceHash.Ok {
   168  		return
   169  	}
   170  	fi, err := s.completedInstance().Stat()
   171  	if s.hasMovePrefix() {
   172  		return Completion{
   173  			Complete: err == nil && fi.Size() != 0,
   174  			Ok:       true,
   175  		}
   176  	}
   177  	return Completion{
   178  		Complete: err == nil && fi.Size() == s.mp.Length(),
   179  		Ok:       true,
   180  	}
   181  }
   182  
// SizedPutter is an optional resource.Instance extension that stores a blob
// whose length is known up front, allowing the put to stream.
type SizedPutter interface {
	PutSized(io.Reader, int64) error
}
   186  
// MarkComplete implements PieceImpl. It promotes the piece's data from the
// incomplete location to the completed one, and then — unless
// LeaveIncompleteChunks is set — deletes the incomplete chunk blobs.
func (s piecePerResourcePiece) MarkComplete() (err error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	// Fast path: a provider that can rename whole prefixes moves the chunk
	// directory in place, with no data copy and no separate cleanup.
	if mp, ok := s.rp.(MovePrefixer); ok {
		err = mp.MovePrefix(s.incompleteDirPath()+"/", s.completedDirPath()+"/")
		if err != nil {
			err = fmt.Errorf("moving incomplete to complete: %w", err)
		}
		return
	}
	incompleteChunks := s.getChunks(s.incompleteDirPath())
	// Obtain a reader over the piece bytes: streamed from the provider when it
	// supports consecutive chunk reads, otherwise assembled with ReadAt over
	// the sorted chunk list.
	r, err := func() (io.ReadCloser, error) {
		if ccr, ok := s.rp.(ConsecutiveChunkReader); ok {
			return ccr.ReadConsecutiveChunks(s.incompleteDirPath() + "/")
		}
		return io.NopCloser(io.NewSectionReader(incompleteChunks, 0, s.mp.Length())), nil
	}()
	if err != nil {
		return fmt.Errorf("getting incomplete chunks reader: %w", err)
	}
	defer r.Close()
	completedInstance := s.completedInstance()
	// Prefer a sized put so the destination can stream rather than buffer.
	err = func() error {
		if sp, ok := completedInstance.(SizedPutter); ok && !s.opts.NoSizedPuts {
			return sp.PutSized(r, s.mp.Length())
		} else {
			return completedInstance.Put(r)
		}
	}()
	if err != nil || s.opts.LeaveIncompleteChunks {
		return
	}

	// I think we do this synchronously here since we don't want callers to act on the completed
	// piece if we're concurrently still deleting chunks. The caller may decide to start
	// downloading chunks again and won't expect us to delete them. It seems to be much faster
	// to let the resource provider do this if possible.
	if pd, ok := s.rp.(PrefixDeleter); ok {
		err = pd.DeletePrefix(s.incompleteDirPath() + "/")
		if err != nil {
			err = fmt.Errorf("deleting incomplete prefix: %w", err)
		}
	} else {
		// Delete chunk blobs concurrently, but still wait for all of them
		// before returning, per the note above.
		var wg sync.WaitGroup
		for _, c := range incompleteChunks {
			wg.Add(1)
			go func(c chunk) {
				defer wg.Done()
				c.instance.Delete()
			}(c)
		}
		wg.Wait()
	}
	return err
}
   242  
// MarkNotComplete implements PieceImpl by deleting the completed instance
// (the single blob, or the "0" chunk when move-prefixing).
func (s piecePerResourcePiece) MarkNotComplete() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.completedInstance().Delete()
}
   248  
   249  func (s piecePerResourcePiece) ReadAt(b []byte, off int64) (n int, err error) {
   250  	r, err := s.NewReader()
   251  	if err != nil {
   252  		return
   253  	}
   254  	defer r.Close()
   255  	n, err = r.ReadAt(b, off)
   256  	return
   257  }
   258  
   259  func (s piecePerResourcePiece) WriteAt(b []byte, off int64) (n int, err error) {
   260  	s.mu.RLock()
   261  	defer s.mu.RUnlock()
   262  	i, err := s.rp.NewInstance(path.Join(s.incompleteDirPath(), strconv.FormatInt(off, 10)))
   263  	if err != nil {
   264  		panic(err)
   265  	}
   266  	r := bytes.NewReader(b)
   267  	if sp, ok := i.(SizedPutter); ok {
   268  		err = sp.PutSized(r, r.Size())
   269  	} else {
   270  		err = i.Put(r)
   271  	}
   272  	n = len(b) - r.Len()
   273  	return
   274  }
   275  
// chunk is a single incomplete blob together with the piece offset it starts
// at (the offset doubles as the blob's name).
type chunk struct {
	instance resource.Instance
	offset   int64
}
   280  
   281  type chunks []chunk
   282  
// ReadAt implements io.ReaderAt across the chunk list, stitching together
// reads that span chunk boundaries. The receiver must be sorted by offset.
// Reading before the first chunk yields io.EOF; a gap between chunks yields
// io.ErrUnexpectedEOF.
func (me chunks) ReadAt(b []byte, off int64) (n int, err error) {
	// Find the last chunk whose offset is <= off.
	i := sort.Search(len(me), func(i int) bool {
		return me[i].offset > off
	}) - 1
	if i == -1 {
		err = io.EOF
		return
	}
	chunk := me[i]
	// A goto loop: the named return values and := make a plain for-loop
	// awkward here.
again:
	n1, err := chunk.instance.ReadAt(b, off-chunk.offset)
	b = b[n1:]
	n += n1
	// Should we check here that we're not io.EOF or nil, per ReadAt's contract? That way we know we
	// don't have an error anymore for the rest of the block.
	if len(b) == 0 {
		// err = nil, so we don't send io.EOF on chunk boundaries?
		return
	}
	off += int64(n1)
	i++
	if i >= len(me) {
		// Ran past the last chunk with bytes still wanted.
		if err == nil {
			err = io.EOF
		}
		return
	}
	chunk = me[i]
	if chunk.offset > off {
		// The next chunk doesn't start where this read left off: a hole.
		if err == nil {
			err = io.ErrUnexpectedEOF
		}
		return
	}
	goto again
}
   320  
   321  func (s piecePerResourcePiece) getChunks(dir string) (chunks chunks) {
   322  	names, err := s.dirInstance(dir).Readdirnames()
   323  	if err != nil {
   324  		return
   325  	}
   326  	for _, n := range names {
   327  		offset, err := strconv.ParseInt(n, 10, 64)
   328  		if err != nil {
   329  			panic(err)
   330  		}
   331  		i, err := s.rp.NewInstance(path.Join(dir, n))
   332  		if err != nil {
   333  			panic(err)
   334  		}
   335  		chunks = append(chunks, chunk{i, offset})
   336  	}
   337  	sort.Slice(chunks, func(i, j int) bool {
   338  		return chunks[i].offset < chunks[j].offset
   339  	})
   340  	return
   341  }
   342  
   343  func (s piecePerResourcePiece) completedDirPath() string {
   344  	if !s.hasMovePrefix() {
   345  		panic("not move prefixing")
   346  	}
   347  	return path.Join("completed", s.hashHex())
   348  }
   349  
   350  func (s piecePerResourcePiece) completedInstancePath() string {
   351  	if s.hasMovePrefix() {
   352  		return s.completedDirPath() + "/0"
   353  	}
   354  	return path.Join("completed", s.hashHex())
   355  }
   356  
   357  func (s piecePerResourcePiece) completedInstance() resource.Instance {
   358  	i, err := s.rp.NewInstance(s.completedInstancePath())
   359  	if err != nil {
   360  		panic(err)
   361  	}
   362  	return i
   363  }
   364  
// TODO: Add DirPrefix methods that include the "/" because it's easy to forget and always required.

// incompleteDirPath is the directory holding a piece's in-progress chunk
// blobs, keyed by the piece hash.
func (s piecePerResourcePiece) incompleteDirPath() string {
	return path.Join("incompleted", s.hashHex())
}
   369  
   370  func (s piecePerResourcePiece) dirInstance(path string) resource.DirInstance {
   371  	i, err := s.rp.NewInstance(path)
   372  	if err != nil {
   373  		panic(err)
   374  	}
   375  	return i.(resource.DirInstance)
   376  }
   377  
// hashHex renders the piece hash as lowercase hex for use in resource paths.
// The hash must be present (callers check pieceHash.Ok first).
func (me piecePerResourcePiece) hashHex() string {
	return hex.EncodeToString(me.pieceHash.Unwrap())
}
   381  
   382  func (me piecePerResourcePiece) hasMovePrefix() bool {
   383  	_, ok := me.rp.(MovePrefixer)
   384  	return ok
   385  }
   386  
   387  // Chunks are in dirs, we add the prefix ourselves.
   388  func (s piecePerResourcePiece) getChunksReader(dir string) (PieceReader, error) {
   389  	if opt, ok := s.rp.(ChunksReaderer); ok {
   390  		return opt.ChunksReader(dir)
   391  	}
   392  	return chunkPieceReader{s.getChunks(dir)}, nil
   393  }
   394  
   395  func (s piecePerResourcePiece) NewReader() (PieceReader, error) {
   396  	s.mu.RLock()
   397  	defer s.mu.RUnlock()
   398  	if s.mustIsComplete() {
   399  		if s.hasMovePrefix() {
   400  			return s.getChunksReader(s.completedDirPath())
   401  		}
   402  		return instancePieceReader{s.completedInstance()}, nil
   403  	}
   404  	return s.getChunksReader(s.incompleteDirPath())
   405  }
   406  
// instancePieceReader adapts a single resource.Instance (a completed piece
// blob) to the PieceReader interface with a no-op Close.
type instancePieceReader struct {
	resource.Instance
}

// Close implements PieceReader; the underlying instance needs no cleanup.
func (instancePieceReader) Close() error {
	return nil
}
   414  
// chunkPieceReader adapts a sorted chunks list to the PieceReader interface
// with a no-op Close.
type chunkPieceReader struct {
	chunks
}

// Close implements PieceReader; the chunk instances need no cleanup here.
func (chunkPieceReader) Close() error {
	return nil
}
   422  
   423  // TODO: Make an embedded Closer using reflect?