github.com/anacrolix/torrent@v1.61.0/storage/file-piece.go (about)

     1  package storage
     2  
     3  import (
     4  	"errors"
     5  	"expvar"
     6  	"fmt"
     7  	"io"
     8  	"io/fs"
     9  	"iter"
    10  	"log/slog"
    11  	"os"
    12  
    13  	g "github.com/anacrolix/generics"
    14  	"github.com/anacrolix/missinggo/v2/panicif"
    15  
    16  	"github.com/anacrolix/torrent/metainfo"
    17  	"github.com/anacrolix/torrent/segments"
    18  )
    19  
    20  // Piece within File storage. This is created on demand.
// Piece within File storage. This is created on demand.
type filePieceImpl struct {
	// Owning torrent-level storage state.
	t *fileTorrentImpl
	// The metainfo piece this instance wraps (index, offset, length).
	p metainfo.Piece
	// Embedded so filePieceImpl satisfies io.WriterAt/io.ReaderAt; the
	// concrete implementations are supplied where the piece is constructed.
	io.WriterAt
	io.ReaderAt
}
    27  
// Compile-time assertion that filePieceImpl implements the storage piece
// interface and can stream itself out via io.WriterTo.
var _ interface {
	PieceImpl
	//PieceReaderer
	io.WriterTo
} = (*filePieceImpl)(nil)
    33  
    34  func (me *filePieceImpl) Flush() (err error) {
    35  	for fileIndex, extent := range me.fileExtents() {
    36  		file := me.t.file(fileIndex)
    37  		name := me.t.pathForWrite(&file)
    38  		err1 := me.t.io.flush(name, extent.Start, extent.Length)
    39  		if err1 != nil {
    40  			err = errors.Join(err, fmt.Errorf("flushing %q:%v+%v: %w", name, extent.Start, extent.Length, err1))
    41  			return
    42  		}
    43  	}
    44  	return nil
    45  }
    46  
    47  func (me *filePieceImpl) logger() *slog.Logger {
    48  	return me.t.client.opts.Logger
    49  }
    50  
    51  func (me *filePieceImpl) pieceKey() metainfo.PieceKey {
    52  	return metainfo.PieceKey{me.t.infoHash, me.p.Index()}
    53  }
    54  
    55  func (me *filePieceImpl) extent() segments.Extent {
    56  	return segments.Extent{
    57  		Start:  me.p.Offset(),
    58  		Length: me.p.Length(),
    59  	}
    60  }
    61  
    62  func (me *filePieceImpl) fileExtents() iter.Seq2[int, segments.Extent] {
    63  	return me.t.segmentLocater.LocateIter(me.extent())
    64  }
    65  
    66  func (me *filePieceImpl) pieceFiles() iter.Seq[file] {
    67  	return func(yield func(file) bool) {
    68  		for fileIndex := range me.fileExtents() {
    69  			f := me.t.file(fileIndex)
    70  			if !yield(f) {
    71  				return
    72  			}
    73  		}
    74  	}
    75  }
    76  
    77  func (me *filePieceImpl) pieceCompletion() PieceCompletion {
    78  	return me.t.pieceCompletion()
    79  }
    80  
    81  func (me *filePieceImpl) Completion() (c Completion) {
    82  	c = me.t.getCompletion(me.p.Index())
    83  	if !c.Ok || c.Err != nil {
    84  		return c
    85  	}
    86  	if c.Complete {
    87  		c = me.checkCompleteFileSizes()
    88  	}
    89  	return
    90  }
    91  
    92  func (me *filePieceImpl) iterFileSegments() iter.Seq2[int, segments.Extent] {
    93  	return func(yield func(int, segments.Extent) bool) {
    94  		pieceExtent := me.extent()
    95  		noFiles := true
    96  		for i, extent := range me.t.segmentLocater.LocateIter(pieceExtent) {
    97  			noFiles = false
    98  			if !yield(i, extent) {
    99  				return
   100  			}
   101  		}
   102  		panicif.NotEq(noFiles, pieceExtent.Length == 0)
   103  	}
   104  }
   105  
// If a piece is complete, check constituent files have the minimum required sizes.
// Returns a downgraded Completion (and repairs the completion store) when the
// on-disk state contradicts the recorded completeness.
func (me *filePieceImpl) checkCompleteFileSizes() (c Completion) {
	c.Complete = true
	c.Ok = true
	for i, extent := range me.iterFileSegments() {
		file := me.t.file(i)
		// Stat under the file's read lock so a concurrent rename between the
		// final path and the part path (see promotePartFile/onFileNotComplete)
		// can't make both stats miss.
		file.mu.RLock()
		s, err := os.Stat(file.safeOsPath)
		if me.partFiles() && errors.Is(err, fs.ErrNotExist) {
			// Can we use shared files for this? Is it faster?
			s, err = os.Stat(file.partFilePath())
		}
		file.mu.RUnlock()
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				// Neither path exists: the completion data is stale. Mark
				// every piece overlapping this file (from offset 0) as
				// incomplete.
				me.logger().Warn(
					"error checking file size for piece marked as complete",
					"file", file.safeOsPath,
					"piece", me.p.Index(),
					"err", err)
				c.Complete = false
				me.markIncompletePieces(&file, 0)
				return
			}
			// Unexpected stat failure: surface it instead of guessing.
			c.Err = fmt.Errorf("checking file %v: %w", file.safeOsPath, err)
			c.Complete = false
			return
		}
		if s.Size() < extent.End() {
			// File exists but is shorter than this piece requires; pieces
			// past the actual size can't really be complete.
			me.logger().Warn(
				"file too small for piece marked as complete",
				"piece", me.p.Index(),
				"file", file.safeOsPath,
				"size", s.Size(),
				"extent", extent)
			me.markIncompletePieces(&file, s.Size())
			c.Complete = false
			return
		}
	}
	return
}
   148  
   149  func (me *filePieceImpl) markIncompletePieces(file *file, size int64) {
   150  	if size >= file.length() {
   151  		return
   152  	}
   153  	pieceLength := me.t.info.PieceLength
   154  	begin := metainfo.PieceIndex((file.torrentOffset() + size) / pieceLength)
   155  	end := metainfo.PieceIndex((file.torrentOffset() + file.length() + pieceLength - 1) / pieceLength)
   156  	for p := begin; p < end; p++ {
   157  		key := metainfo.PieceKey{
   158  			InfoHash: me.t.infoHash,
   159  			Index:    p,
   160  		}
   161  		err := me.pieceCompletion().Set(key, false)
   162  		if err != nil {
   163  			me.logger().Error("error marking piece not complete", "piece", p, "err", err)
   164  			return
   165  		}
   166  	}
   167  }
   168  
// MarkComplete records this piece as complete, flushes it when the completion
// store is persistent, and promotes any file whose pieces are now all
// complete from its part path to its final path.
func (me *filePieceImpl) MarkComplete() (err error) {
	err = me.pieceCompletion().Set(me.pieceKey(), true)
	if err != nil {
		return
	}
	if pieceCompletionIsPersistent(me.pieceCompletion()) {
		// Deliberate shadow of err: a flush failure is logged, not returned,
		// so it doesn't undo the completion just recorded.
		err := me.Flush()
		if err != nil {
			me.logger().Warn("error flushing completed piece", "piece", me.p.Index(), "err", err)
		}
	}
	for f := range me.pieceFiles() {
		res := me.allFilePiecesComplete(f)
		if res.Err != nil {
			err = res.Err
			return
		}
		if !res.Ok {
			// f still has incomplete pieces; leave it as a part file.
			continue
		}
		err = me.promotePartFile(f)
		if err != nil {
			err = fmt.Errorf("error promoting part file %q: %w", f.safeOsPath, err)
			return
		}
	}
	return
}
   197  
// allFilePiecesComplete reports whether every piece overlapping f is recorded
// as complete. The result is not-ok without error as soon as any piece is
// unknown or incomplete.
func (me *filePieceImpl) allFilePiecesComplete(f file) (ret g.Result[bool]) {
	// Pull completion states for the file's whole piece range in one query.
	next, stop := iter.Pull(GetPieceCompletionRange(
		me.t.pieceCompletion(),
		me.t.infoHash,
		f.beginPieceIndex(),
		f.endPieceIndex(),
	))
	defer stop()
	for p := f.beginPieceIndex(); p < f.endPieceIndex(); p++ {
		cmpl, ok := next()
		// The range query must yield exactly one value per piece index.
		panicif.False(ok)
		if cmpl.Err != nil {
			ret.Err = fmt.Errorf("error getting completion for piece %d: %w", p, cmpl.Err)
			return
		}
		if !cmpl.Ok || !cmpl.Complete {
			// Unknown or incomplete piece: return not-ok, no error.
			return
		}
	}
	// The iterator must now be exhausted, or the range query over-produced.
	_, ok := next()
	panicif.True(ok)
	ret.SetOk(true)
	return
}
   222  
   223  func (me *filePieceImpl) MarkNotComplete() (err error) {
   224  	err = me.pieceCompletion().Set(me.pieceKey(), false)
   225  	if err != nil {
   226  		return
   227  	}
   228  	for f := range me.pieceFiles() {
   229  		err = me.onFileNotComplete(f)
   230  		if err != nil {
   231  			err = fmt.Errorf("preparing incomplete file %q: %w", f.safeOsPath, err)
   232  			return
   233  		}
   234  	}
   235  	return
   236  
   237  }
   238  
// promotePartFile flushes a fully-complete file and, when part files are in
// use, renames it from its part path to its final path and strips write
// permission from the result. Flush and chmod failures are logged only.
func (me *filePieceImpl) promotePartFile(f file) (err error) {
	// Flush file on completion, even if we don't promote it.
	// NOTE(review): this flushes the part path even when partFiles() is
	// false — confirm flush on a nonexistent path is a harmless no-op.
	err = me.t.io.flush(f.partFilePath(), 0, f.length())
	if err != nil {
		// Best effort: a flush failure shouldn't block promotion.
		me.logger().Warn("error flushing file before promotion", "file", f.partFilePath(), "err", err)
		err = nil
	}
	if !me.partFiles() {
		return nil
	}
	// Lock out concurrent rename/stat of this file (see
	// checkCompleteFileSizes and onFileNotComplete).
	f.mu.Lock()
	defer f.mu.Unlock()
	// NOTE(review): presumably a race-detector canary for concurrent file
	// state changes — confirm against the file type's definition.
	f.race++
	renamed, err := me.exclRenameIfExists(f.partFilePath(), f.safeOsPath)
	if err != nil {
		return
	}
	if !renamed {
		// Part file already gone; nothing left to promote.
		return
	}
	// Completed data shouldn't change: drop all write bits.
	err = os.Chmod(f.safeOsPath, filePerm&^0o222)
	if err != nil {
		me.logger().Info("error setting promoted file to read-only", "file", f.safeOsPath, "err", err)
		err = nil
	}
	return
}
   266  
   267  // Rename from if exists, and if so, to must not exist.
   268  func (me *filePieceImpl) exclRenameIfExists(from, to string) (renamed bool, err error) {
   269  	err = me.t.io.rename(from, to)
   270  	if err != nil {
   271  		if errors.Is(err, fs.ErrNotExist) {
   272  			err = nil
   273  		}
   274  		return
   275  	}
   276  	renamed = true
   277  	me.logger().Debug("renamed file", "from", from, "to", to)
   278  	return
   279  }
   280  
   281  func (me *filePieceImpl) onFileNotComplete(f file) (err error) {
   282  	if !me.partFiles() {
   283  		return
   284  	}
   285  	f.mu.Lock()
   286  	defer f.mu.Unlock()
   287  	f.race++
   288  	_, err = me.exclRenameIfExists(f.safeOsPath, f.partFilePath())
   289  	if err != nil {
   290  		err = fmt.Errorf("restoring part file: %w", err)
   291  		return
   292  	}
   293  	return
   294  }
   295  
   296  func (me *filePieceImpl) pathForWrite(f *file) string {
   297  	return me.t.pathForWrite(f)
   298  }
   299  
   300  func (me *filePieceImpl) partFiles() bool {
   301  	return me.t.partFiles()
   302  }
   303  
   304  type zeroReader struct{}
   305  
   306  func (me zeroReader) Read(p []byte) (n int, err error) {
   307  	clear(p)
   308  	return len(p), nil
   309  }
   310  
// WriteTo streams this piece's bytes to w, one file extent at a time. A
// short total with a nil error means data was missing (e.g. a constituent
// file doesn't exist yet); streaming stops at the first short extent so no
// bytes are emitted at wrong piece offsets.
func (me *filePieceImpl) WriteTo(w io.Writer) (n int64, err error) {
	for fileIndex, extent := range me.iterFileSegments() {
		var n1 int64
		n1, err = me.writeFileTo(w, fileIndex, extent)
		n += n1
		if err != nil {
			return
		}
		// writeFileTo must never over-report for its extent.
		panicif.GreaterThan(n1, extent.Length)
		if n1 < extent.Length {
			// Short extent with nil error: underlying file short/missing.
			return
		}
		panicif.NotEq(n1, extent.Length)
	}
	return
}
   327  
var (
	// Package-level counters published via expvar under "torrentStorage"
	// (e.g. bytesReadSkippedHole, bytesReadNotSkipped).
	packageExpvarMap = expvar.NewMap("torrentStorage")
)
   331  
   332  type limitWriter struct {
   333  	rem int64
   334  	w   io.Writer
   335  }
   336  
   337  func (me *limitWriter) Write(p []byte) (n int, err error) {
   338  	n, err = me.w.Write(p[:min(int64(len(p)), me.rem)])
   339  	me.rem -= int64(n)
   340  	if err != nil {
   341  		return
   342  	}
   343  	p = p[n:]
   344  	if len(p) > 0 {
   345  		err = io.ErrShortWrite
   346  	}
   347  	return
   348  }
   349  
// writeFileTo writes the portion of the piece that lives in the file at
// fileIndex to w. Filesystem holes inside the extent are emitted as zeroes.
// A missing file yields (0, nil); a short file yields a short count with a
// nil error — callers (WriteTo) treat that as missing data.
func (me *filePieceImpl) writeFileTo(w io.Writer, fileIndex int, extent segments.Extent) (written int64, err error) {
	if extent.Length == 0 {
		return
	}
	file := me.t.file(fileIndex)
	// Do we want io.WriterTo here, or are we happy to let that be type asserted in io.CopyN?
	var f fileReader
	f, err = me.t.openFile(file)
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			// Missing file just means no data yet, not a failure.
			err = nil
		}
		return
	}
	defer f.Close()
	// The extent must lie within the file's declared length.
	panicif.GreaterThan(extent.End(), file.FileInfo.Length)
	extentRemaining := extent.Length
	var dataOffset int64
	// Seek to the first real data at or after the extent start, skipping
	// over a leading filesystem hole if present.
	dataOffset, err = f.seekDataOrEof(extent.Start)
	if err != nil {
		err = fmt.Errorf("seeking to start of extent: %w", err)
		return
	}
	if dataOffset < extent.Start {
		// File is too short.
		return
	}
	if dataOffset > extent.Start {
		// Write zeroes until the end of the hole we're in.
		var n1 int64
		n := min(dataOffset-extent.Start, extent.Length)
		n1, err = writeZeroes(w, n)
		packageExpvarMap.Add("bytesReadSkippedHole", n1)
		written += n1
		if err != nil {
			return
		}
		// writeZeroes must emit exactly what was asked for on success.
		panicif.NotEq(n1, n)
		extentRemaining -= n1
	}
	// Copy the remaining (non-hole) bytes of the extent from the file.
	n1, err := f.writeToN(w, extentRemaining)
	packageExpvarMap.Add("bytesReadNotSkipped", n1)
	written += n1
	return
}
   395  
   396  //
   397  //// TODO: Just implement StorageReader already.
   398  //func (me *filePieceImpl) NewReader() (PieceReader, error) {
   399  //
   400  //}