github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/fs/proc/seqfile/seqfile.go (about)

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Package seqfile provides dynamic ordered files.
    16  package seqfile
    17  
    18  import (
    19  	"io"
    20  
    21  	"github.com/SagerNet/gvisor/pkg/abi/linux"
    22  	"github.com/SagerNet/gvisor/pkg/context"
    23  	"github.com/SagerNet/gvisor/pkg/errors/linuxerr"
    24  	"github.com/SagerNet/gvisor/pkg/hostarch"
    25  	"github.com/SagerNet/gvisor/pkg/sentry/fs"
    26  	"github.com/SagerNet/gvisor/pkg/sentry/fs/fsutil"
    27  	"github.com/SagerNet/gvisor/pkg/sentry/fs/proc/device"
    28  	ktime "github.com/SagerNet/gvisor/pkg/sentry/kernel/time"
    29  	"github.com/SagerNet/gvisor/pkg/sync"
    30  	"github.com/SagerNet/gvisor/pkg/usermem"
    31  	"github.com/SagerNet/gvisor/pkg/waiter"
    32  )
    33  
// SeqHandle is a helper handle to seek in the file.
//
// It is an opaque resume marker: a SeqSource stores whatever bookkeeping it
// needs in SeqData.Handle, and receives it back via ReadSeqFileData to
// continue producing records after the last one already read.
type SeqHandle interface{}
    36  
// SeqData holds the data for one unit in the file.
//
// +stateify savable
type SeqData struct {
	// Buf is the data to be returned to the user for this unit.
	Buf []byte

	// Handle is a seek handle used to find the next valid unit in
	// ReadSeqFileData (see SeqSource.ReadSeqFileData).
	Handle SeqHandle
}
    47  
// SeqSource is a data source for a SeqFile file.
type SeqSource interface {
	// NeedsUpdate returns true if the consumer of SeqData should call
	// ReadSeqFileData again. Generation is the generation returned by
	// ReadSeqFile or 0.
	NeedsUpdate(generation int64) bool

	// ReadSeqFileData returns a slice of SeqData ordered by unit and the
	// current generation. The first entry in the slice is greater than
	// the handle. If handle is nil then all known records are returned.
	// Generation must always be greater than 0.
	ReadSeqFileData(ctx context.Context, handle SeqHandle) ([]SeqData, int64)
}
    61  
    62  // SeqGenerationCounter is a counter to keep track if the SeqSource should be
    63  // updated. SeqGenerationCounter is not thread-safe and should be protected
    64  // with a mutex.
    65  type SeqGenerationCounter struct {
    66  	// The generation that the SeqData is at.
    67  	generation int64
    68  }
    69  
    70  // SetGeneration sets the generation to the new value, be careful to not set it
    71  // to a value less than current.
    72  func (s *SeqGenerationCounter) SetGeneration(generation int64) {
    73  	s.generation = generation
    74  }
    75  
    76  // Update increments the current generation.
    77  func (s *SeqGenerationCounter) Update() {
    78  	s.generation++
    79  }
    80  
    81  // Generation returns the current generation counter.
    82  func (s *SeqGenerationCounter) Generation() int64 {
    83  	return s.generation
    84  }
    85  
    86  // IsCurrent returns whether the given generation is current or not.
    87  func (s *SeqGenerationCounter) IsCurrent(generation int64) bool {
    88  	return s.Generation() == generation
    89  }
    90  
// SeqFile is used to provide dynamic files that can be ordered by record.
//
// The embedded fsutil mixins stub out inode behaviors that do not apply to a
// read-only virtual file (no mmap, no truncate, no directory ops, etc.).
//
// +stateify savable
type SeqFile struct {
	fsutil.InodeGenericChecker `state:"nosave"`
	fsutil.InodeNoopRelease    `state:"nosave"`
	fsutil.InodeNoopWriteOut   `state:"nosave"`
	fsutil.InodeNotAllocatable `state:"nosave"`
	fsutil.InodeNotDirectory   `state:"nosave"`
	fsutil.InodeNotMappable    `state:"nosave"`
	fsutil.InodeNotSocket      `state:"nosave"`
	fsutil.InodeNotSymlink     `state:"nosave"`
	fsutil.InodeNotTruncatable `state:"nosave"`
	fsutil.InodeVirtual        `state:"nosave"`

	fsutil.InodeSimpleExtendedAttributes
	fsutil.InodeSimpleAttributes

	// mu protects the fields below.
	mu sync.Mutex `state:"nosave"`

	// SeqSource generates the records served by this file.
	SeqSource

	// source caches the records read from SeqSource so far.
	source []SeqData
	// generation is the SeqSource generation at which source was read.
	generation int64
	// lastRead is the offset at which the most recent Read started; a
	// read at a smaller offset indicates a backwards seek.
	lastRead int64
}

var _ fs.InodeOperations = (*SeqFile)(nil)
   120  
   121  // NewSeqFile returns a seqfile suitable for use by external consumers.
   122  func NewSeqFile(ctx context.Context, source SeqSource) *SeqFile {
   123  	return &SeqFile{
   124  		InodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),
   125  		SeqSource:             source,
   126  	}
   127  }
   128  
   129  // NewSeqFileInode returns an Inode with SeqFile InodeOperations.
   130  func NewSeqFileInode(ctx context.Context, source SeqSource, msrc *fs.MountSource) *fs.Inode {
   131  	iops := NewSeqFile(ctx, source)
   132  	sattr := fs.StableAttr{
   133  		DeviceID:  device.ProcDevice.DeviceID(),
   134  		InodeID:   device.ProcDevice.NextIno(),
   135  		BlockSize: hostarch.PageSize,
   136  		Type:      fs.SpecialFile,
   137  	}
   138  	return fs.NewInode(ctx, iops, msrc, sattr)
   139  }
   140  
   141  // UnstableAttr returns unstable attributes of the SeqFile.
   142  func (s *SeqFile) UnstableAttr(ctx context.Context, inode *fs.Inode) (fs.UnstableAttr, error) {
   143  	uattr, err := s.InodeSimpleAttributes.UnstableAttr(ctx, inode)
   144  	if err != nil {
   145  		return fs.UnstableAttr{}, err
   146  	}
   147  	uattr.ModificationTime = ktime.NowFromContext(ctx)
   148  	return uattr, nil
   149  }
   150  
   151  // GetFile implements fs.InodeOperations.GetFile.
   152  func (s *SeqFile) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
   153  	return fs.NewFile(ctx, dirent, flags, &seqFileOperations{seqFile: s}), nil
   154  }
   155  
   156  // findIndexAndOffset finds the unit that corresponds to a certain offset.
   157  // Returns the unit and the offset within the unit. If there are not enough
   158  // units len(data) and leftover offset is returned.
   159  func findIndexAndOffset(data []SeqData, offset int64) (int, int64) {
   160  	for i, buf := range data {
   161  		l := int64(len(buf.Buf))
   162  		if offset < l {
   163  			return i, offset
   164  		}
   165  		offset -= l
   166  	}
   167  	return len(data), offset
   168  }
   169  
   170  // updateSourceLocked requires that s.mu is held.
   171  func (s *SeqFile) updateSourceLocked(ctx context.Context, record int) {
   172  	var h SeqHandle
   173  	if record == 0 {
   174  		h = nil
   175  	} else {
   176  		h = s.source[record-1].Handle
   177  	}
   178  	// Save what we have previously read.
   179  	s.source = s.source[:record]
   180  	var newSource []SeqData
   181  	newSource, s.generation = s.SeqSource.ReadSeqFileData(ctx, h)
   182  	s.source = append(s.source, newSource...)
   183  }
   184  
// seqFileOperations implements fs.FileOperations.
//
// The embedded fsutil/waiter mixins stub out operations that do not apply to
// a read-only, always-ready virtual file (no mmap, no ioctl, no splice,
// no directory reads, etc.).
//
// +stateify savable
type seqFileOperations struct {
	fsutil.FileGenericSeek          `state:"nosave"`
	fsutil.FileNoIoctl              `state:"nosave"`
	fsutil.FileNoMMap               `state:"nosave"`
	fsutil.FileNoSplice             `state:"nosave"`
	fsutil.FileNoopFlush            `state:"nosave"`
	fsutil.FileNoopFsync            `state:"nosave"`
	fsutil.FileNoopRelease          `state:"nosave"`
	fsutil.FileNotDirReaddir        `state:"nosave"`
	fsutil.FileUseInodeUnstableAttr `state:"nosave"`
	waiter.AlwaysReady              `state:"nosave"`

	// seqFile is the backing inode whose records are served by Read.
	seqFile *SeqFile
}

var _ fs.FileOperations = (*seqFileOperations)(nil)
   204  
   205  // Write implements fs.FileOperations.Write.
   206  func (*seqFileOperations) Write(context.Context, *fs.File, usermem.IOSequence, int64) (int64, error) {
   207  	return 0, linuxerr.EACCES
   208  }
   209  
// Read implements fs.FileOperations.Read.
//
// It serves bytes from the cached record slice, refreshing the SeqSource
// when the cache is exhausted, when the source reports it is stale, or when
// the caller has seeked backwards since the previous read. Only records at
// or after the current read position are ever regenerated.
func (sfo *seqFileOperations) Read(ctx context.Context, file *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
	sfo.seqFile.mu.Lock()
	defer sfo.seqFile.mu.Unlock()

	sfo.seqFile.NotifyAccess(ctx)
	// Remember the offset this read started at; a later read at a smaller
	// offset (a backwards seek) forces a source refresh below.
	defer func() { sfo.seqFile.lastRead = offset }()

	// Set when the source has already been refreshed during this call so
	// we do not refresh a second time below.
	updated := false

	// Try to find where we should start reading this file.
	i, recordOffset := findIndexAndOffset(sfo.seqFile.source, offset)
	if i == len(sfo.seqFile.source) {
		// Ok, we're at EOF. Let's first check to see if there might be
		// more data available to us. If there is more data, add it to
		// the end and try reading again.
		if !sfo.seqFile.SeqSource.NeedsUpdate(sfo.seqFile.generation) {
			return 0, io.EOF
		}
		oldLen := len(sfo.seqFile.source)
		sfo.seqFile.updateSourceLocked(ctx, len(sfo.seqFile.source))
		updated = true
		// We know that we had consumed everything up until this point
		// so we search in the new slice instead of starting over.
		i, recordOffset = findIndexAndOffset(sfo.seqFile.source[oldLen:], recordOffset)
		i += oldLen
		// i is at most the length of the slice which is
		// len(sfo.seqFile.source) - oldLen. So at most i will be equal to
		// len(sfo.seqFile.source).
		if i == len(sfo.seqFile.source) {
			return 0, io.EOF
		}
	}

	var done int64
	// We're reading parts of a record, finish reading the current object
	// before continuing on to the next. We don't refresh our data source
	// before this record is completed.
	if recordOffset != 0 {
		n, err := dst.CopyOut(ctx, sfo.seqFile.source[i].Buf[recordOffset:])
		done += int64(n)
		dst = dst.DropFirst(n)
		if dst.NumBytes() == 0 || err != nil {
			return done, err
		}
		i++
	}

	// Next/New unit, update the source file if necessary. Make an extra
	// check to see if we've seeked backwards and if so always update our
	// data source.
	if !updated && (sfo.seqFile.SeqSource.NeedsUpdate(sfo.seqFile.generation) || sfo.seqFile.lastRead > offset) {
		sfo.seqFile.updateSourceLocked(ctx, i)
		// recordOffset is 0 here and we won't update records behind the
		// current one so recordOffset is still 0 even though source
		// just got updated. Just read the next record.
	}

	// Finish by reading all the available data.
	for _, buf := range sfo.seqFile.source[i:] {
		n, err := dst.CopyOut(ctx, buf.Buf)
		done += int64(n)
		dst = dst.DropFirst(n)
		if dst.NumBytes() == 0 || err != nil {
			return done, err
		}
	}

	// If the file shrank (entries not yet read were removed above)
	// while we tried to read we can end up with nothing read.
	if done == 0 && dst.NumBytes() != 0 {
		return 0, io.EOF
	}
	return done, nil
}