github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/sentry/fsutil/dirty_set.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fsutil

import (
	"math"

	"github.com/nicocha30/gvisor-ligolo/pkg/context"
	"github.com/nicocha30/gvisor-ligolo/pkg/hostarch"
	"github.com/nicocha30/gvisor-ligolo/pkg/safemem"
	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/memmap"
)

// DirtySet maps offsets into a memmap.Mappable to DirtyInfo. It is used to
// implement Mappables that cache data from another source.
//
// type DirtySet <generated by go_generics>
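//
// A minimal usage sketch; the caller, its dirty/cache/mem/writeAt values, and
// the page-aligned off/end/size are assumptions, not part of this file:
//
//	// After modifying cached data for the page-aligned range [off, end):
//	dirty.MarkDirty(memmap.MappableRange{Start: off, End: end})
//	// Later, write the dirty pages back and drop them from the set:
//	err := SyncDirty(ctx, memmap.MappableRange{Start: off, End: end},
//		&cache, &dirty, size, mem, writeAt)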

// DirtyInfo is the value type of DirtySet, and represents information about a
// Mappable offset that is dirty (the cached data for that offset is newer than
// its source).
//
// +stateify savable
type DirtyInfo struct {
	// Keep is true if the represented offset is concurrently writable, such
	// that writing the data for that offset back to the source does not
	// guarantee that the offset is clean (since it may be concurrently
	// rewritten after the writeback).
	Keep bool
}

// dirtySetFunctions implements segment.Functions for DirtySet.
type dirtySetFunctions struct{}

// MinKey implements segment.Functions.MinKey.
func (dirtySetFunctions) MinKey() uint64 {
	return 0
}

// MaxKey implements segment.Functions.MaxKey.
func (dirtySetFunctions) MaxKey() uint64 {
	return math.MaxUint64
}

// ClearValue implements segment.Functions.ClearValue.
func (dirtySetFunctions) ClearValue(val *DirtyInfo) {
}

// Merge implements segment.Functions.Merge.
func (dirtySetFunctions) Merge(_ memmap.MappableRange, val1 DirtyInfo, _ memmap.MappableRange, val2 DirtyInfo) (DirtyInfo, bool) {
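	// Adjacent segments may only merge if they agree on Keep: collapsing a
	// kept-dirty range into an ordinary dirty one would let MarkClean drop it
	// early, and the reverse would pin data that no one asked to keep.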
	if val1 != val2 {
		return DirtyInfo{}, false
	}
	return val1, true
}

// Split implements segment.Functions.Split.
func (dirtySetFunctions) Split(_ memmap.MappableRange, val DirtyInfo, _ uint64) (DirtyInfo, DirtyInfo) {
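	// Both halves inherit the same DirtyInfo; splitting a segment never
	// changes whether any offset is dirty or kept dirty.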
	return val, val
}

// MarkClean marks all offsets in mr as not dirty, except for those to which
// KeepDirty has been applied.
func (ds *DirtySet) MarkClean(mr memmap.MappableRange) {
	seg := ds.LowerBoundSegment(mr.Start)
	for seg.Ok() && seg.Start() < mr.End {
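		// Segments pinned by KeepDirty survive MarkClean; only KeepClean (or
		// AllowClean followed by another MarkClean) can remove them.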
		if seg.Value().Keep {
			seg = seg.NextSegment()
			continue
		}
		seg = ds.Isolate(seg, mr)
		seg = ds.Remove(seg).NextSegment()
	}
}

// KeepClean marks all offsets in mr as not dirty, even those that were
// previously kept dirty by KeepDirty.
func (ds *DirtySet) KeepClean(mr memmap.MappableRange) {
	ds.RemoveRange(mr)
}

// MarkDirty marks all offsets in mr as dirty.
func (ds *DirtySet) MarkDirty(mr memmap.MappableRange) {
	ds.setDirty(mr, false)
}

// KeepDirty marks all offsets in mr as dirty and prevents them from being
// marked as clean by MarkClean.
func (ds *DirtySet) KeepDirty(mr memmap.MappableRange) {
	ds.setDirty(mr, true)
}

func (ds *DirtySet) setDirty(mr memmap.MappableRange, keep bool) {
	var changedAny bool
	defer func() {
		if changedAny {
			// Merge segments split by Isolate to reduce cost of iteration.
			ds.MergeRange(mr)
		}
	}()
	seg, gap := ds.Find(mr.Start)
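	// Walk mr, alternating between existing segments (which may need their
	// Keep flag raised) and gaps (which need new dirty segments inserted).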
	for {
		switch {
		case seg.Ok() && seg.Start() < mr.End:
			if keep && !seg.Value().Keep {
				changedAny = true
				seg = ds.Isolate(seg, mr)
				seg.ValuePtr().Keep = true
			}
			seg, gap = seg.NextNonEmpty()

		case gap.Ok() && gap.Start() < mr.End:
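			// No existing segment covers this part of mr, so the whole
			// intersection becomes a newly dirty segment.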
			changedAny = true
			seg = ds.Insert(gap, gap.Range().Intersect(mr), DirtyInfo{keep})
			seg, gap = seg.NextNonEmpty()

		default:
			return
		}
	}
}

// AllowClean allows MarkClean to mark offsets in mr as not dirty, ending the
// effect of a previous call to KeepDirty. (It does not itself mark those
// offsets as not dirty.)
func (ds *DirtySet) AllowClean(mr memmap.MappableRange) {
	var changedAny bool
	defer func() {
		if changedAny {
			// Merge segments split by Isolate to reduce cost of iteration.
			ds.MergeRange(mr)
		}
	}()
	for seg := ds.LowerBoundSegment(mr.Start); seg.Ok() && seg.Start() < mr.End; seg = seg.NextSegment() {
		if seg.Value().Keep {
			changedAny = true
			seg = ds.Isolate(seg, mr)
			seg.ValuePtr().Keep = false
		}
	}
}

// SyncDirty passes pages in the range mr that are stored in cache and
// identified as dirty to writeAt, updating dirty to reflect successful writes.
// If writeAt returns a successful partial write, SyncDirty will call it
// repeatedly until all bytes have been written. max is the true size of the
// cached object; offsets beyond max will not be passed to writeAt, even if
// they are marked dirty.
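//
// A writeAt callback might, for example, write through to the backing file.
// The sketch below is an assumption about the caller, not part of this file;
// srcFile and its WriteFromBlocksAt method are hypothetical:
//
//	writeAt := func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error) {
//		return srcFile.WriteFromBlocksAt(ctx, srcs, offset)
//	}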
func SyncDirty(ctx context.Context, mr memmap.MappableRange, cache *FileRangeSet, dirty *DirtySet, max uint64, mem memmap.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error {
	var changedDirty bool
	defer func() {
		if changedDirty {
			// Merge segments split by Isolate to reduce cost of iteration.
			dirty.MergeRange(mr)
		}
	}()
	dseg := dirty.LowerBoundSegment(mr.Start)
	for dseg.Ok() && dseg.Start() < mr.End {
		var dr memmap.MappableRange
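		// Kept-dirty segments are written back but left in the set, since a
		// concurrent writer may re-dirty them at any time; other segments are
		// isolated so they can be removed once the writeback succeeds.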
		if dseg.Value().Keep {
			dr = dseg.Range().Intersect(mr)
		} else {
			changedDirty = true
			dseg = dirty.Isolate(dseg, mr)
			dr = dseg.Range()
		}
		if err := syncDirtyRange(ctx, dr, cache, max, mem, writeAt); err != nil {
			return err
		}
		if dseg.Value().Keep {
			dseg = dseg.NextSegment()
		} else {
			dseg = dirty.Remove(dseg).NextSegment()
		}
	}
	return nil
}

// SyncDirtyAll passes all pages stored in cache identified as dirty to
// writeAt, updating dirty to reflect successful writes. If writeAt returns a
// successful partial write, SyncDirtyAll will call it repeatedly until all
// bytes have been written. max is the true size of the cached object; offsets
// beyond max will not be passed to writeAt, even if they are marked dirty.
func SyncDirtyAll(ctx context.Context, cache *FileRangeSet, dirty *DirtySet, max uint64, mem memmap.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error {
	dseg := dirty.FirstSegment()
	for dseg.Ok() {
		if err := syncDirtyRange(ctx, dseg.Range(), cache, max, mem, writeAt); err != nil {
			return err
		}
		if dseg.Value().Keep {
			dseg = dseg.NextSegment()
		} else {
			dseg = dirty.Remove(dseg).NextSegment()
		}
	}
	return nil
}

// Preconditions: mr must be page-aligned.
func syncDirtyRange(ctx context.Context, mr memmap.MappableRange, cache *FileRangeSet, max uint64, mem memmap.File, writeAt func(ctx context.Context, srcs safemem.BlockSeq, offset uint64) (uint64, error)) error {
	for cseg := cache.LowerBoundSegment(mr.Start); cseg.Ok() && cseg.Start() < mr.End; cseg = cseg.NextSegment() {
		wbr := cseg.Range().Intersect(mr)
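		// Never write back past max, the true size of the cached object: stop
		// once the writeback range begins beyond it, and truncate a range that
		// straddles it before handing it to writeAt.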
		if max < wbr.Start {
			break
		}
		ims, err := mem.MapInternal(cseg.FileRangeOf(wbr), hostarch.Read)
		if err != nil {
			return err
		}
		if max < wbr.End {
			ims = ims.TakeFirst64(max - wbr.Start)
		}
		offset := wbr.Start
		for !ims.IsEmpty() {
			n, err := writeAt(ctx, ims, offset)
			if err != nil {
				return err
			}
			offset += n
			ims = ims.DropFirst64(n)
		}
	}
	return nil
}