github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/fs/fsutil/file_range_set.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package fsutil

import (
	"fmt"
	"io"
	"math"

	"github.com/SagerNet/gvisor/pkg/context"
	"github.com/SagerNet/gvisor/pkg/hostarch"
	"github.com/SagerNet/gvisor/pkg/safemem"
	"github.com/SagerNet/gvisor/pkg/sentry/memmap"
	"github.com/SagerNet/gvisor/pkg/sentry/pgalloc"
	"github.com/SagerNet/gvisor/pkg/sentry/usage"
)

// FileRangeSet maps offsets into a memmap.Mappable to offsets into a
// memmap.File. It is used to implement Mappables that store data in
// sparsely-allocated memory.
//
// type FileRangeSet <generated by go_generics>

// FileRangeSetFunctions implements segment.Functions for FileRangeSet.
type FileRangeSetFunctions struct{}

// MinKey implements segment.Functions.MinKey.
func (FileRangeSetFunctions) MinKey() uint64 {
	return 0
}

// MaxKey implements segment.Functions.MaxKey.
func (FileRangeSetFunctions) MaxKey() uint64 {
	return math.MaxUint64
}

// ClearValue implements segment.Functions.ClearValue.
func (FileRangeSetFunctions) ClearValue(_ *uint64) {
}

// Merge implements segment.Functions.Merge.
func (FileRangeSetFunctions) Merge(mr1 memmap.MappableRange, frstart1 uint64, _ memmap.MappableRange, frstart2 uint64) (uint64, bool) {
	if frstart1+mr1.Length() != frstart2 {
		return 0, false
	}
	return frstart1, true
}

// Split implements segment.Functions.Split.
func (FileRangeSetFunctions) Split(mr memmap.MappableRange, frstart uint64, split uint64) (uint64, uint64) {
	return frstart, frstart + (split - mr.Start)
}

// FileRange returns the FileRange mapped by seg.
func (seg FileRangeIterator) FileRange() memmap.FileRange {
	return seg.FileRangeOf(seg.Range())
}

// FileRangeOf returns the FileRange mapped by mr.
//
// Preconditions:
// * seg.Range().IsSupersetOf(mr).
// * mr.Length() != 0.
func (seg FileRangeIterator) FileRangeOf(mr memmap.MappableRange) memmap.FileRange {
	frstart := seg.Value() + (mr.Start - seg.Start())
	return memmap.FileRange{frstart, frstart + mr.Length()}
}
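
// exampleMergeSplitArithmetic is a hypothetical sketch, not part of the
// original file, illustrating the arithmetic behind Merge and Split above:
// two mappings merge only when their file offsets are contiguous, and
// splitting a mapping shifts the file offset by the same delta as the
// mappable offset. The ranges and offsets are made-up values chosen only to
// show the math.
func exampleMergeSplitArithmetic() {
	fns := FileRangeSetFunctions{}

	// Segment A maps mappable [0, 4096) to file offset 8192; segment B maps
	// mappable [4096, 8192) to file offset 12288. Since 8192+4096 == 12288,
	// the two are contiguous in the file and Merge succeeds.
	a := memmap.MappableRange{Start: 0, End: 4096}
	b := memmap.MappableRange{Start: 4096, End: 8192}
	if frstart, ok := fns.Merge(a, 8192, b, 12288); ok {
		_ = frstart // 8192: the merged segment maps [0, 8192) to file offset 8192.
	}

	// Splitting the merged segment at mappable offset 4096 recovers the two
	// original file offsets, the inverse of the Merge above.
	merged := memmap.MappableRange{Start: 0, End: 8192}
	fr1, fr2 := fns.Split(merged, 8192, 4096)
	_, _ = fr1, fr2 // 8192 and 12288 respectively.
}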

// Fill attempts to ensure that all memmap.Mappable offsets in required are
// mapped to a memmap.File offset, by allocating from mf with the given
// memory usage kind and invoking readAt to store data into memory. (If readAt
// returns a successful partial read, Fill will call it repeatedly until all
// bytes have been read.) EOF is handled consistently with the requirements of
// mmap(2): bytes after EOF on the same page are zeroed; pages after EOF are
// invalid. fileSize is an upper bound on the file's size; bytes after fileSize
// will be zeroed without calling readAt.
//
// Fill may read offsets outside of required, but will never read offsets
// outside of optional. It returns a non-nil error if any error occurs, even
// if the error only affects offsets in optional, but not in required.
//
// Preconditions:
// * required.Length() > 0.
// * optional.IsSupersetOf(required).
// * required and optional must be page-aligned.
func (frs *FileRangeSet) Fill(ctx context.Context, required, optional memmap.MappableRange, fileSize uint64, mf *pgalloc.MemoryFile, kind usage.MemoryKind, readAt func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error)) error {
	gap := frs.LowerBoundGap(required.Start)
	for gap.Ok() && gap.Start() < required.End {
		if gap.Range().Length() == 0 {
			gap = gap.NextGap()
			continue
		}
		gr := gap.Range().Intersect(optional)

		// Read data into the gap.
		fr, err := mf.AllocateAndFill(gr.Length(), kind, safemem.ReaderFunc(func(dsts safemem.BlockSeq) (uint64, error) {
			var done uint64
			for !dsts.IsEmpty() {
				n, err := func() (uint64, error) {
					off := gr.Start + done
					if off >= fileSize {
						return 0, io.EOF
					}
					if off+dsts.NumBytes() > fileSize {
						rd := fileSize - off
						n, err := readAt(ctx, dsts.TakeFirst64(rd), off)
						if n == rd && err == nil {
							return n, io.EOF
						}
						return n, err
					}
					return readAt(ctx, dsts, off)
				}()
				done += n
				dsts = dsts.DropFirst64(n)
				if err != nil {
					if err == io.EOF {
						// MemoryFile.AllocateAndFill truncates down to a page
						// boundary, but FileRangeSet.Fill is supposed to
						// zero-fill to the end of the page in this case.
						donepgaddr, ok := hostarch.Addr(done).RoundUp()
						if donepg := uint64(donepgaddr); ok && donepg != done {
							dsts.DropFirst64(donepg - done)
							done = donepg
							if dsts.IsEmpty() {
								return done, nil
							}
						}
					}
					return done, err
				}
			}
			return done, nil
		}))

		// Store anything we managed to read into the cache.
		if done := fr.Length(); done != 0 {
			gr.End = gr.Start + done
			gap = frs.Insert(gap, gr, fr.Start).NextGap()
		}

		if err != nil {
			return err
		}
	}
	return nil
}

// Drop removes segments for memmap.Mappable offsets in mr, freeing the
// corresponding memmap.FileRanges.
//
// Preconditions: mr must be page-aligned.
func (frs *FileRangeSet) Drop(mr memmap.MappableRange, mf *pgalloc.MemoryFile) {
	seg := frs.LowerBoundSegment(mr.Start)
	for seg.Ok() && seg.Start() < mr.End {
		seg = frs.Isolate(seg, mr)
		mf.DecRef(seg.FileRange())
		seg = frs.Remove(seg).NextSegment()
	}
}

// DropAll removes all segments in frs, freeing the corresponding
// memmap.FileRanges.
func (frs *FileRangeSet) DropAll(mf *pgalloc.MemoryFile) {
	for seg := frs.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
		mf.DecRef(seg.FileRange())
	}
	frs.RemoveAll()
}
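
// exampleFillUsage is a hypothetical sketch, not part of the original file,
// of how a caller might use Fill above to populate a cache. The specific
// ranges, the usage.PageCache kind, and the zero-filled stand-in reader are
// assumptions for illustration; a real caller would read from its backing
// file in the readAt callback.
func exampleFillUsage(ctx context.Context, frs *FileRangeSet, mf *pgalloc.MemoryFile, fileSize uint64) error {
	// Require the first page, but allow Fill to read ahead up to four pages
	// so that more of the file can be cached in a single pass.
	required := memmap.MappableRange{Start: 0, End: hostarch.PageSize}
	optional := memmap.MappableRange{Start: 0, End: 4 * hostarch.PageSize}

	return frs.Fill(ctx, required, optional, fileSize, mf, usage.PageCache,
		func(ctx context.Context, dsts safemem.BlockSeq, offset uint64) (uint64, error) {
			// Stand-in reader: treat the backing file as all zeroes.
			return safemem.ZeroSeq(dsts)
		})
}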

// Truncate updates frs to reflect Mappable truncation to the given length:
// bytes after the new EOF on the same page are zeroed, and pages after the new
// EOF are freed.
func (frs *FileRangeSet) Truncate(end uint64, mf *pgalloc.MemoryFile) {
	pgendaddr, ok := hostarch.Addr(end).RoundUp()
	if ok {
		pgend := uint64(pgendaddr)

		// Free truncated pages.
		frs.SplitAt(pgend)
		seg := frs.LowerBoundSegment(pgend)
		for seg.Ok() {
			mf.DecRef(seg.FileRange())
			seg = frs.Remove(seg).NextSegment()
		}

		if end == pgend {
			return
		}
	}

	// Here we know end < end.RoundUp(). If the new EOF lands in the middle of
	// a page that we have, zero out its contents beyond the new length.
	seg := frs.FindSegment(end)
	if seg.Ok() {
		fr := seg.FileRange()
		fr.Start += end - seg.Start()
		ims, err := mf.MapInternal(fr, hostarch.Write)
		if err != nil {
			// There's no good recourse from here. This means that we can't
			// keep cached memory consistent with the new end of file. The
			// caller may have already updated the file size on their backing
			// file system.
			//
			// We don't want to risk blindly continuing onward, so in the
			// extremely rare cases this does happen, we abandon ship.
			panic(fmt.Sprintf("Failed to map %v: %v", fr, err))
		}
		if _, err := safemem.ZeroSeq(ims); err != nil {
			panic(fmt.Sprintf("Zeroing %v failed: %v", fr, err))
		}
	}
}
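
// exampleTruncate is a hypothetical sketch, not part of the original file,
// showing the two effects of Truncate above when the new length is not
// page-aligned. It assumes frs currently caches some page-aligned prefix of
// the file; the specific lengths are made-up for illustration.
func exampleTruncate(frs *FileRangeSet, mf *pgalloc.MemoryFile) {
	// Truncating to 1.5 pages frees all cached pages at or above offset
	// 2*PageSize and zeroes the cached bytes between the new EOF and the end
	// of its page, matching mmap(2)'s requirement that bytes past EOF on the
	// final page read as zero.
	newLen := uint64(hostarch.PageSize + hostarch.PageSize/2)
	frs.Truncate(newLen, mf)

	// Dropping the remaining page-aligned range afterwards would release the
	// rest of the cached memory back to mf.
	frs.Drop(memmap.MappableRange{Start: 0, End: 2 * hostarch.PageSize}, mf)
}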