// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

import (
	"sync/atomic"
	"unsafe"

	"github.com/SagerNet/gvisor/pkg/context"
	"github.com/SagerNet/gvisor/pkg/sentry/fs"
	"github.com/SagerNet/gvisor/pkg/sentry/vfs"
)

// descriptorTable is the lock-free storage underlying an FDTable. Readers
// access it purely via atomic loads (see getAll), while structural changes
// (growing the slice) are serialized by FDTable.mu (see setAll).
type descriptorTable struct {
	// slice is a *[]unsafe.Pointer, where each element is actually a
	// *descriptor object, updated atomically.
	//
	// Changes to the slice itself (i.e. replacing the backing array when
	// the table grows) require holding FDTable.mu; individual elements
	// are read and swapped atomically without the lock.
	//
	// The state tag causes save/restore to serialize this as a
	// map[int32]*descriptor rather than as a raw pointer.
	slice unsafe.Pointer `state:".(map[int32]*descriptor)"`
}

// initNoLeakCheck initializes the table without enabling leak checking.
//
// This is used when loading an FDTable after S/R, during which the ref count
// object itself will enable leak checking if necessary.
func (f *FDTable) initNoLeakCheck() {
	var slice []unsafe.Pointer // Empty slice.
	// Publish the empty slice atomically so concurrent readers in getAll
	// never observe a nil slice pointer.
	atomic.StorePointer(&f.slice, unsafe.Pointer(&slice))
}

// init initializes the table with leak checking.
func (f *FDTable) init() {
	f.initNoLeakCheck()
	f.InitRefs()
}

// get gets a file entry (VFS1 only).
//
// The boolean indicates whether this was in range.
//
//go:nosplit
func (f *FDTable) get(fd int32) (*fs.File, FDFlags, bool) {
	file, _, flags, ok := f.getAll(fd)
	return file, flags, ok
}

// getVFS2 gets a file entry (VFS2 only).
//
// The boolean indicates whether this was in range.
//
//go:nosplit
func (f *FDTable) getVFS2(fd int32) (*vfs.FileDescription, FDFlags, bool) {
	_, file, flags, ok := f.getAll(fd)
	return file, flags, ok
}

// getAll gets a file entry, returning both the VFS1 and VFS2 variants (at
// most one of which is non-nil for a given descriptor).
//
// The boolean indicates whether this was in range. An in-range fd with no
// installed file returns (nil, nil, FDFlags{}, true).
//
// getAll takes no locks: it snapshots the slice pointer and the element
// atomically, so it may race benignly with concurrent setAll calls.
//
//go:nosplit
func (f *FDTable) getAll(fd int32) (*fs.File, *vfs.FileDescription, FDFlags, bool) {
	slice := *(*[]unsafe.Pointer)(atomic.LoadPointer(&f.slice))
	if fd >= int32(len(slice)) {
		return nil, nil, FDFlags{}, false
	}
	d := (*descriptor)(atomic.LoadPointer(&slice[fd]))
	if d == nil {
		return nil, nil, FDFlags{}, true
	}
	// Invariant: a descriptor holds either a VFS1 or a VFS2 file, never both
	// (enforced on the write side in setAll as well).
	if d.file != nil && d.fileVFS2 != nil {
		panic("VFS1 and VFS2 files set")
	}
	return d.file, d.fileVFS2, d.flags, true
}

// CurrentMaxFDs returns the number of file descriptors that may be stored in f
// without reallocation.
func (f *FDTable) CurrentMaxFDs() int {
	slice := *(*[]unsafe.Pointer)(atomic.LoadPointer(&f.slice))
	return len(slice)
}

// set sets an entry for VFS1, refer to setAll().
//
// Precondition: mu must be held.
func (f *FDTable) set(ctx context.Context, fd int32, file *fs.File, flags FDFlags) *fs.File {
	dropFile, _ := f.setAll(ctx, fd, file, nil, flags)
	return dropFile
}

// setVFS2 sets an entry for VFS2, refer to setAll().
//
// Precondition: mu must be held.
func (f *FDTable) setVFS2(ctx context.Context, fd int32, file *vfs.FileDescription, flags FDFlags) *vfs.FileDescription {
	_, dropFile := f.setAll(ctx, fd, nil, file, flags)
	return dropFile
}

// setAll sets the file description referred to by fd to file/fileVFS2. If
// file/fileVFS2 are non-nil, it takes a reference on them. If setAll replaces
// an existing file description, it returns it with the FDTable's reference
// transferred to the caller, which must call f.drop/dropVFS2() on the returned
// file after unlocking f.mu.
//
// Passing nil for both file and fileVFS2 clears the entry.
//
// Precondition: mu must be held.
func (f *FDTable) setAll(ctx context.Context, fd int32, file *fs.File, fileVFS2 *vfs.FileDescription, flags FDFlags) (*fs.File, *vfs.FileDescription) {
	if file != nil && fileVFS2 != nil {
		panic("VFS1 and VFS2 files set")
	}

	slicePtr := (*[]unsafe.Pointer)(atomic.LoadPointer(&f.slice))

	// Grow the table as required. Growth at least doubles the table (to
	// amortize reallocation) while guaranteeing room for fd. The new slice
	// is fully built, then published with a single atomic store so lockless
	// readers see either the old or the new table, never a partial one.
	if last := int32(len(*slicePtr)); fd >= last {
		end := fd + 1
		if end < 2*last {
			end = 2 * last
		}
		newSlice := append(*slicePtr, make([]unsafe.Pointer, end-last)...)
		slicePtr = &newSlice
		atomic.StorePointer(&f.slice, unsafe.Pointer(slicePtr))
	}

	slice := *slicePtr

	var desc *descriptor
	if file != nil || fileVFS2 != nil {
		desc = &descriptor{
			file:     file,
			fileVFS2: fileVFS2,
			flags:    flags,
		}
	}

	// Update the single element. Swap (rather than load+store) so the old
	// descriptor is captured exactly once for the reference accounting below.
	orig := (*descriptor)(atomic.SwapPointer(&slice[fd], unsafe.Pointer(desc)))

	// Acquire a table reference on the incoming file — but only if it is not
	// the same file already installed at this fd, in which case the existing
	// table reference is simply retained.
	if desc != nil {
		switch {
		case desc.file != nil:
			if orig == nil || desc.file != orig.file {
				desc.file.IncRef()
			}
		case desc.fileVFS2 != nil:
			if orig == nil || desc.fileVFS2 != orig.fileVFS2 {
				desc.fileVFS2.IncRef()
			}
		}
	}

	// Adjust used: +1 when populating an empty slot, -1 when clearing an
	// occupied one; replacing one file with another leaves it unchanged.
	switch {
	case orig == nil && desc != nil:
		atomic.AddInt32(&f.used, 1)
	case orig != nil && desc == nil:
		atomic.AddInt32(&f.used, -1)
	}

	// Return the displaced file (if any) with the table's reference
	// transferred to the caller; nothing is returned when the same file was
	// re-installed, since its reference was retained above.
	if orig != nil {
		switch {
		case orig.file != nil:
			if desc == nil || desc.file != orig.file {
				return orig.file, nil
			}
		case orig.fileVFS2 != nil:
			if desc == nil || desc.fileVFS2 != orig.fileVFS2 {
				return nil, orig.fileVFS2
			}
		}
	}
	return nil, nil
}