github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/vfs/mount_test.go

// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package vfs

import (
	"fmt"
	"runtime"
	"testing"

	"github.com/SagerNet/gvisor/pkg/sync"
)

func TestMountTableLookupEmpty(t *testing.T) {
	var mt mountTable
	mt.Init()

	parent := &Mount{}
	point := &Dentry{}
	if m := mt.Lookup(parent, point); m != nil {
		t.Errorf("Empty mountTable lookup: got %p, wanted nil", m)
	}
}

func TestMountTableInsertLookup(t *testing.T) {
	var mt mountTable
	mt.Init()

	mount := &Mount{}
	mount.setKey(VirtualDentry{&Mount{}, &Dentry{}})
	mt.Insert(mount)

	if m := mt.Lookup(mount.parent(), mount.point()); m != mount {
		t.Errorf("mountTable positive lookup: got %p, wanted %p", m, mount)
	}

	otherParent := &Mount{}
	if m := mt.Lookup(otherParent, mount.point()); m != nil {
		t.Errorf("mountTable lookup with wrong mount parent: got %p, wanted nil", m)
	}
	otherPoint := &Dentry{}
	if m := mt.Lookup(mount.parent(), otherPoint); m != nil {
		t.Errorf("mountTable lookup with wrong mount point: got %p, wanted nil", m)
	}
}

// TODO(github.com/SagerNet/issue/1035): concurrent lookup/insertion/removal.

// must be powers of 2
var benchNumMounts = []int{1 << 2, 1 << 5, 1 << 8}

// For all of the following:
//
// - BenchmarkMountTableFoo tests usage pattern "Foo" for mountTable.
//
// - BenchmarkMountMapFoo tests usage pattern "Foo" for a
// sync.RWMutex-protected map. (Mutator benchmarks do not use a RWMutex, since
// mountTable also requires external synchronization between mutators.)
//
// - BenchmarkMountSyncMapFoo tests usage pattern "Foo" for a sync.Map.
//
// ParallelLookup is by far the most common and performance-sensitive operation
// for this application. NegativeLookup is also important, but less so (only
// relevant with multiple mount namespaces and significant differences in
// mounts between them). Insertion and removal are benchmarked for
// completeness.
const enableComparativeBenchmarks = false

func newBenchMount() *Mount {
	mount := &Mount{}
	mount.loadKey(VirtualDentry{&Mount{}, &Dentry{}})
	return mount
}

func BenchmarkMountTableParallelLookup(b *testing.B) {
	for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
		for _, numMounts := range benchNumMounts {
			desc := fmt.Sprintf("%dx%d", numG, numMounts)
			b.Run(desc, func(b *testing.B) {
				var mt mountTable
				mt.Init()
				keys := make([]VirtualDentry, 0, numMounts)
				for i := 0; i < numMounts; i++ {
					mount := newBenchMount()
					mt.Insert(mount)
					keys = append(keys, mount.saveKey())
				}

				var ready sync.WaitGroup
				begin := make(chan struct{})
				var end sync.WaitGroup
				for g := 0; g < numG; g++ {
					ready.Add(1)
					end.Add(1)
					go func() {
						defer end.Done()
						ready.Done()
						<-begin
						for i := 0; i < b.N; i++ {
							k := keys[i&(numMounts-1)]
							m := mt.Lookup(k.mount, k.dentry)
							if m == nil {
								b.Errorf("Lookup failed")
								return
							}
							if parent := m.parent(); parent != k.mount {
								b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
								return
							}
							if point := m.point(); point != k.dentry {
								b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
								return
							}
						}
					}()
				}

				ready.Wait()
				b.ResetTimer()
				close(begin)
				end.Wait()
			})
		}
	}
}

func BenchmarkMountMapParallelLookup(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
		for _, numMounts := range benchNumMounts {
			desc := fmt.Sprintf("%dx%d", numG, numMounts)
			b.Run(desc, func(b *testing.B) {
				var mu sync.RWMutex
				ms := make(map[VirtualDentry]*Mount)
				keys := make([]VirtualDentry, 0, numMounts)
				for i := 0; i < numMounts; i++ {
					mount := newBenchMount()
					key := mount.saveKey()
					ms[key] = mount
					keys = append(keys, key)
				}

				var ready sync.WaitGroup
				begin := make(chan struct{})
				var end sync.WaitGroup
				for g := 0; g < numG; g++ {
					ready.Add(1)
					end.Add(1)
					go func() {
						defer end.Done()
						ready.Done()
						<-begin
						for i := 0; i < b.N; i++ {
							k := keys[i&(numMounts-1)]
							mu.RLock()
							m := ms[k]
							mu.RUnlock()
							if m == nil {
								b.Errorf("Lookup failed")
								return
							}
							if parent := m.parent(); parent != k.mount {
								b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
								return
							}
							if point := m.point(); point != k.dentry {
								b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
								return
							}
						}
					}()
				}

				ready.Wait()
				b.ResetTimer()
				close(begin)
				end.Wait()
			})
		}
	}
}

func BenchmarkMountSyncMapParallelLookup(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
		for _, numMounts := range benchNumMounts {
			desc := fmt.Sprintf("%dx%d", numG, numMounts)
			b.Run(desc, func(b *testing.B) {
				var ms sync.Map
				keys := make([]VirtualDentry, 0, numMounts)
				for i := 0; i < numMounts; i++ {
					mount := newBenchMount()
					key := mount.getKey()
					ms.Store(key, mount)
					keys = append(keys, key)
				}

				var ready sync.WaitGroup
				begin := make(chan struct{})
				var end sync.WaitGroup
				for g := 0; g < numG; g++ {
					ready.Add(1)
					end.Add(1)
					go func() {
						defer end.Done()
						ready.Done()
						<-begin
						for i := 0; i < b.N; i++ {
							k := keys[i&(numMounts-1)]
							mi, ok := ms.Load(k)
							if !ok {
								b.Errorf("Lookup failed")
								return
							}
							m := mi.(*Mount)
							if parent := m.parent(); parent != k.mount {
								b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
								return
							}
							if point := m.point(); point != k.dentry {
								b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
								return
							}
						}
					}()
				}

				ready.Wait()
				b.ResetTimer()
				close(begin)
				end.Wait()
			})
		}
	}
}

func BenchmarkMountTableNegativeLookup(b *testing.B) {
	for _, numMounts := range benchNumMounts {
		desc := fmt.Sprintf("%d", numMounts)
		b.Run(desc, func(b *testing.B) {
			var mt mountTable
			mt.Init()
			for i := 0; i < numMounts; i++ {
				mt.Insert(newBenchMount())
			}
			negkeys := make([]VirtualDentry, 0, numMounts)
			for i := 0; i < numMounts; i++ {
				negkeys = append(negkeys, VirtualDentry{
					mount:  &Mount{},
					dentry: &Dentry{},
				})
			}

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				k := negkeys[i&(numMounts-1)]
				m := mt.Lookup(k.mount, k.dentry)
				if m != nil {
					b.Fatalf("Lookup got %p, wanted nil", m)
				}
			}
		})
	}
}

func BenchmarkMountMapNegativeLookup(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	for _, numMounts := range benchNumMounts {
		desc := fmt.Sprintf("%d", numMounts)
		b.Run(desc, func(b *testing.B) {
			var mu sync.RWMutex
			ms := make(map[VirtualDentry]*Mount)
			for i := 0; i < numMounts; i++ {
				mount := newBenchMount()
				ms[mount.getKey()] = mount
			}
			negkeys := make([]VirtualDentry, 0, numMounts)
			for i := 0; i < numMounts; i++ {
				negkeys = append(negkeys, VirtualDentry{
					mount:  &Mount{},
					dentry: &Dentry{},
				})
			}

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				k := negkeys[i&(numMounts-1)]
				mu.RLock()
				m := ms[k]
				mu.RUnlock()
				if m != nil {
					b.Fatalf("Lookup got %p, wanted nil", m)
				}
			}
		})
	}
}

func BenchmarkMountSyncMapNegativeLookup(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	for _, numMounts := range benchNumMounts {
		desc := fmt.Sprintf("%d", numMounts)
		b.Run(desc, func(b *testing.B) {
			var ms sync.Map
			for i := 0; i < numMounts; i++ {
				mount := newBenchMount()
				ms.Store(mount.saveKey(), mount)
			}
			negkeys := make([]VirtualDentry, 0, numMounts)
			for i := 0; i < numMounts; i++ {
				negkeys = append(negkeys, VirtualDentry{
					mount:  &Mount{},
					dentry: &Dentry{},
				})
			}

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				k := negkeys[i&(numMounts-1)]
				m, _ := ms.Load(k)
				if m != nil {
					b.Fatalf("Lookup got %p, wanted nil", m)
				}
			}
		})
	}
}

func BenchmarkMountTableInsert(b *testing.B) {
	// Preallocate Mounts so that allocation time isn't included in the
	// benchmark.
	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}

	var mt mountTable
	mt.Init()
	b.ResetTimer()
	for i := range mounts {
		mt.Insert(mounts[i])
	}
}

func BenchmarkMountMapInsert(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	// Preallocate Mounts so that allocation time isn't included in the
	// benchmark.
	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}

	ms := make(map[VirtualDentry]*Mount)
	b.ResetTimer()
	for i := range mounts {
		mount := mounts[i]
		ms[mount.saveKey()] = mount
	}
}

func BenchmarkMountSyncMapInsert(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	// Preallocate Mounts so that allocation time isn't included in the
	// benchmark.
	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}

	var ms sync.Map
	b.ResetTimer()
	for i := range mounts {
		mount := mounts[i]
		ms.Store(mount.saveKey(), mount)
	}
}

func BenchmarkMountTableRemove(b *testing.B) {
	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}
	var mt mountTable
	mt.Init()
	for i := range mounts {
		mt.Insert(mounts[i])
	}

	b.ResetTimer()
	for i := range mounts {
		mt.Remove(mounts[i])
	}
}

func BenchmarkMountMapRemove(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}
	ms := make(map[VirtualDentry]*Mount)
	for i := range mounts {
		mount := mounts[i]
		ms[mount.saveKey()] = mount
	}

	b.ResetTimer()
	for i := range mounts {
		mount := mounts[i]
		delete(ms, mount.saveKey())
	}
}

func BenchmarkMountSyncMapRemove(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}
	var ms sync.Map
	for i := range mounts {
		mount := mounts[i]
		ms.Store(mount.saveKey(), mount)
	}

	b.ResetTimer()
	for i := range mounts {
		mount := mounts[i]
		ms.Delete(mount.saveKey())
	}
}
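
// lockedInsert and lockedRemove are a minimal sketch of the "external
// synchronization between mutators" mentioned in the comment above
// enableComparativeBenchmarks: mountTable.Insert and mountTable.Remove are
// assumed to be serialized by the caller, here with a Mutex from the same
// sync package imported above. The helper names are hypothetical and are not
// used by the benchmarks in this file.
func lockedInsert(mu *sync.Mutex, mt *mountTable, mount *Mount) {
	mu.Lock()
	defer mu.Unlock()
	mt.Insert(mount)
}

func lockedRemove(mu *sync.Mutex, mt *mountTable, mount *Mount) {
	mu.Lock()
	defer mu.Unlock()
	mt.Remove(mount)
}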