gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/pkg/sentry/vfs/mount_test.go (about) 1 // Copyright 2019 The gVisor Authors. 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package vfs 16 17 import ( 18 "fmt" 19 "runtime" 20 "testing" 21 22 "gvisor.dev/gvisor/pkg/sync" 23 ) 24 25 func TestMountTableLookupEmpty(t *testing.T) { 26 var mt mountTable 27 mt.Init() 28 29 parent := &Mount{} 30 point := &Dentry{} 31 if m := mt.Lookup(parent, point); m != nil { 32 t.Errorf("Empty mountTable lookup: got %p, wanted nil", m) 33 } 34 } 35 36 func TestMountTableInsertLookup(t *testing.T) { 37 var mt mountTable 38 mt.Init() 39 40 mount := &Mount{} 41 mount.setKey(VirtualDentry{&Mount{}, &Dentry{}}) 42 mt.Insert(mount) 43 44 if m := mt.Lookup(mount.parent(), mount.point()); m != mount { 45 t.Errorf("mountTable positive lookup: got %p, wanted %p", m, mount) 46 } 47 48 otherParent := &Mount{} 49 if m := mt.Lookup(otherParent, mount.point()); m != nil { 50 t.Errorf("mountTable lookup with wrong mount parent: got %p, wanted nil", m) 51 } 52 otherPoint := &Dentry{} 53 if m := mt.Lookup(mount.parent(), otherPoint); m != nil { 54 t.Errorf("mountTable lookup with wrong mount point: got %p, wanted nil", m) 55 } 56 } 57 58 // TODO(gvisor.dev/issue/1035): concurrent lookup/insertion/removal. 
// benchNumMounts lists the container sizes exercised by each benchmark.
// They must be powers of 2: the benchmark loops select keys with
// i&(numMounts-1), which only cycles through every key when numMounts is a
// power of 2.
var benchNumMounts = []int{1 << 2, 1 << 5, 1 << 8}

// For all of the following:
//
//   - BenchmarkMountTableFoo tests usage pattern "Foo" for mountTable.
//
//   - BenchmarkMountMapFoo tests usage pattern "Foo" for a
//     sync.RWMutex-protected map. (Mutator benchmarks do not use a RWMutex,
//     since mountTable also requires external synchronization between
//     mutators.)
//
//   - BenchmarkMountSyncMapFoo tests usage pattern "Foo" for a sync.Map.
//
// ParallelLookup is by far the most common and performance-sensitive operation
// for this application. NegativeLookup is also important, but less so (only
// relevant with multiple mount namespaces and significant differences in
// mounts between them). Insertion and removal are benchmarked for
// completeness.
//
// Set enableComparativeBenchmarks to true to run the map/sync.Map baselines;
// by default only the mountTable benchmarks run.
const enableComparativeBenchmarks = false

// newBenchMount returns a Mount keyed by a fresh (parent, point) pair, so
// each returned Mount occupies a distinct key in whichever container the
// benchmark inserts it into.
func newBenchMount() *Mount {
	mount := &Mount{}
	mount.setKey(VirtualDentry{&Mount{}, &Dentry{}})
	return mount
}

// BenchmarkMountTableParallelLookup measures mountTable.Lookup hit latency
// with numG concurrent reader goroutines over numMounts entries, for each
// power-of-2 numG up to GOMAXPROCS and each size in benchNumMounts.
func BenchmarkMountTableParallelLookup(b *testing.B) {
	for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
		for _, numMounts := range benchNumMounts {
			desc := fmt.Sprintf("%dx%d", numG, numMounts)
			b.Run(desc, func(b *testing.B) {
				var mt mountTable
				mt.Init()
				keys := make([]VirtualDentry, 0, numMounts)
				for i := 0; i < numMounts; i++ {
					mount := newBenchMount()
					mt.Insert(mount)
					keys = append(keys, mount.saveKey())
				}

				// Start all readers, then release them simultaneously via
				// close(begin) so that setup/scheduling cost stays outside
				// the timed region.
				var ready sync.WaitGroup
				begin := make(chan struct{})
				var end sync.WaitGroup
				for g := 0; g < numG; g++ {
					ready.Add(1)
					end.Add(1)
					go func() {
						defer end.Done()
						ready.Done()
						<-begin
						for i := 0; i < b.N; i++ {
							// numMounts is a power of 2, so this cycles
							// through all keys.
							k := keys[i&(numMounts-1)]
							m := mt.Lookup(k.mount, k.dentry)
							if m == nil {
								b.Errorf("Lookup failed")
								return
							}
							if parent := m.parent(); parent != k.mount {
								b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
								return
							}
							if point := m.point(); point != k.dentry {
								b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
								return
							}
						}
					}()
				}

				ready.Wait()
				b.ResetTimer()
				close(begin)
				end.Wait()
			})
		}
	}
}

// BenchmarkMountMapParallelLookup is the RWMutex-protected-map baseline for
// BenchmarkMountTableParallelLookup.
func BenchmarkMountMapParallelLookup(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
		for _, numMounts := range benchNumMounts {
			desc := fmt.Sprintf("%dx%d", numG, numMounts)
			b.Run(desc, func(b *testing.B) {
				var mu sync.RWMutex
				ms := make(map[VirtualDentry]*Mount)
				keys := make([]VirtualDentry, 0, numMounts)
				for i := 0; i < numMounts; i++ {
					mount := newBenchMount()
					key := mount.saveKey()
					ms[key] = mount
					keys = append(keys, key)
				}

				var ready sync.WaitGroup
				begin := make(chan struct{})
				var end sync.WaitGroup
				for g := 0; g < numG; g++ {
					ready.Add(1)
					end.Add(1)
					go func() {
						defer end.Done()
						ready.Done()
						<-begin
						for i := 0; i < b.N; i++ {
							k := keys[i&(numMounts-1)]
							mu.RLock()
							m := ms[k]
							mu.RUnlock()
							if m == nil {
								b.Errorf("Lookup failed")
								return
							}
							if parent := m.parent(); parent != k.mount {
								b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
								return
							}
							if point := m.point(); point != k.dentry {
								b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
								return
							}
						}
					}()
				}

				ready.Wait()
				b.ResetTimer()
				close(begin)
				end.Wait()
			})
		}
	}
}

// BenchmarkMountSyncMapParallelLookup is the sync.Map baseline for
// BenchmarkMountTableParallelLookup.
func BenchmarkMountSyncMapParallelLookup(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	for numG, maxG := 1, runtime.GOMAXPROCS(0); numG >= 0 && numG <= maxG; numG *= 2 {
		for _, numMounts := range benchNumMounts {
			desc := fmt.Sprintf("%dx%d", numG, numMounts)
			b.Run(desc, func(b *testing.B) {
				var ms sync.Map
				keys := make([]VirtualDentry, 0, numMounts)
				for i := 0; i < numMounts; i++ {
					mount := newBenchMount()
					// NOTE(review): this uses getKey() where the other
					// benchmarks use saveKey() — presumably equivalent for a
					// freshly keyed Mount, but worth confirming.
					key := mount.getKey()
					ms.Store(key, mount)
					keys = append(keys, key)
				}

				var ready sync.WaitGroup
				begin := make(chan struct{})
				var end sync.WaitGroup
				for g := 0; g < numG; g++ {
					ready.Add(1)
					end.Add(1)
					go func() {
						defer end.Done()
						ready.Done()
						<-begin
						for i := 0; i < b.N; i++ {
							k := keys[i&(numMounts-1)]
							mi, ok := ms.Load(k)
							if !ok {
								b.Errorf("Lookup failed")
								return
							}
							m := mi.(*Mount)
							if parent := m.parent(); parent != k.mount {
								b.Errorf("Lookup returned mount with parent %p, wanted %p", parent, k.mount)
								return
							}
							if point := m.point(); point != k.dentry {
								b.Errorf("Lookup returned mount with point %p, wanted %p", point, k.dentry)
								return
							}
						}
					}()
				}

				ready.Wait()
				b.ResetTimer()
				close(begin)
				end.Wait()
			})
		}
	}
}

// BenchmarkMountTableNegativeLookup measures mountTable.Lookup miss latency:
// the table holds numMounts entries, and every probed key is absent.
func BenchmarkMountTableNegativeLookup(b *testing.B) {
	for _, numMounts := range benchNumMounts {
		desc := fmt.Sprintf("%d", numMounts)
		b.Run(desc, func(b *testing.B) {
			var mt mountTable
			mt.Init()
			for i := 0; i < numMounts; i++ {
				mt.Insert(newBenchMount())
			}
			// Keys built from fresh Mount/Dentry pointers cannot match any
			// inserted entry.
			negkeys := make([]VirtualDentry, 0, numMounts)
			for i := 0; i < numMounts; i++ {
				negkeys = append(negkeys, VirtualDentry{
					mount:  &Mount{},
					dentry: &Dentry{},
				})
			}

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				k := negkeys[i&(numMounts-1)]
				m := mt.Lookup(k.mount, k.dentry)
				if m != nil {
					b.Fatalf("Lookup got %p, wanted nil", m)
				}
			}
		})
	}
}

// BenchmarkMountMapNegativeLookup is the RWMutex-protected-map baseline for
// BenchmarkMountTableNegativeLookup.
func BenchmarkMountMapNegativeLookup(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	for _, numMounts := range benchNumMounts {
		desc := fmt.Sprintf("%d", numMounts)
		b.Run(desc, func(b *testing.B) {
			var mu sync.RWMutex
			ms := make(map[VirtualDentry]*Mount)
			for i := 0; i < numMounts; i++ {
				mount := newBenchMount()
				ms[mount.getKey()] = mount
			}
			negkeys := make([]VirtualDentry, 0, numMounts)
			for i := 0; i < numMounts; i++ {
				negkeys = append(negkeys, VirtualDentry{
					mount:  &Mount{},
					dentry: &Dentry{},
				})
			}

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				k := negkeys[i&(numMounts-1)]
				mu.RLock()
				m := ms[k]
				mu.RUnlock()
				if m != nil {
					b.Fatalf("Lookup got %p, wanted nil", m)
				}
			}
		})
	}
}

// BenchmarkMountSyncMapNegativeLookup is the sync.Map baseline for
// BenchmarkMountTableNegativeLookup.
func BenchmarkMountSyncMapNegativeLookup(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	for _, numMounts := range benchNumMounts {
		desc := fmt.Sprintf("%d", numMounts)
		b.Run(desc, func(b *testing.B) {
			var ms sync.Map
			for i := 0; i < numMounts; i++ {
				mount := newBenchMount()
				ms.Store(mount.saveKey(), mount)
			}
			negkeys := make([]VirtualDentry, 0, numMounts)
			for i := 0; i < numMounts; i++ {
				negkeys = append(negkeys, VirtualDentry{
					mount:  &Mount{},
					dentry: &Dentry{},
				})
			}

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				k := negkeys[i&(numMounts-1)]
				m, _ := ms.Load(k)
				if m != nil {
					b.Fatalf("Lookup got %p, wanted nil", m)
				}
			}
		})
	}
}

// BenchmarkMountTableInsert measures mountTable.Insert, one insertion per
// iteration into a single growing table.
func BenchmarkMountTableInsert(b *testing.B) {
	// Preallocate Mounts so that allocation time isn't included in the
	// benchmark.
	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}

	var mt mountTable
	mt.Init()
	b.ResetTimer()
	for i := range mounts {
		mt.Insert(mounts[i])
	}
}

// BenchmarkMountMapInsert is the map baseline for BenchmarkMountTableInsert.
// No mutex is taken: mountTable mutators also require external
// synchronization, so an unlocked map is the fair comparison.
func BenchmarkMountMapInsert(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	// Preallocate Mounts so that allocation time isn't included in the
	// benchmark.
	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}

	ms := make(map[VirtualDentry]*Mount)
	b.ResetTimer()
	for i := range mounts {
		mount := mounts[i]
		ms[mount.saveKey()] = mount
	}
}

// BenchmarkMountSyncMapInsert is the sync.Map baseline for
// BenchmarkMountTableInsert.
func BenchmarkMountSyncMapInsert(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	// Preallocate Mounts so that allocation time isn't included in the
	// benchmark.
	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}

	var ms sync.Map
	b.ResetTimer()
	for i := range mounts {
		mount := mounts[i]
		ms.Store(mount.saveKey(), mount)
	}
}

// BenchmarkMountTableRemove measures mountTable.Remove, draining a table of
// b.N entries one removal per iteration.
func BenchmarkMountTableRemove(b *testing.B) {
	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}
	var mt mountTable
	mt.Init()
	for i := range mounts {
		mt.Insert(mounts[i])
	}

	b.ResetTimer()
	for i := range mounts {
		mt.Remove(mounts[i])
	}
}

// BenchmarkMountMapRemove is the map baseline for BenchmarkMountTableRemove
// (unlocked, for the same reason as BenchmarkMountMapInsert).
func BenchmarkMountMapRemove(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}
	ms := make(map[VirtualDentry]*Mount)
	for i := range mounts {
		mount := mounts[i]
		ms[mount.saveKey()] = mount
	}

	b.ResetTimer()
	for i := range mounts {
		mount := mounts[i]
		delete(ms, mount.saveKey())
	}
}

// BenchmarkMountSyncMapRemove is the sync.Map baseline for
// BenchmarkMountTableRemove.
func BenchmarkMountSyncMapRemove(b *testing.B) {
	if !enableComparativeBenchmarks {
		b.Skipf("comparative benchmarks are disabled")
	}

	mounts := make([]*Mount, 0, b.N)
	for i := 0; i < b.N; i++ {
		mounts = append(mounts, newBenchMount())
	}
	var ms sync.Map
	for i := range mounts {
		mount := mounts[i]
		ms.Store(mount.saveKey(), mount)
	}

	b.ResetTimer()
	for i := range mounts {
		mount := mounts[i]
		ms.Delete(mount.saveKey())
	}
}