// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"io"
	"testing"
	"time"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/manifest"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
	"github.com/cockroachdb/pebble/record"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/stretchr/testify/require"
)

// writeAndIngest builds a single-entry sstable named filename in the "ext"
// directory of mem (containing the key k with value v) and ingests it into d.
// Any failure aborts the calling test.
func writeAndIngest(t *testing.T, mem vfs.FS, d *DB, k InternalKey, v []byte, filename string) {
	path := mem.PathJoin("ext", filename)
	f, err := mem.Create(path)
	require.NoError(t, err)
	w := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{})
	require.NoError(t, w.Add(k, v))
	require.NoError(t, w.Close())
	require.NoError(t, d.Ingest([]string{path}))
}

// checkBackingSize asserts that the cached aggregate fileBackingSize matches
// the sum of the sizes of the individual backings in fileBackingMap.
//
// d.mu should be held. logLock should not be held.
func checkBackingSize(t *testing.T, d *DB) {
	d.mu.versions.logLock()
	var backingSizeSum uint64
	for _, backing := range d.mu.versions.backingState.fileBackingMap {
		backingSizeSum += backing.Size
	}
	require.Equal(t, backingSizeSum, d.mu.versions.backingState.fileBackingSize)
	d.mu.versions.logUnlock()
}

// TestLatestRefCounting sanity checks the ref counting implementation for
// FileMetadata.latestRefs, and makes sure that the zombie table implementation
// works when the version edit contains virtual sstables. It also checks that
// we're adding the physical sstable to the obsolete tables list iff the file is
// truly obsolete.
47 func TestLatestRefCounting(t *testing.T) { 48 mem := vfs.NewMem() 49 require.NoError(t, mem.MkdirAll("ext", 0755)) 50 51 opts := &Options{ 52 FS: mem, 53 MaxManifestFileSize: 1, 54 DisableAutomaticCompactions: true, 55 FormatMajorVersion: FormatVirtualSSTables, 56 } 57 d, err := Open("", opts) 58 require.NoError(t, err) 59 60 err = d.Set([]byte{'a'}, []byte{'a'}, nil) 61 require.NoError(t, err) 62 err = d.Set([]byte{'b'}, []byte{'b'}, nil) 63 require.NoError(t, err) 64 65 err = d.Flush() 66 require.NoError(t, err) 67 68 iter := d.mu.versions.currentVersion().Levels[0].Iter() 69 var f *fileMetadata = iter.First() 70 require.NotNil(t, f) 71 require.Equal(t, 1, int(f.LatestRefs())) 72 require.Equal(t, 0, len(d.mu.versions.obsoleteTables)) 73 74 // Grab some new file nums. 75 d.mu.Lock() 76 f1 := FileNum(d.mu.versions.nextFileNum) 77 f2 := f1 + 1 78 d.mu.versions.nextFileNum += 2 79 d.mu.Unlock() 80 81 m1 := &manifest.FileMetadata{ 82 FileBacking: f.FileBacking, 83 FileNum: f1, 84 CreationTime: time.Now().Unix(), 85 Size: f.Size / 2, 86 SmallestSeqNum: f.SmallestSeqNum, 87 LargestSeqNum: f.LargestSeqNum, 88 Smallest: base.MakeInternalKey([]byte{'a'}, f.Smallest.SeqNum(), InternalKeyKindSet), 89 Largest: base.MakeInternalKey([]byte{'a'}, f.Smallest.SeqNum(), InternalKeyKindSet), 90 HasPointKeys: true, 91 Virtual: true, 92 } 93 94 m2 := &manifest.FileMetadata{ 95 FileBacking: f.FileBacking, 96 FileNum: f2, 97 CreationTime: time.Now().Unix(), 98 Size: f.Size - m1.Size, 99 SmallestSeqNum: f.SmallestSeqNum, 100 LargestSeqNum: f.LargestSeqNum, 101 Smallest: base.MakeInternalKey([]byte{'b'}, f.Largest.SeqNum(), InternalKeyKindSet), 102 Largest: base.MakeInternalKey([]byte{'b'}, f.Largest.SeqNum(), InternalKeyKindSet), 103 HasPointKeys: true, 104 Virtual: true, 105 } 106 107 m1.LargestPointKey = m1.Largest 108 m1.SmallestPointKey = m1.Smallest 109 110 m2.LargestPointKey = m2.Largest 111 m2.SmallestPointKey = m2.Smallest 112 113 m1.ValidateVirtual(f) 114 
d.checkVirtualBounds(m1) 115 m2.ValidateVirtual(f) 116 d.checkVirtualBounds(m2) 117 118 fileMetrics := func(ve *versionEdit) map[int]*LevelMetrics { 119 metrics := newFileMetrics(ve.NewFiles) 120 for de, f := range ve.DeletedFiles { 121 lm := metrics[de.Level] 122 if lm == nil { 123 lm = &LevelMetrics{} 124 metrics[de.Level] = lm 125 } 126 metrics[de.Level].NumFiles-- 127 metrics[de.Level].Size -= int64(f.Size) 128 } 129 return metrics 130 } 131 132 d.mu.Lock() 133 defer d.mu.Unlock() 134 applyVE := func(ve *versionEdit) error { 135 d.mu.versions.logLock() 136 jobID := d.mu.nextJobID 137 d.mu.nextJobID++ 138 139 err := d.mu.versions.logAndApply(jobID, ve, fileMetrics(ve), false, func() []compactionInfo { 140 return d.getInProgressCompactionInfoLocked(nil) 141 }) 142 d.updateReadStateLocked(nil) 143 return err 144 } 145 146 // Virtualize f. 147 ve := manifest.VersionEdit{} 148 d1 := manifest.DeletedFileEntry{Level: 0, FileNum: f.FileNum} 149 n1 := manifest.NewFileEntry{Level: 0, Meta: m1} 150 n2 := manifest.NewFileEntry{Level: 0, Meta: m2} 151 152 ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata) 153 ve.DeletedFiles[d1] = f 154 ve.NewFiles = append(ve.NewFiles, n1) 155 ve.NewFiles = append(ve.NewFiles, n2) 156 ve.CreatedBackingTables = append(ve.CreatedBackingTables, f.FileBacking) 157 158 require.NoError(t, applyVE(&ve)) 159 // 2 latestRefs from 2 virtual sstables in the latest version which refer 160 // to the physical sstable. 161 require.Equal(t, 2, int(m1.LatestRefs())) 162 require.Equal(t, 0, len(d.mu.versions.obsoleteTables)) 163 require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap)) 164 _, ok := d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum] 165 require.True(t, ok) 166 require.Equal(t, f.Size, m2.FileBacking.VirtualizedSize.Load()) 167 checkBackingSize(t, d) 168 169 // Make sure that f is not present in zombie list, because it is not yet a 170 // zombie. 
171 require.Equal(t, 0, len(d.mu.versions.zombieTables)) 172 173 // Delete the virtual sstable m1. 174 ve = manifest.VersionEdit{} 175 d1 = manifest.DeletedFileEntry{Level: 0, FileNum: m1.FileNum} 176 ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata) 177 ve.DeletedFiles[d1] = m1 178 require.NoError(t, applyVE(&ve)) 179 180 // Only one virtual sstable in the latest version, confirm that the latest 181 // version ref counting is correct. 182 require.Equal(t, 1, int(m2.LatestRefs())) 183 require.Equal(t, 0, len(d.mu.versions.zombieTables)) 184 require.Equal(t, 0, len(d.mu.versions.obsoleteTables)) 185 require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap)) 186 _, ok = d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum] 187 require.True(t, ok) 188 require.Equal(t, m2.Size, m2.FileBacking.VirtualizedSize.Load()) 189 checkBackingSize(t, d) 190 191 // Move m2 from L0 to L6 to test the move compaction case. 192 ve = manifest.VersionEdit{} 193 d1 = manifest.DeletedFileEntry{Level: 0, FileNum: m2.FileNum} 194 n1 = manifest.NewFileEntry{Level: 6, Meta: m2} 195 ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata) 196 ve.DeletedFiles[d1] = m2 197 ve.NewFiles = append(ve.NewFiles, n1) 198 require.NoError(t, applyVE(&ve)) 199 checkBackingSize(t, d) 200 201 require.Equal(t, 1, int(m2.LatestRefs())) 202 require.Equal(t, 0, len(d.mu.versions.zombieTables)) 203 require.Equal(t, 0, len(d.mu.versions.obsoleteTables)) 204 require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap)) 205 _, ok = d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum] 206 require.True(t, ok) 207 require.Equal(t, m2.Size, m2.FileBacking.VirtualizedSize.Load()) 208 209 // Delete m2 from L6. 
210 ve = manifest.VersionEdit{} 211 d1 = manifest.DeletedFileEntry{Level: 6, FileNum: m2.FileNum} 212 ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata) 213 ve.DeletedFiles[d1] = m2 214 require.NoError(t, applyVE(&ve)) 215 checkBackingSize(t, d) 216 217 // All virtual sstables are gone. 218 require.Equal(t, 0, int(m2.LatestRefs())) 219 require.Equal(t, 1, len(d.mu.versions.zombieTables)) 220 require.Equal(t, f.Size, d.mu.versions.zombieTables[f.FileBacking.DiskFileNum]) 221 require.Equal(t, 0, len(d.mu.versions.backingState.fileBackingMap)) 222 _, ok = d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum] 223 require.False(t, ok) 224 require.Equal(t, 0, int(m2.FileBacking.VirtualizedSize.Load())) 225 checkBackingSize(t, d) 226 } 227 228 // TODO(bananabrick): Convert TestLatestRefCounting and this test into a single 229 // datadriven test. 230 func TestVirtualSSTableManifestReplay(t *testing.T) { 231 mem := vfs.NewMem() 232 require.NoError(t, mem.MkdirAll("ext", 0755)) 233 234 opts := &Options{ 235 FormatMajorVersion: FormatVirtualSSTables, 236 FS: mem, 237 MaxManifestFileSize: 1, 238 DisableAutomaticCompactions: true, 239 } 240 d, err := Open("", opts) 241 require.NoError(t, err) 242 243 err = d.Set([]byte{'a'}, []byte{'a'}, nil) 244 require.NoError(t, err) 245 err = d.Set([]byte{'b'}, []byte{'b'}, nil) 246 require.NoError(t, err) 247 248 err = d.Flush() 249 require.NoError(t, err) 250 251 iter := d.mu.versions.currentVersion().Levels[0].Iter() 252 var f *fileMetadata = iter.First() 253 require.NotNil(t, f) 254 require.Equal(t, 1, int(f.LatestRefs())) 255 require.Equal(t, 0, len(d.mu.versions.obsoleteTables)) 256 257 // Grab some new file nums. 
258 d.mu.Lock() 259 f1 := FileNum(d.mu.versions.nextFileNum) 260 f2 := f1 + 1 261 d.mu.versions.nextFileNum += 2 262 d.mu.Unlock() 263 264 m1 := &manifest.FileMetadata{ 265 FileBacking: f.FileBacking, 266 FileNum: f1, 267 CreationTime: time.Now().Unix(), 268 Size: f.Size / 2, 269 SmallestSeqNum: f.SmallestSeqNum, 270 LargestSeqNum: f.LargestSeqNum, 271 Smallest: base.MakeInternalKey([]byte{'a'}, f.Smallest.SeqNum(), InternalKeyKindSet), 272 Largest: base.MakeInternalKey([]byte{'a'}, f.Smallest.SeqNum(), InternalKeyKindSet), 273 HasPointKeys: true, 274 Virtual: true, 275 } 276 277 m2 := &manifest.FileMetadata{ 278 FileBacking: f.FileBacking, 279 FileNum: f2, 280 CreationTime: time.Now().Unix(), 281 Size: f.Size - m1.Size, 282 SmallestSeqNum: f.SmallestSeqNum, 283 LargestSeqNum: f.LargestSeqNum, 284 Smallest: base.MakeInternalKey([]byte{'b'}, f.Largest.SeqNum(), InternalKeyKindSet), 285 Largest: base.MakeInternalKey([]byte{'b'}, f.Largest.SeqNum(), InternalKeyKindSet), 286 HasPointKeys: true, 287 Virtual: true, 288 } 289 290 m1.LargestPointKey = m1.Largest 291 m1.SmallestPointKey = m1.Smallest 292 m1.Stats.NumEntries = 1 293 294 m2.LargestPointKey = m2.Largest 295 m2.SmallestPointKey = m2.Smallest 296 m2.Stats.NumEntries = 1 297 298 m1.ValidateVirtual(f) 299 d.checkVirtualBounds(m1) 300 m2.ValidateVirtual(f) 301 d.checkVirtualBounds(m2) 302 303 fileMetrics := func(ve *versionEdit) map[int]*LevelMetrics { 304 metrics := newFileMetrics(ve.NewFiles) 305 for de, f := range ve.DeletedFiles { 306 lm := metrics[de.Level] 307 if lm == nil { 308 lm = &LevelMetrics{} 309 metrics[de.Level] = lm 310 } 311 metrics[de.Level].NumFiles-- 312 metrics[de.Level].Size -= int64(f.Size) 313 } 314 return metrics 315 } 316 317 d.mu.Lock() 318 applyVE := func(ve *versionEdit) error { 319 d.mu.versions.logLock() 320 jobID := d.mu.nextJobID 321 d.mu.nextJobID++ 322 323 err := d.mu.versions.logAndApply(jobID, ve, fileMetrics(ve), false, func() []compactionInfo { 324 return 
d.getInProgressCompactionInfoLocked(nil) 325 }) 326 d.updateReadStateLocked(nil) 327 return err 328 } 329 330 // Virtualize f. 331 ve := manifest.VersionEdit{} 332 d1 := manifest.DeletedFileEntry{Level: 0, FileNum: f.FileNum} 333 n1 := manifest.NewFileEntry{Level: 0, Meta: m1} 334 n2 := manifest.NewFileEntry{Level: 0, Meta: m2} 335 336 ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata) 337 ve.DeletedFiles[d1] = f 338 ve.NewFiles = append(ve.NewFiles, n1) 339 ve.NewFiles = append(ve.NewFiles, n2) 340 ve.CreatedBackingTables = append(ve.CreatedBackingTables, f.FileBacking) 341 342 require.NoError(t, applyVE(&ve)) 343 checkBackingSize(t, d) 344 d.mu.Unlock() 345 346 require.Equal(t, 2, int(m1.LatestRefs())) 347 require.Equal(t, 0, len(d.mu.versions.obsoleteTables)) 348 require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap)) 349 _, ok := d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum] 350 require.True(t, ok) 351 require.Equal(t, f.Size, m2.FileBacking.VirtualizedSize.Load()) 352 353 // Snapshot version edit will be written to a new manifest due to the flush. 
354 d.Set([]byte{'c'}, []byte{'c'}, nil) 355 d.Flush() 356 357 require.NoError(t, d.Close()) 358 d, err = Open("", opts) 359 require.NoError(t, err) 360 361 d.mu.Lock() 362 it := d.mu.versions.currentVersion().Levels[0].Iter() 363 var virtualFile *fileMetadata 364 for f := it.First(); f != nil; f = it.Next() { 365 if f.Virtual { 366 virtualFile = f 367 break 368 } 369 } 370 371 require.Equal(t, 2, int(virtualFile.LatestRefs())) 372 require.Equal(t, 0, len(d.mu.versions.obsoleteTables)) 373 require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap)) 374 _, ok = d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum] 375 require.True(t, ok) 376 require.Equal(t, f.Size, virtualFile.FileBacking.VirtualizedSize.Load()) 377 checkBackingSize(t, d) 378 d.mu.Unlock() 379 380 // Will cause the virtual sstables to be deleted, and the file backing should 381 // also be removed. 382 d.Compact([]byte{'a'}, []byte{'z'}, false) 383 384 d.mu.Lock() 385 virtualFile = nil 386 it = d.mu.versions.currentVersion().Levels[0].Iter() 387 for f := it.First(); f != nil; f = it.Next() { 388 if f.Virtual { 389 virtualFile = f 390 break 391 } 392 } 393 require.Nil(t, virtualFile) 394 require.Equal(t, 0, len(d.mu.versions.obsoleteTables)) 395 require.Equal(t, 0, len(d.mu.versions.backingState.fileBackingMap)) 396 checkBackingSize(t, d) 397 d.mu.Unlock() 398 399 // Close and restart to make sure that the new snapshot written during 400 // compaction doesn't have the file backing. 
401 require.NoError(t, d.Close()) 402 d, err = Open("", opts) 403 require.NoError(t, err) 404 405 d.mu.Lock() 406 virtualFile = nil 407 it = d.mu.versions.currentVersion().Levels[0].Iter() 408 for f := it.First(); f != nil; f = it.Next() { 409 if f.Virtual { 410 virtualFile = f 411 break 412 } 413 } 414 require.Nil(t, virtualFile) 415 require.Equal(t, 0, len(d.mu.versions.obsoleteTables)) 416 require.Equal(t, 0, len(d.mu.versions.backingState.fileBackingMap)) 417 checkBackingSize(t, d) 418 d.mu.Unlock() 419 require.NoError(t, d.Close()) 420 } 421 422 func TestVersionSetCheckpoint(t *testing.T) { 423 mem := vfs.NewMem() 424 require.NoError(t, mem.MkdirAll("ext", 0755)) 425 426 opts := &Options{ 427 FS: mem, 428 MaxManifestFileSize: 1, 429 } 430 d, err := Open("", opts) 431 require.NoError(t, err) 432 433 // Multiple manifest files are created such that the latest one must have a correct snapshot 434 // of the preceding state for the DB to be opened correctly and see the written data. 435 // Snapshot has no files, so first edit will cause manifest rotation. 436 writeAndIngest(t, mem, d, base.MakeInternalKey([]byte("a"), 0, InternalKeyKindSet), []byte("b"), "a") 437 // Snapshot has no files, and manifest has an edit from the previous ingest, 438 // so this second ingest will cause manifest rotation. 
439 writeAndIngest(t, mem, d, base.MakeInternalKey([]byte("c"), 0, InternalKeyKindSet), []byte("d"), "c") 440 require.NoError(t, d.Close()) 441 d, err = Open("", opts) 442 require.NoError(t, err) 443 checkValue := func(k string, expected string) { 444 v, closer, err := d.Get([]byte(k)) 445 require.NoError(t, err) 446 require.Equal(t, expected, string(v)) 447 closer.Close() 448 } 449 checkValue("a", "b") 450 checkValue("c", "d") 451 require.NoError(t, d.Close()) 452 } 453 454 func TestVersionSetSeqNums(t *testing.T) { 455 mem := vfs.NewMem() 456 require.NoError(t, mem.MkdirAll("ext", 0755)) 457 458 opts := &Options{ 459 FS: mem, 460 MaxManifestFileSize: 1, 461 } 462 d, err := Open("", opts) 463 require.NoError(t, err) 464 465 // Snapshot has no files, so first edit will cause manifest rotation. 466 writeAndIngest(t, mem, d, base.MakeInternalKey([]byte("a"), 0, InternalKeyKindSet), []byte("b"), "a") 467 // Snapshot has no files, and manifest has an edit from the previous ingest, 468 // so this second ingest will cause manifest rotation. 469 writeAndIngest(t, mem, d, base.MakeInternalKey([]byte("c"), 0, InternalKeyKindSet), []byte("d"), "c") 470 require.NoError(t, d.Close()) 471 d, err = Open("", opts) 472 require.NoError(t, err) 473 defer d.Close() 474 d.TestOnlyWaitForCleaning() 475 476 // Check that the manifest has the correct LastSeqNum, equalling the highest 477 // observed SeqNum. 
478 filenames, err := mem.List("") 479 require.NoError(t, err) 480 var manifest vfs.File 481 for _, filename := range filenames { 482 fileType, _, ok := base.ParseFilename(mem, filename) 483 if ok && fileType == fileTypeManifest { 484 manifest, err = mem.Open(filename) 485 require.NoError(t, err) 486 } 487 } 488 require.NotNil(t, manifest) 489 defer manifest.Close() 490 rr := record.NewReader(manifest, 0 /* logNum */) 491 lastSeqNum := uint64(0) 492 for { 493 r, err := rr.Next() 494 if err == io.EOF { 495 break 496 } 497 require.NoError(t, err) 498 var ve versionEdit 499 err = ve.Decode(r) 500 require.NoError(t, err) 501 if ve.LastSeqNum != 0 { 502 lastSeqNum = ve.LastSeqNum 503 } 504 } 505 // 2 ingestions happened, so LastSeqNum should equal base.SeqNumStart + 1. 506 require.Equal(t, uint64(11), lastSeqNum) 507 // logSeqNum is always one greater than the last assigned sequence number. 508 require.Equal(t, d.mu.versions.logSeqNum.Load(), lastSeqNum+1) 509 }