// Copyright 2020 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.

package pebble

import (
	"io"
	"testing"
	"time"

	"github.com/cockroachdb/pebble/internal/base"
	"github.com/cockroachdb/pebble/internal/manifest"
	"github.com/cockroachdb/pebble/objstorage/objstorageprovider"
	"github.com/cockroachdb/pebble/record"
	"github.com/cockroachdb/pebble/sstable"
	"github.com/cockroachdb/pebble/vfs"
	"github.com/stretchr/testify/require"
)

// writeAndIngest builds a single-entry sstable named filename under the "ext"
// directory of mem, containing key k with value v, and ingests it into d.
// Any failure aborts the test immediately via require.
func writeAndIngest(t *testing.T, mem vfs.FS, d *DB, k InternalKey, v []byte, filename string) {
	path := mem.PathJoin("ext", filename)
	f, err := mem.Create(path)
	require.NoError(t, err)
	w := sstable.NewWriter(objstorageprovider.NewFileWritable(f), sstable.WriterOptions{})
	require.NoError(t, w.Add(k, v))
	require.NoError(t, w.Close())
	require.NoError(t, d.Ingest([]string{path}))
}

// checkBackingSize verifies that the cached aggregate fileBackingSize matches
// the sum of the sizes of all entries in fileBackingMap.
//
// d.mu should be held. logLock should not be held (it is acquired here).
func checkBackingSize(t *testing.T, d *DB) {
	d.mu.versions.logLock()
	var backingSizeSum uint64
	for _, backing := range d.mu.versions.backingState.fileBackingMap {
		backingSizeSum += backing.Size
	}
	require.Equal(t, backingSizeSum, d.mu.versions.backingState.fileBackingSize)
	d.mu.versions.logUnlock()
}

// TestLatestRefCounting sanity checks the ref counting implementation for
// FileMetadata.latestRefs, and makes sure that the zombie table implementation
// works when the version edit contains virtual sstables. It also checks that
// we're adding the physical sstable to the obsolete tables list iff the file is
// truly obsolete.
func TestLatestRefCounting(t *testing.T) {
	mem := vfs.NewMem()
	require.NoError(t, mem.MkdirAll("ext", 0755))

	opts := &Options{
		FS:                          mem,
		MaxManifestFileSize:         1,
		DisableAutomaticCompactions: true,
		FormatMajorVersion:          FormatVirtualSSTables,
	}
	d, err := Open("", opts)
	require.NoError(t, err)

	err = d.Set([]byte{'a'}, []byte{'a'}, nil)
	require.NoError(t, err)
	err = d.Set([]byte{'b'}, []byte{'b'}, nil)
	require.NoError(t, err)

	err = d.Flush()
	require.NoError(t, err)

	// The flush produced a single physical sstable in L0; it starts with one
	// latest-version ref and no obsolete tables.
	iter := d.mu.versions.currentVersion().Levels[0].Iter()
	var f *fileMetadata = iter.First()
	require.NotNil(t, f)
	require.Equal(t, 1, int(f.LatestRefs()))
	require.Equal(t, 0, len(d.mu.versions.obsoleteTables))

	// Grab some new file nums.
	d.mu.Lock()
	f1 := d.mu.versions.nextFileNum
	f2 := f1 + 1
	d.mu.versions.nextFileNum += 2
	d.mu.Unlock()

	// m1 and m2 are virtual sstables sharing f's FileBacking; m1 covers key
	// 'a' and m2 covers key 'b', together splitting f's size.
	m1 := &manifest.FileMetadata{
		FileBacking:    f.FileBacking,
		FileNum:        f1,
		CreationTime:   time.Now().Unix(),
		Size:           f.Size / 2,
		SmallestSeqNum: f.SmallestSeqNum,
		LargestSeqNum:  f.LargestSeqNum,
		Smallest:       base.MakeInternalKey([]byte{'a'}, f.Smallest.SeqNum(), InternalKeyKindSet),
		Largest:        base.MakeInternalKey([]byte{'a'}, f.Smallest.SeqNum(), InternalKeyKindSet),
		HasPointKeys:   true,
		Virtual:        true,
	}

	m2 := &manifest.FileMetadata{
		FileBacking:    f.FileBacking,
		FileNum:        f2,
		CreationTime:   time.Now().Unix(),
		Size:           f.Size - m1.Size,
		SmallestSeqNum: f.SmallestSeqNum,
		LargestSeqNum:  f.LargestSeqNum,
		Smallest:       base.MakeInternalKey([]byte{'b'}, f.Largest.SeqNum(), InternalKeyKindSet),
		Largest:        base.MakeInternalKey([]byte{'b'}, f.Largest.SeqNum(), InternalKeyKindSet),
		HasPointKeys:   true,
		Virtual:        true,
	}

	m1.LargestPointKey = m1.Largest
	m1.SmallestPointKey = m1.Smallest

	m2.LargestPointKey = m2.Largest
	m2.SmallestPointKey = m2.Smallest

	m1.ValidateVirtual(f)
	d.checkVirtualBounds(m1)
	m2.ValidateVirtual(f)
	d.checkVirtualBounds(m2)

	// fileMetrics derives per-level LevelMetrics deltas (file count and byte
	// size) from a version edit's new and deleted files.
	fileMetrics := func(ve *versionEdit) map[int]*LevelMetrics {
		metrics := newFileMetrics(ve.NewFiles)
		for de, f := range ve.DeletedFiles {
			lm := metrics[de.Level]
			if lm == nil {
				lm = &LevelMetrics{}
				metrics[de.Level] = lm
			}
			metrics[de.Level].NumFiles--
			metrics[de.Level].Size -= int64(f.Size)
		}
		return metrics
	}

	d.mu.Lock()
	defer d.mu.Unlock()
	// applyVE logs and applies ve under the manifest log lock, then refreshes
	// the read state. d.mu must already be held (logAndApply temporarily
	// releases it internally).
	applyVE := func(ve *versionEdit) error {
		d.mu.versions.logLock()
		jobID := d.mu.nextJobID
		d.mu.nextJobID++

		err := d.mu.versions.logAndApply(jobID, ve, fileMetrics(ve), false, func() []compactionInfo {
			return d.getInProgressCompactionInfoLocked(nil)
		})
		d.updateReadStateLocked(nil)
		return err
	}

	// Virtualize f.
	ve := manifest.VersionEdit{}
	d1 := manifest.DeletedFileEntry{Level: 0, FileNum: f.FileNum}
	n1 := manifest.NewFileEntry{Level: 0, Meta: m1}
	n2 := manifest.NewFileEntry{Level: 0, Meta: m2}

	ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata)
	ve.DeletedFiles[d1] = f
	ve.NewFiles = append(ve.NewFiles, n1)
	ve.NewFiles = append(ve.NewFiles, n2)
	ve.CreatedBackingTables = append(ve.CreatedBackingTables, f.FileBacking)

	require.NoError(t, applyVE(&ve))
	// 2 latestRefs from 2 virtual sstables in the latest version which refer
	// to the physical sstable.
	require.Equal(t, 2, int(m1.LatestRefs()))
	require.Equal(t, 0, len(d.mu.versions.obsoleteTables))
	require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap))
	_, ok := d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum]
	require.True(t, ok)
	require.Equal(t, f.Size, m2.FileBacking.VirtualizedSize.Load())
	checkBackingSize(t, d)

	// Make sure that f is not present in zombie list, because it is not yet a
	// zombie.
	require.Equal(t, 0, len(d.mu.versions.zombieTables))

	// Delete the virtual sstable m1.
	ve = manifest.VersionEdit{}
	d1 = manifest.DeletedFileEntry{Level: 0, FileNum: m1.FileNum}
	ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata)
	ve.DeletedFiles[d1] = m1
	require.NoError(t, applyVE(&ve))

	// Only one virtual sstable in the latest version, confirm that the latest
	// version ref counting is correct.
	require.Equal(t, 1, int(m2.LatestRefs()))
	require.Equal(t, 0, len(d.mu.versions.zombieTables))
	require.Equal(t, 0, len(d.mu.versions.obsoleteTables))
	require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap))
	_, ok = d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum]
	require.True(t, ok)
	require.Equal(t, m2.Size, m2.FileBacking.VirtualizedSize.Load())
	checkBackingSize(t, d)

	// Move m2 from L0 to L6 to test the move compaction case.
	ve = manifest.VersionEdit{}
	d1 = manifest.DeletedFileEntry{Level: 0, FileNum: m2.FileNum}
	n1 = manifest.NewFileEntry{Level: 6, Meta: m2}
	ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata)
	ve.DeletedFiles[d1] = m2
	ve.NewFiles = append(ve.NewFiles, n1)
	require.NoError(t, applyVE(&ve))
	checkBackingSize(t, d)

	// A move must not change the ref count or create zombies/obsolete tables.
	require.Equal(t, 1, int(m2.LatestRefs()))
	require.Equal(t, 0, len(d.mu.versions.zombieTables))
	require.Equal(t, 0, len(d.mu.versions.obsoleteTables))
	require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap))
	_, ok = d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum]
	require.True(t, ok)
	require.Equal(t, m2.Size, m2.FileBacking.VirtualizedSize.Load())

	// Delete m2 from L6.
	ve = manifest.VersionEdit{}
	d1 = manifest.DeletedFileEntry{Level: 6, FileNum: m2.FileNum}
	ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata)
	ve.DeletedFiles[d1] = m2
	require.NoError(t, applyVE(&ve))
	checkBackingSize(t, d)

	// All virtual sstables are gone: the physical backing becomes a zombie
	// (still on disk, no longer referenced by the latest version) and is
	// removed from the backing map.
	require.Equal(t, 0, int(m2.LatestRefs()))
	require.Equal(t, 1, len(d.mu.versions.zombieTables))
	require.Equal(t, f.Size, d.mu.versions.zombieTables[f.FileBacking.DiskFileNum])
	require.Equal(t, 0, len(d.mu.versions.backingState.fileBackingMap))
	_, ok = d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum]
	require.False(t, ok)
	require.Equal(t, 0, int(m2.FileBacking.VirtualizedSize.Load()))
	checkBackingSize(t, d)

	// Make sure that the backing file is added to the obsolete tables list.
	require.Equal(t, 1, len(d.mu.versions.obsoleteTables))

}

// TestVirtualSSTableManifestReplay checks that virtual sstable state (latest
// refs, the file-backing map, and virtualized sizes) survives a manifest
// replay: the DB is closed and reopened after virtualizing a physical sstable,
// and again after a compaction removes the virtual sstables.
//
// TODO(bananabrick): Convert TestLatestRefCounting and this test into a single
// datadriven test.
func TestVirtualSSTableManifestReplay(t *testing.T) {
	mem := vfs.NewMem()
	require.NoError(t, mem.MkdirAll("ext", 0755))

	opts := &Options{
		FormatMajorVersion:          FormatVirtualSSTables,
		FS:                          mem,
		MaxManifestFileSize:         1,
		DisableAutomaticCompactions: true,
	}
	d, err := Open("", opts)
	require.NoError(t, err)

	err = d.Set([]byte{'a'}, []byte{'a'}, nil)
	require.NoError(t, err)
	err = d.Set([]byte{'b'}, []byte{'b'}, nil)
	require.NoError(t, err)

	err = d.Flush()
	require.NoError(t, err)

	iter := d.mu.versions.currentVersion().Levels[0].Iter()
	var f *fileMetadata = iter.First()
	require.NotNil(t, f)
	require.Equal(t, 1, int(f.LatestRefs()))
	require.Equal(t, 0, len(d.mu.versions.obsoleteTables))

	// Grab some new file nums.
	d.mu.Lock()
	f1 := d.mu.versions.nextFileNum
	f2 := f1 + 1
	d.mu.versions.nextFileNum += 2
	d.mu.Unlock()

	// Same virtualization setup as TestLatestRefCounting: m1 and m2 split f.
	m1 := &manifest.FileMetadata{
		FileBacking:    f.FileBacking,
		FileNum:        f1,
		CreationTime:   time.Now().Unix(),
		Size:           f.Size / 2,
		SmallestSeqNum: f.SmallestSeqNum,
		LargestSeqNum:  f.LargestSeqNum,
		Smallest:       base.MakeInternalKey([]byte{'a'}, f.Smallest.SeqNum(), InternalKeyKindSet),
		Largest:        base.MakeInternalKey([]byte{'a'}, f.Smallest.SeqNum(), InternalKeyKindSet),
		HasPointKeys:   true,
		Virtual:        true,
	}

	m2 := &manifest.FileMetadata{
		FileBacking:    f.FileBacking,
		FileNum:        f2,
		CreationTime:   time.Now().Unix(),
		Size:           f.Size - m1.Size,
		SmallestSeqNum: f.SmallestSeqNum,
		LargestSeqNum:  f.LargestSeqNum,
		Smallest:       base.MakeInternalKey([]byte{'b'}, f.Largest.SeqNum(), InternalKeyKindSet),
		Largest:        base.MakeInternalKey([]byte{'b'}, f.Largest.SeqNum(), InternalKeyKindSet),
		HasPointKeys:   true,
		Virtual:        true,
	}

	m1.LargestPointKey = m1.Largest
	m1.SmallestPointKey = m1.Smallest
	m1.Stats.NumEntries = 1

	m2.LargestPointKey = m2.Largest
	m2.SmallestPointKey = m2.Smallest
	m2.Stats.NumEntries = 1

	m1.ValidateVirtual(f)
	d.checkVirtualBounds(m1)
	m2.ValidateVirtual(f)
	d.checkVirtualBounds(m2)

	// fileMetrics derives per-level LevelMetrics deltas from a version edit.
	fileMetrics := func(ve *versionEdit) map[int]*LevelMetrics {
		metrics := newFileMetrics(ve.NewFiles)
		for de, f := range ve.DeletedFiles {
			lm := metrics[de.Level]
			if lm == nil {
				lm = &LevelMetrics{}
				metrics[de.Level] = lm
			}
			metrics[de.Level].NumFiles--
			metrics[de.Level].Size -= int64(f.Size)
		}
		return metrics
	}

	d.mu.Lock()
	// applyVE logs and applies ve under the manifest log lock; d.mu is held.
	applyVE := func(ve *versionEdit) error {
		d.mu.versions.logLock()
		jobID := d.mu.nextJobID
		d.mu.nextJobID++

		err := d.mu.versions.logAndApply(jobID, ve, fileMetrics(ve), false, func() []compactionInfo {
			return d.getInProgressCompactionInfoLocked(nil)
		})
		d.updateReadStateLocked(nil)
		return err
	}

	// Virtualize f.
	ve := manifest.VersionEdit{}
	d1 := manifest.DeletedFileEntry{Level: 0, FileNum: f.FileNum}
	n1 := manifest.NewFileEntry{Level: 0, Meta: m1}
	n2 := manifest.NewFileEntry{Level: 0, Meta: m2}

	ve.DeletedFiles = make(map[manifest.DeletedFileEntry]*manifest.FileMetadata)
	ve.DeletedFiles[d1] = f
	ve.NewFiles = append(ve.NewFiles, n1)
	ve.NewFiles = append(ve.NewFiles, n2)
	ve.CreatedBackingTables = append(ve.CreatedBackingTables, f.FileBacking)

	require.NoError(t, applyVE(&ve))
	checkBackingSize(t, d)
	d.mu.Unlock()

	require.Equal(t, 2, int(m1.LatestRefs()))
	require.Equal(t, 0, len(d.mu.versions.obsoleteTables))
	require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap))
	_, ok := d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum]
	require.True(t, ok)
	require.Equal(t, f.Size, m2.FileBacking.VirtualizedSize.Load())

	// Snapshot version edit will be written to a new manifest due to the flush.
	d.Set([]byte{'c'}, []byte{'c'}, nil)
	d.Flush()

	require.NoError(t, d.Close())
	d, err = Open("", opts)
	require.NoError(t, err)

	// After replay, find a virtual sstable in L0 and verify the replayed
	// ref counts, backing map, and virtualized size.
	d.mu.Lock()
	it := d.mu.versions.currentVersion().Levels[0].Iter()
	var virtualFile *fileMetadata
	for f := it.First(); f != nil; f = it.Next() {
		if f.Virtual {
			virtualFile = f
			break
		}
	}

	require.Equal(t, 2, int(virtualFile.LatestRefs()))
	require.Equal(t, 0, len(d.mu.versions.obsoleteTables))
	require.Equal(t, 1, len(d.mu.versions.backingState.fileBackingMap))
	_, ok = d.mu.versions.backingState.fileBackingMap[f.FileBacking.DiskFileNum]
	require.True(t, ok)
	require.Equal(t, f.Size, virtualFile.FileBacking.VirtualizedSize.Load())
	checkBackingSize(t, d)
	d.mu.Unlock()

	// Will cause the virtual sstables to be deleted, and the file backing should
	// also be removed.
	d.Compact([]byte{'a'}, []byte{'z'}, false)

	d.mu.Lock()
	virtualFile = nil
	it = d.mu.versions.currentVersion().Levels[0].Iter()
	for f := it.First(); f != nil; f = it.Next() {
		if f.Virtual {
			virtualFile = f
			break
		}
	}
	require.Nil(t, virtualFile)
	require.Equal(t, 0, len(d.mu.versions.obsoleteTables))
	require.Equal(t, 0, len(d.mu.versions.backingState.fileBackingMap))
	checkBackingSize(t, d)
	d.mu.Unlock()

	// Close and restart to make sure that the new snapshot written during
	// compaction doesn't have the file backing.
	require.NoError(t, d.Close())
	d, err = Open("", opts)
	require.NoError(t, err)

	d.mu.Lock()
	virtualFile = nil
	it = d.mu.versions.currentVersion().Levels[0].Iter()
	for f := it.First(); f != nil; f = it.Next() {
		if f.Virtual {
			virtualFile = f
			break
		}
	}
	require.Nil(t, virtualFile)
	require.Equal(t, 0, len(d.mu.versions.obsoleteTables))
	require.Equal(t, 0, len(d.mu.versions.backingState.fileBackingMap))
	checkBackingSize(t, d)
	d.mu.Unlock()
	require.NoError(t, d.Close())
}

// TestVersionSetCheckpoint verifies that after multiple manifest rotations,
// the latest manifest's snapshot is sufficient to reopen the DB and read back
// previously ingested data.
func TestVersionSetCheckpoint(t *testing.T) {
	mem := vfs.NewMem()
	require.NoError(t, mem.MkdirAll("ext", 0755))

	opts := &Options{
		FS:                  mem,
		MaxManifestFileSize: 1,
	}
	d, err := Open("", opts)
	require.NoError(t, err)

	// Multiple manifest files are created such that the latest one must have a correct snapshot
	// of the preceding state for the DB to be opened correctly and see the written data.
	// Snapshot has no files, so first edit will cause manifest rotation.
	writeAndIngest(t, mem, d, base.MakeInternalKey([]byte("a"), 0, InternalKeyKindSet), []byte("b"), "a")
	// Snapshot has no files, and manifest has an edit from the previous ingest,
	// so this second ingest will cause manifest rotation.
	writeAndIngest(t, mem, d, base.MakeInternalKey([]byte("c"), 0, InternalKeyKindSet), []byte("d"), "c")
	require.NoError(t, d.Close())
	d, err = Open("", opts)
	require.NoError(t, err)
	// checkValue asserts that Get(k) returns expected.
	checkValue := func(k string, expected string) {
		v, closer, err := d.Get([]byte(k))
		require.NoError(t, err)
		require.Equal(t, expected, string(v))
		closer.Close()
	}
	checkValue("a", "b")
	checkValue("c", "d")
	require.NoError(t, d.Close())
}

// TestVersionSetSeqNums verifies that after manifest rotations and a reopen,
// the manifest records a LastSeqNum equal to the highest assigned sequence
// number, and that the in-memory logSeqNum is one past it.
func TestVersionSetSeqNums(t *testing.T) {
	mem := vfs.NewMem()
	require.NoError(t, mem.MkdirAll("ext", 0755))

	opts := &Options{
		FS:                  mem,
		MaxManifestFileSize: 1,
	}
	d, err := Open("", opts)
	require.NoError(t, err)

	// Snapshot has no files, so first edit will cause manifest rotation.
	writeAndIngest(t, mem, d, base.MakeInternalKey([]byte("a"), 0, InternalKeyKindSet), []byte("b"), "a")
	// Snapshot has no files, and manifest has an edit from the previous ingest,
	// so this second ingest will cause manifest rotation.
	writeAndIngest(t, mem, d, base.MakeInternalKey([]byte("c"), 0, InternalKeyKindSet), []byte("d"), "c")
	require.NoError(t, d.Close())
	d, err = Open("", opts)
	require.NoError(t, err)
	defer d.Close()
	d.TestOnlyWaitForCleaning()

	// Check that the manifest has the correct LastSeqNum, equalling the highest
	// observed SeqNum.
	filenames, err := mem.List("")
	require.NoError(t, err)
	var manifest vfs.File
	for _, filename := range filenames {
		fileType, _, ok := base.ParseFilename(mem, filename)
		if ok && fileType == fileTypeManifest {
			manifest, err = mem.Open(filename)
			require.NoError(t, err)
		}
	}
	require.NotNil(t, manifest)
	defer manifest.Close()
	// Scan every version edit in the manifest, remembering the last non-zero
	// LastSeqNum encountered.
	rr := record.NewReader(manifest, 0 /* logNum */)
	lastSeqNum := uint64(0)
	for {
		r, err := rr.Next()
		if err == io.EOF {
			break
		}
		require.NoError(t, err)
		var ve versionEdit
		err = ve.Decode(r)
		require.NoError(t, err)
		if ve.LastSeqNum != 0 {
			lastSeqNum = ve.LastSeqNum
		}
	}
	// 2 ingestions happened, so LastSeqNum should equal base.SeqNumStart + 1.
	require.Equal(t, uint64(11), lastSeqNum)
	// logSeqNum is always one greater than the last assigned sequence number.
	require.Equal(t, d.mu.versions.logSeqNum.Load(), lastSeqNum+1)
}