github.com/nats-io/nats-server/v2@v2.11.0-preview.2/server/filestore_test.go

// Copyright 2019-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !skip_store_tests
// +build !skip_store_tests

package server

import (
	"archive/tar"
	"bytes"
	"crypto/hmac"
	crand "crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math/bits"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/klauspost/compress/s2"
)

func testFileStoreAllPermutations(t *testing.T, fn func(t *testing.T, fcfg FileStoreConfig)) {
	for _, fcfg := range []FileStoreConfig{
		{Cipher: NoCipher, Compression: NoCompression},
		{Cipher: NoCipher, Compression: S2Compression},
		{Cipher: AES, Compression: NoCompression},
		{Cipher: AES, Compression: S2Compression},
		{Cipher: ChaCha, Compression: NoCompression},
		{Cipher: ChaCha, Compression: S2Compression},
	} {
		subtestName := fmt.Sprintf("%s-%s", fcfg.Cipher, fcfg.Compression)
		t.Run(subtestName, func(t *testing.T) {
			fcfg.StoreDir = t.TempDir()
			fn(t, fcfg)
			time.Sleep(100 * time.Millisecond)
		})
	}
}

func prf(fcfg *FileStoreConfig) func(context []byte) ([]byte, error) {
	if fcfg.Cipher == NoCipher {
		return nil
	}
	return func(context []byte) ([]byte, error) {
		h := hmac.New(sha256.New, []byte("dlc22"))
		if _, err := h.Write(context); err != nil {
			return nil, err
		}
		return h.Sum(nil), nil
	}
}

func TestFileStoreBasics(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World")
		for i := 1; i <= 5; i++ {
			now := time.Now().UnixNano()
			if seq, ts, err := fs.StoreMsg(subj, nil, msg); err != nil {
				t.Fatalf("Error storing msg: %v", err)
			} else if seq != uint64(i) {
				t.Fatalf("Expected sequence to be %d, got %d", i, seq)
			} else if ts < now || ts > now+int64(time.Millisecond) {
				t.Fatalf("Expected timestamp to be current, got %v", ts-now)
			}
		}

		state := fs.State()
		if state.Msgs != 5 {
			t.Fatalf("Expected 5 msgs, got %d", state.Msgs)
		}
		expectedSize := 5 * fileStoreMsgSize(subj, nil, msg)
		if state.Bytes != expectedSize {
			t.Fatalf("Expected %d bytes, got %d", expectedSize, state.Bytes)
		}

		var smv StoreMsg
		sm, err := fs.LoadMsg(2, &smv)
		if err != nil {
			t.Fatalf("Unexpected error looking up msg: %v", err)
		}
		if sm.subj != subj {
			t.Fatalf("Subjects don't match, original %q vs %q", subj, sm.subj)
		}
		if !bytes.Equal(sm.msg, msg) {
			t.Fatalf("Msgs don't match, original %q vs %q", msg, sm.msg)
		}
		_, err = fs.LoadMsg(3, nil)
		if err != nil {
			t.Fatalf("Unexpected error looking up msg: %v", err)
		}

		remove := func(seq, expectedMsgs uint64) {
			t.Helper()
			removed, err := fs.RemoveMsg(seq)
			if err != nil {
				t.Fatalf("Got an error on remove of %d: %v", seq, err)
			}
			if !removed {
				t.Fatalf("Expected remove to return true for %d", seq)
			}
			if state := fs.State(); state.Msgs != expectedMsgs {
				t.Fatalf("Expected %d msgs, got %d", expectedMsgs, state.Msgs)
			}
		}

		// Remove first
		remove(1, 4)
		// Remove last
		remove(5, 3)
		// Remove a middle
		remove(3, 2)
	})
}

func TestFileStoreMsgHeaders(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, hdr, msg := "foo", []byte("name:derek"), []byte("Hello World")
		elen := 22 + len(subj) + 4 + len(hdr) + len(msg) + 8
		if sz := int(fileStoreMsgSize(subj, hdr, msg)); sz != elen {
			t.Fatalf("Wrong size for stored msg with header")
		}
		fs.StoreMsg(subj, hdr, msg)
		var smv StoreMsg
		sm, err := fs.LoadMsg(1, &smv)
		if err != nil {
			t.Fatalf("Unexpected error looking up msg: %v", err)
		}
		if !bytes.Equal(msg, sm.msg) {
			t.Fatalf("Expected same msg, got %q vs %q", sm.msg, msg)
		}
		if !bytes.Equal(hdr, sm.hdr) {
			t.Fatalf("Expected same hdr, got %q vs %q", sm.hdr, hdr)
		}
		if removed, _ := fs.EraseMsg(1); !removed {
			t.Fatalf("Expected erase msg to return success")
		}
	})
}

func TestFileStoreBasicWriteMsgsAndRestore(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		if _, err := newFileStore(fcfg, StreamConfig{Storage: MemoryStorage}); err == nil {
			t.Fatalf("Expected an error with wrong type")
		}
		if _, err := newFileStore(fcfg, StreamConfig{Storage: FileStorage}); err == nil {
			t.Fatalf("Expected an error with no name")
		}

		created := time.Now()
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj := "foo"

		// Write 100 msgs
		toStore := uint64(100)
		for i := uint64(1); i <= toStore; i++ {
			msg := []byte(fmt.Sprintf("[%08d] Hello World!", i))
			if seq, _, err := fs.StoreMsg(subj, nil, msg); err != nil {
				t.Fatalf("Error storing msg: %v", err)
			} else if seq != uint64(i) {
				t.Fatalf("Expected sequence to be %d, got %d", i, seq)
			}
		}
		state := fs.State()
		if state.Msgs != toStore {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		msg22 := []byte(fmt.Sprintf("[%08d] Hello World!", 22))
		expectedSize := toStore * fileStoreMsgSize(subj, nil, msg22)

		if state.Bytes != expectedSize {
			t.Fatalf("Expected %d bytes, got %d", expectedSize, state.Bytes)
		}
		// Stop will flush to disk.
		fs.Stop()

		// Make sure Store call after does not work.
		if _, _, err := fs.StoreMsg(subj, nil, []byte("no work")); err == nil {
			t.Fatalf("Expected an error for StoreMsg call after Stop, got none")
		}

		// Restart
		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		state = fs.State()
		if state.Msgs != toStore {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		if state.Bytes != expectedSize {
			t.Fatalf("Expected %d bytes, got %d", expectedSize, state.Bytes)
		}

		// Now write 100 more msgs
		for i := uint64(101); i <= toStore*2; i++ {
			msg := []byte(fmt.Sprintf("[%08d] Hello World!", i))
			if seq, _, err := fs.StoreMsg(subj, nil, msg); err != nil {
				t.Fatalf("Error storing msg: %v", err)
			} else if seq != uint64(i) {
				t.Fatalf("Expected sequence to be %d, got %d", i, seq)
			}
		}
		state = fs.State()
		if state.Msgs != toStore*2 {
			t.Fatalf("Expected %d msgs, got %d", toStore*2, state.Msgs)
		}

		// Now cycle again and make sure that last batch was stored.
		// Stop will flush to disk.
		fs.Stop()

		// Restart
		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		state = fs.State()
		if state.Msgs != toStore*2 {
			t.Fatalf("Expected %d msgs, got %d", toStore*2, state.Msgs)
		}
		if state.Bytes != expectedSize*2 {
			t.Fatalf("Expected %d bytes, got %d", expectedSize*2, state.Bytes)
		}

		fs.Purge()
		fs.Stop()

		// Restart
		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		state = fs.State()
		if state.Msgs != 0 {
			t.Fatalf("Expected %d msgs, got %d", 0, state.Msgs)
		}
		if state.Bytes != 0 {
			t.Fatalf("Expected %d bytes, got %d", 0, state.Bytes)
		}

		seq, _, err := fs.StoreMsg(subj, nil, []byte("Hello"))
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		fs.RemoveMsg(seq)

		fs.Stop()

		// Restart
		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		state = fs.State()
		require_Equal(t, state.FirstSeq, seq+1)
	})
}

func TestFileStoreSelectNextFirst(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fcfg.BlockSize = 256
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		numMsgs := 10
		subj, msg := "zzz", []byte("Hello World")
		for i := 0; i < numMsgs; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		if state := fs.State(); state.Msgs != uint64(numMsgs) {
			t.Fatalf("Expected %d msgs, got %d", numMsgs, state.Msgs)
		}

		// Note the 256 block size is tied to the msg size below to give us 5 messages per block.
		if fmb := fs.selectMsgBlock(1); fmb.msgs != 5 {
			t.Fatalf("Expected 5 messages per block, but got %d", fmb.msgs)
		}

		// Delete 2-7, this will cross message blocks.
		for i := 2; i <= 7; i++ {
			fs.RemoveMsg(uint64(i))
		}

		if state := fs.State(); state.Msgs != 4 || state.FirstSeq != 1 {
			t.Fatalf("Expected 4 msgs, first seq of 1, got msgs of %d and first seq of %d", state.Msgs, state.FirstSeq)
		}
		// Now close the gap which will force the system to jump underlying message blocks to find the right sequence.
		fs.RemoveMsg(1)
		if state := fs.State(); state.Msgs != 3 || state.FirstSeq != 8 {
			t.Fatalf("Expected 3 msgs, first seq of 8, got msgs of %d and first seq of %d", state.Msgs, state.FirstSeq)
		}
	})
}

func TestFileStoreSkipMsg(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fcfg.BlockSize = 256
		created := time.Now()
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		numSkips := 10
		for i := 0; i < numSkips; i++ {
			fs.SkipMsg()
		}
		state := fs.State()
		if state.Msgs != 0 {
			t.Fatalf("Expected %d msgs, got %d", 0, state.Msgs)
		}
		if state.FirstSeq != uint64(numSkips+1) || state.LastSeq != uint64(numSkips) {
			t.Fatalf("Expected first to be %d and last to be %d. got first %d and last %d", numSkips+1, numSkips, state.FirstSeq, state.LastSeq)
		}

		fs.StoreMsg("zzz", nil, []byte("Hello World!"))
		fs.SkipMsg()
		fs.SkipMsg()
		fs.StoreMsg("zzz", nil, []byte("Hello World!"))
		fs.SkipMsg()

		state = fs.State()
		if state.Msgs != 2 {
			t.Fatalf("Expected %d msgs, got %d", 2, state.Msgs)
		}
		if state.FirstSeq != uint64(numSkips+1) || state.LastSeq != uint64(numSkips+5) {
			t.Fatalf("Expected first to be %d and last to be %d. got first %d and last %d", numSkips+1, numSkips+5, state.FirstSeq, state.LastSeq)
		}

		// Make sure we recover same state.
		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		state = fs.State()
		if state.Msgs != 2 {
			t.Fatalf("Expected %d msgs, got %d", 2, state.Msgs)
		}
		if state.FirstSeq != uint64(numSkips+1) || state.LastSeq != uint64(numSkips+5) {
			t.Fatalf("Expected first to be %d and last to be %d. got first %d and last %d", numSkips+1, numSkips+5, state.FirstSeq, state.LastSeq)
		}

		var smv StoreMsg
		sm, err := fs.LoadMsg(11, &smv)
		if err != nil {
			t.Fatalf("Unexpected error looking up seq 11: %v", err)
		}
		if sm.subj != "zzz" || string(sm.msg) != "Hello World!" {
			t.Fatalf("Message did not match")
		}

		fs.SkipMsg()
		nseq, _, err := fs.StoreMsg("AAA", nil, []byte("Skip?"))
		if err != nil {
			t.Fatalf("Unexpected error storing msg: %v", err)
		}
		if nseq != 17 {
			t.Fatalf("Expected seq of %d but got %d", 17, nseq)
		}

		// Make sure we recover same state.
		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		sm, err = fs.LoadMsg(nseq, &smv)
		if err != nil {
			t.Fatalf("Unexpected error looking up seq %d: %v", nseq, err)
		}
		if sm.subj != "AAA" || string(sm.msg) != "Skip?" {
			t.Fatalf("Message did not match")
		}
	})
}

func TestFileStoreWriteExpireWrite(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		cexp := 10 * time.Millisecond
		fcfg.CacheExpire = cexp
		created := time.Now()
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		toSend := 10
		for i := 0; i < toSend; i++ {
			fs.StoreMsg("zzz", nil, []byte("Hello World!"))
		}

		// Wait for write cache portion to go to zero.
		checkFor(t, time.Second, 20*time.Millisecond, func() error {
			if csz := fs.cacheSize(); csz != 0 {
				return fmt.Errorf("cache size not 0, got %s", friendlyBytes(int64(csz)))
			}
			return nil
		})

		for i := 0; i < toSend; i++ {
			fs.StoreMsg("zzz", nil, []byte("Hello World! - 22"))
		}

		if state := fs.State(); state.Msgs != uint64(toSend*2) {
			t.Fatalf("Expected %d msgs, got %d", toSend*2, state.Msgs)
		}

		// Make sure we recover same state.
		fs.Stop()

		fcfg.CacheExpire = 0
		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		if state := fs.State(); state.Msgs != uint64(toSend*2) {
			t.Fatalf("Expected %d msgs, got %d", toSend*2, state.Msgs)
		}

		// Now load them in and check.
		var smv StoreMsg
		for i := 1; i <= toSend*2; i++ {
			sm, err := fs.LoadMsg(uint64(i), &smv)
			if err != nil {
				t.Fatalf("Unexpected error looking up seq %d: %v", i, err)
			}
			str := "Hello World!"
			if i > toSend {
				str = "Hello World! - 22"
			}
			if sm.subj != "zzz" || string(sm.msg) != str {
				t.Fatalf("Message did not match")
			}
		}
	})
}

func TestFileStoreMsgLimit(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage, MaxMsgs: 10}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World")
		for i := 0; i < 10; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != 10 {
			t.Fatalf("Expected %d msgs, got %d", 10, state.Msgs)
		}
		if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil {
			t.Fatalf("Error storing msg: %v", err)
		}
		state = fs.State()
		if state.Msgs != 10 {
			t.Fatalf("Expected %d msgs, got %d", 10, state.Msgs)
		}
		if state.LastSeq != 11 {
			t.Fatalf("Expected the last sequence to be 11 now, but got %d", state.LastSeq)
		}
		if state.FirstSeq != 2 {
			t.Fatalf("Expected the first sequence to be 2 now, but got %d", state.FirstSeq)
		}
		// Make sure we can not lookup seq 1.
		if _, err := fs.LoadMsg(1, nil); err == nil {
			t.Fatalf("Expected error looking up seq 1 but got none")
		}
	})
}

func TestFileStoreMsgLimitBug(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		created := time.Now()
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage, MaxMsgs: 1}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World")
		fs.StoreMsg(subj, nil, msg)
		fs.StoreMsg(subj, nil, msg)
		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage, MaxMsgs: 1}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()
		fs.StoreMsg(subj, nil, msg)
	})
}

func TestFileStoreBytesLimit(t *testing.T) {
	subj, msg := "foo", make([]byte, 512)
	storedMsgSize := fileStoreMsgSize(subj, nil, msg)

	toStore := uint64(1024)
	maxBytes := storedMsgSize * toStore

	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage, MaxBytes: int64(maxBytes)}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		for i := uint64(0); i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != toStore {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		if state.Bytes != storedMsgSize*toStore {
			t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
		}

		// Now send 10 more and check that the bytes limit is enforced.
		for i := 0; i < 10; i++ {
			if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil {
				t.Fatalf("Error storing msg: %v", err)
			}
		}
		state = fs.State()
		if state.Msgs != toStore {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		if state.Bytes != storedMsgSize*toStore {
			t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
		}
		if state.FirstSeq != 11 {
			t.Fatalf("Expected first sequence to be 11, got %d", state.FirstSeq)
		}
		if state.LastSeq != toStore+10 {
			t.Fatalf("Expected last sequence to be %d, got %d", toStore+10, state.LastSeq)
		}
	})
}

// https://github.com/nats-io/nats-server/issues/4771
func TestFileStoreBytesLimitWithDiscardNew(t *testing.T) {
	subj, msg := "tiny", make([]byte, 7)
	storedMsgSize := fileStoreMsgSize(subj, nil, msg)

	toStore := uint64(2)
	maxBytes := 100

	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		cfg := StreamConfig{Name: "zzz", Storage: FileStorage, MaxBytes: int64(maxBytes), Discard: DiscardNew}
		fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		for i := 0; i < 10; i++ {
			_, _, err := fs.StoreMsg(subj, nil, msg)
			if i < int(toStore) {
				if err != nil {
					t.Fatalf("Error storing msg: %v", err)
				}
			} else if !errors.Is(err, ErrMaxBytes) {
				t.Fatalf("Storing msg should result in: %v", ErrMaxBytes)
			}
		}
		state := fs.State()
		if state.Msgs != toStore {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		if state.Bytes != storedMsgSize*toStore {
			t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
		}
	})
}

func TestFileStoreAgeLimit(t *testing.T) {
	maxAge := 1 * time.Second

	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		if fcfg.Compression != NoCompression {
			// TODO(nat): This test fails at the moment with compression enabled
			// because it takes longer to compress the blocks, by which time the
			// messages have expired. Need to think about a balanced age so that
			// the test doesn't take too long in non-compressed cases.
			t.SkipNow()
		}

		fcfg.BlockSize = 256
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage, MaxAge: maxAge}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		// Store some messages. Does not really matter how many.
		subj, msg := "foo", []byte("Hello World")
		toStore := 500
		for i := 0; i < toStore; i++ {
			if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
		}
		state := fs.State()
		if state.Msgs != uint64(toStore) {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		checkExpired := func(t *testing.T) {
			t.Helper()
			checkFor(t, 5*time.Second, maxAge, func() error {
				state = fs.State()
				if state.Msgs != 0 {
					return fmt.Errorf("Expected no msgs, got %d", state.Msgs)
				}
				if state.Bytes != 0 {
					return fmt.Errorf("Expected no bytes, got %d", state.Bytes)
				}
				return nil
			})
		}
		// Let them expire
		checkExpired(t)

		// Now add some more and make sure that timer will fire again.
		for i := 0; i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state = fs.State()
		if state.Msgs != uint64(toStore) {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		fs.RemoveMsg(502)
		fs.RemoveMsg(602)
		fs.RemoveMsg(702)
		fs.RemoveMsg(802)
		// We will measure the time to make sure expires works with interior deletes.
		start := time.Now()
		checkExpired(t)
		if elapsed := time.Since(start); elapsed > 5*time.Second {
			t.Fatalf("Took too long to expire: %v", elapsed)
		}
	})
}

func TestFileStoreTimeStamps(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		last := time.Now().UnixNano()
		subj, msg := "foo", []byte("Hello World")
		for i := 0; i < 10; i++ {
			time.Sleep(5 * time.Millisecond)
			fs.StoreMsg(subj, nil, msg)
		}
		var smv StoreMsg
		for seq := uint64(1); seq <= 10; seq++ {
			sm, err := fs.LoadMsg(seq, &smv)
			if err != nil {
				t.Fatalf("Unexpected error looking up msg [%d]: %v", seq, err)
			}
			// These should be different
			if sm.ts <= last {
				t.Fatalf("Expected different timestamps, got last %v vs %v", last, sm.ts)
			}
			last = sm.ts
		}
	})
}

func TestFileStorePurge(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		blkSize := uint64(64 * 1024)
		fcfg.BlockSize = blkSize
		created := time.Now()

		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", make([]byte, 8*1024)
		storedMsgSize := fileStoreMsgSize(subj, nil, msg)

		toStore := uint64(1024)
		for i := uint64(0); i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != toStore {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		if state.Bytes != storedMsgSize*toStore {
			t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
		}

		expectedBlocks := int(storedMsgSize * toStore / blkSize)
		if numBlocks := fs.numMsgBlocks(); numBlocks <= expectedBlocks {
			t.Fatalf("Expected to have more than %d msg blocks, got %d", expectedBlocks, numBlocks)
		}

		fs.Purge()

		if numBlocks := fs.numMsgBlocks(); numBlocks != 1 {
			t.Fatalf("Expected to have exactly 1 empty msg block, got %d", numBlocks)
		}

		checkPurgeState := func(stored uint64) {
			t.Helper()
			state = fs.State()
			if state.Msgs != 0 {
				t.Fatalf("Expected 0 msgs after purge, got %d", state.Msgs)
			}
			if state.Bytes != 0 {
				t.Fatalf("Expected 0 bytes after purge, got %d", state.Bytes)
			}
			if state.LastSeq != stored {
				t.Fatalf("Expected LastSeq to be %d, got %d", stored, state.LastSeq)
			}
			if state.FirstSeq != stored+1 {
				t.Fatalf("Expected FirstSeq to be %d, got %d", stored+1, state.FirstSeq)
			}
		}
		checkPurgeState(toStore)

		// Make sure we recover same state.
		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		if numBlocks := fs.numMsgBlocks(); numBlocks != 1 {
			t.Fatalf("Expected to have exactly 1 empty msg block, got %d", numBlocks)
		}

		checkPurgeState(toStore)

		// Now make sure we clean up any dangling purged messages.
		for i := uint64(0); i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state = fs.State()
		if state.Msgs != toStore {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		if state.Bytes != storedMsgSize*toStore {
			t.Fatalf("Expected bytes to be %d, got %d", storedMsgSize*toStore, state.Bytes)
		}

		// We will simulate crashing before the purge directory is cleared.
		mdir := filepath.Join(fs.fcfg.StoreDir, msgDir)
		pdir := filepath.Join(fs.fcfg.StoreDir, "ptest")
		os.Rename(mdir, pdir)
		os.MkdirAll(mdir, 0755)

		fs.Purge()
		checkPurgeState(toStore * 2)

		// Make sure we recover same state.
		fs.Stop()

		purgeDir := filepath.Join(fs.fcfg.StoreDir, purgeDir)
		os.Rename(pdir, purgeDir)

		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		if numBlocks := fs.numMsgBlocks(); numBlocks != 1 {
			t.Fatalf("Expected to have exactly 1 empty msg block, got %d", numBlocks)
		}

		checkPurgeState(toStore * 2)

		checkFor(t, 2*time.Second, 100*time.Millisecond, func() error {
			if _, err := os.Stat(purgeDir); err == nil {
				return fmt.Errorf("purge directory still present")
			}
			return nil
		})
	})
}

func TestFileStoreCompact(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fcfg.BlockSize = 350
		created := time.Now()
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World")
		for i := 0; i < 10; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		if state := fs.State(); state.Msgs != 10 {
			t.Fatalf("Expected 10 msgs, got %d", state.Msgs)
		}
		n, err := fs.Compact(6)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if n != 5 {
			t.Fatalf("Expected to have purged 5 msgs, got %d", n)
		}
		state := fs.State()
		if state.Msgs != 5 {
			t.Fatalf("Expected 5 msgs, got %d", state.Msgs)
		}
		if state.FirstSeq != 6 {
			t.Fatalf("Expected first seq of 6, got %d", state.FirstSeq)
		}
		// Now test that compact will also reset first if seq > last
		n, err = fs.Compact(100)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if n != 5 {
			t.Fatalf("Expected to have purged 5 msgs, got %d", n)
		}
		if state = fs.State(); state.FirstSeq != 100 {
			t.Fatalf("Expected first seq of 100, got %d", state.FirstSeq)
		}

		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		if state = fs.State(); state.FirstSeq != 100 {
			t.Fatalf("Expected first seq of 100, got %d", state.FirstSeq)
		}
	})
}

func TestFileStoreCompactLastPlusOne(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fcfg.BlockSize = 8192
		fcfg.AsyncFlush = true

		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", make([]byte, 10_000)
		for i := 0; i < 10_000; i++ {
			if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil {
				t.Fatalf("Unexpected error: %v", err)
			}
		}

		// The performance of this test is quite terrible with compression
		// if we have AsyncFlush = false, so we'll batch flushes instead.
		fs.mu.Lock()
		fs.checkAndFlushAllBlocks()
		fs.mu.Unlock()

		if state := fs.State(); state.Msgs != 10_000 {
			t.Fatalf("Expected 10_000 msgs, got %d", state.Msgs)
		}
		if _, err := fs.Compact(10_001); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		state := fs.State()
		if state.Msgs != 0 {
			t.Fatalf("Expected no message but got %d", state.Msgs)
		}

		fs.StoreMsg(subj, nil, msg)
		state = fs.State()
		if state.Msgs != 1 {
			t.Fatalf("Expected one message but got %d", state.Msgs)
		}
	})
}

func TestFileStoreCompactMsgCountBug(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World")
		for i := 0; i < 10; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		if state := fs.State(); state.Msgs != 10 {
			t.Fatalf("Expected 10 msgs, got %d", state.Msgs)
		}
		// Now delete 2,3,4.
		fs.EraseMsg(2)
		fs.EraseMsg(3)
		fs.EraseMsg(4)

		// Also delete 7,8, and 9.
		fs.RemoveMsg(7)
		fs.RemoveMsg(8)
		fs.RemoveMsg(9)

		n, err := fs.Compact(6)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		// 1 & 5
		if n != 2 {
			t.Fatalf("Expected to have deleted 2 msgs, got %d", n)
		}
		if state := fs.State(); state.Msgs != 2 {
			t.Fatalf("Expected to have 2 remaining, got %d", state.Msgs)
		}
	})
}

func TestFileStoreCompactPerf(t *testing.T) {
	t.SkipNow()

	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fcfg.BlockSize = 8192
		fcfg.AsyncFlush = true

		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World")
		for i := 0; i < 100_000; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		if state := fs.State(); state.Msgs != 100_000 {
			t.Fatalf("Expected 100_000 msgs, got %d", state.Msgs)
		}
		start := time.Now()
		n, err := fs.Compact(90_001)
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		t.Logf("Took %v to compact\n", time.Since(start))

		if n != 90_000 {
			t.Fatalf("Expected to have purged 90_000 msgs, got %d", n)
		}
		state := fs.State()
		if state.Msgs != 10_000 {
			t.Fatalf("Expected 10_000 msgs, got %d", state.Msgs)
		}
		if state.FirstSeq != 90_001 {
			t.Fatalf("Expected first seq of 90_001, got %d", state.FirstSeq)
		}
	})
}

func TestFileStoreStreamTruncate(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fcfg.BlockSize = 350
		cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}
		created := time.Now()

		fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		tseq := uint64(50)

		subj, toStore := "foo", uint64(100)
		for i := uint64(1); i < tseq; i++ {
			_, _, err := fs.StoreMsg(subj, nil, []byte("ok"))
			require_NoError(t, err)
		}
		subj = "bar"
		for i := tseq; i <= toStore; i++ {
			_, _, err := fs.StoreMsg(subj, nil, []byte("ok"))
			require_NoError(t, err)
		}

		if state := fs.State(); state.Msgs != toStore {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}

		// Check that sequence has to be interior.
		if err := fs.Truncate(toStore + 1); err != ErrInvalidSequence {
			t.Fatalf("Expected err of '%v', got '%v'", ErrInvalidSequence, err)
		}

		if err := fs.Truncate(tseq); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		if state := fs.State(); state.Msgs != tseq {
			t.Fatalf("Expected %d msgs, got %d", tseq, state.Msgs)
		}

		// Now make sure we report properly if we have some deleted interior messages.
		fs.RemoveMsg(10)
		fs.RemoveMsg(20)
		fs.RemoveMsg(30)
		fs.RemoveMsg(40)

		tseq = uint64(25)
		if err := fs.Truncate(tseq); err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		state := fs.State()
		if state.Msgs != tseq-2 {
			t.Fatalf("Expected %d msgs, got %d", tseq-2, state.Msgs)
		}
		expected := []uint64{10, 20}
		if !reflect.DeepEqual(state.Deleted, expected) {
			t.Fatalf("Expected deleted to be %+v, got %+v\n", expected, state.Deleted)
		}

		before := state

		// Make sure we can recover same state.
		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		if state := fs.State(); !reflect.DeepEqual(state, before) {
			t.Fatalf("Expected state of %+v, got %+v", before, state)
		}

		mb := fs.getFirstBlock()
		require_True(t, mb != nil)
		require_NoError(t, mb.loadMsgs())
	})
}

func TestFileStoreRemovePartialRecovery(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage}
		created := time.Now()

		fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World")
		toStore := 100
		for i := 0; i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != uint64(toStore) {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}

		// Remove half
		for i := 1; i <= toStore/2; i++ {
			fs.RemoveMsg(uint64(i))
		}

		state = fs.State()
		if state.Msgs != uint64(toStore/2) {
			t.Fatalf("Expected %d msgs, got %d", toStore/2, state.Msgs)
		}

		// Make sure we recover same state.
		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		state2 := fs.State()
		if !reflect.DeepEqual(state2, state) {
			t.Fatalf("Expected recovered state to be the same, got %+v vs %+v\n", state2, state)
		}
	})
}

func TestFileStoreRemoveOutOfOrderRecovery(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage}
		created := time.Now()

		fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World")
		toStore := 100
		for i := 0; i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != uint64(toStore) {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}

		// Remove evens
		for i := 2; i <= toStore; i += 2 {
			if removed, _ := fs.RemoveMsg(uint64(i)); !removed {
				t.Fatalf("Expected remove to return true")
			}
		}

		state = fs.State()
		if state.Msgs != uint64(toStore/2) {
			t.Fatalf("Expected %d msgs, got %d", toStore/2, state.Msgs)
		}

		var smv StoreMsg
		if _, err := fs.LoadMsg(1, &smv); err != nil {
			t.Fatalf("Expected to retrieve seq 1")
		}
		for i := 2; i <= toStore; i += 2 {
			if _, err := fs.LoadMsg(uint64(i), &smv); err == nil {
				t.Fatalf("Expected error looking up seq %d that should be deleted", i)
			}
		}

		// Make sure we recover same state.
		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		state2 := fs.State()
		if !reflect.DeepEqual(state2, state) {
			t.Fatalf("Expected recovered states to be the same, got %+v vs %+v\n", state, state2)
		}

		if _, err := fs.LoadMsg(1, &smv); err != nil {
			t.Fatalf("Expected to retrieve seq 1")
		}
		for i := 2; i <= toStore; i += 2 {
			if _, err := fs.LoadMsg(uint64(i), nil); err == nil {
				t.Fatalf("Expected error looking up seq %d that should be deleted", i)
			}
		}
	})
}

func TestFileStoreAgeLimitRecovery(t *testing.T) {
	maxAge := 1 * time.Second

	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fcfg.CacheExpire = 1 * time.Millisecond
		cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage, MaxAge: maxAge}
		created := time.Now()

		fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		// Store some messages. Does not really matter how many.
		subj, msg := "foo", []byte("Hello World")
		toStore := 100
		for i := 0; i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != uint64(toStore) {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}
		fs.Stop()

		time.Sleep(maxAge)

		fcfg.CacheExpire = 0
		fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		// Make sure they expire.
		checkFor(t, time.Second, 2*maxAge, func() error {
			t.Helper()
			state = fs.State()
			if state.Msgs != 0 {
				return fmt.Errorf("Expected no msgs, got %d", state.Msgs)
			}
			if state.Bytes != 0 {
				return fmt.Errorf("Expected no bytes, got %d", state.Bytes)
			}
			return nil
		})
	})
}

func TestFileStoreBitRot(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		created := time.Now()
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		// Store some messages. Does not really matter how many.
		subj, msg := "foo", []byte("Hello World")
		toStore := 100
		for i := 0; i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != uint64(toStore) {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}

		if ld := fs.checkMsgs(); ld != nil && len(ld.Msgs) > 0 {
			t.Fatalf("Expected to have no corrupt msgs, got %d", len(ld.Msgs))
		}

		for i := 0; i < 10; i++ {
			// Now twiddle some bits.
			fs.mu.Lock()
			lmb := fs.lmb
			contents, err := os.ReadFile(lmb.mfn)
			require_NoError(t, err)
			require_True(t, len(contents) > 0)

			var index int
			for {
				index = rand.Intn(len(contents))
				// Reverse one byte anywhere.
				b := contents[index]
				contents[index] = bits.Reverse8(b)
				if b != contents[index] {
					break
				}
			}
			os.WriteFile(lmb.mfn, contents, 0644)
			fs.mu.Unlock()

			ld := fs.checkMsgs()
			if len(ld.Msgs) > 0 {
				break
			}
			// If our bitrot caused us to not be able to recover any messages we can break as well.
			if state := fs.State(); state.Msgs == 0 {
				break
			}
			// Fail the test if we have tried the 10 times and still did not
			// get any corruption report.
			if i == 9 {
				t.Fatalf("Expected to have corrupt msgs got none: changed [%d]", index)
			}
		}

		// Make sure we can restore.
		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		// checkMsgs will repair the underlying store, so checkMsgs should be clean now.
		if ld := fs.checkMsgs(); ld != nil {
			// If we have no msgs left this will report the head msgs as lost again.
			if state := fs.State(); state.Msgs > 0 {
				t.Fatalf("Expected no errors restoring checked and fixed filestore, got %+v", ld)
			}
		}
	})
}

func TestFileStoreEraseMsg(t *testing.T) {
	// Just do no encryption, etc.
	fcfg := FileStoreConfig{StoreDir: t.TempDir()}
	fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage})
	require_NoError(t, err)
	defer fs.Stop()

	subj, msg := "foo", []byte("Hello World")
	fs.StoreMsg(subj, nil, msg)
	fs.StoreMsg(subj, nil, msg) // To keep block from being deleted.
	var smv StoreMsg
	sm, err := fs.LoadMsg(1, &smv)
	if err != nil {
		t.Fatalf("Unexpected error looking up msg: %v", err)
	}
	if !bytes.Equal(msg, sm.msg) {
		t.Fatalf("Expected same msg, got %q vs %q", sm.msg, msg)
	}
	if removed, _ := fs.EraseMsg(1); !removed {
		t.Fatalf("Expected erase msg to return success")
	}
	if sm2, _ := fs.msgForSeq(1, nil); sm2 != nil {
		t.Fatalf("Expected msg to be erased")
	}
	fs.checkAndFlushAllBlocks()

	// Now look on disk as well.
	rl := fileStoreMsgSize(subj, nil, msg)
	buf := make([]byte, rl)
	fp, err := os.Open(filepath.Join(fcfg.StoreDir, msgDir, fmt.Sprintf(blkScan, 1)))
	if err != nil {
		t.Fatalf("Error opening msg block file: %v", err)
	}
	defer fp.Close()

	fp.ReadAt(buf, 0)
	fs.mu.RLock()
	mb := fs.blks[0]
	fs.mu.RUnlock()
	mb.mu.Lock()
	sm, err = mb.msgFromBuf(buf, nil, nil)
	mb.mu.Unlock()
	if err != nil {
		t.Fatalf("error reading message from block: %v", err)
	}
	if sm.subj == subj {
		t.Fatalf("Expected the subjects to be different")
	}
	if sm.seq != 0 && sm.seq&ebit == 0 {
		t.Fatalf("Expected seq to be 0, marking as deleted, got %d", sm.seq)
	}
	if sm.ts != 0 {
		t.Fatalf("Expected timestamp to be 0, got %d", sm.ts)
	}
	if bytes.Equal(sm.msg, msg) {
		t.Fatalf("Expected message body to be randomized")
	}
}

func TestFileStoreEraseAndNoIndexRecovery(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		created := time.Now()
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World")
		toStore := 100
		for i := 0; i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != uint64(toStore) {
			t.Fatalf("Expected %d msgs, got %d", toStore, state.Msgs)
		}

		// Erase the even messages.
		for i := 2; i <= toStore; i += 2 {
			if removed, _ := fs.EraseMsg(uint64(i)); !removed {
				t.Fatalf("Expected erase msg to return true")
			}
		}
		state = fs.State()
		if state.Msgs != uint64(toStore/2) {
			t.Fatalf("Expected %d msgs, got %d", toStore/2, state.Msgs)
		}

		// Stop and remove the optional index file.
		fs.Stop()
		ifn := filepath.Join(fcfg.StoreDir, msgDir, fmt.Sprintf(indexScan, 1))
		os.Remove(ifn)

		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		state = fs.State()
		if state.Msgs != uint64(toStore/2) {
			t.Fatalf("Expected %d msgs, got %d", toStore/2, state.Msgs)
		}

		for i := 2; i <= toStore; i += 2 {
			if _, err := fs.LoadMsg(uint64(i), nil); err == nil {
				t.Fatalf("Expected error looking up seq %d that should be erased", i)
			}
		}
	})
}

func TestFileStoreMeta(t *testing.T) {
	// Just do no encryption, etc.
	fcfg := FileStoreConfig{StoreDir: t.TempDir()}
	mconfig := StreamConfig{Name: "ZZ-22-33", Storage: FileStorage, Subjects: []string{"foo.*"}, Replicas: 22}
	fs, err := newFileStore(fcfg, mconfig)
	require_NoError(t, err)
	defer fs.Stop()

	metafile := filepath.Join(fcfg.StoreDir, JetStreamMetaFile)
	metasum := filepath.Join(fcfg.StoreDir, JetStreamMetaFileSum)

	// Test to make sure meta file and checksum are present.
	if _, err := os.Stat(metafile); os.IsNotExist(err) {
		t.Fatalf("Expected metafile %q to exist", metafile)
	}
	if _, err := os.Stat(metasum); os.IsNotExist(err) {
		t.Fatalf("Expected metafile's checksum %q to exist", metasum)
	}

	buf, err := os.ReadFile(metafile)
	if err != nil {
		t.Fatalf("Error reading metafile: %v", err)
	}
	var mconfig2 StreamConfig
	if err := json.Unmarshal(buf, &mconfig2); err != nil {
		t.Fatalf("Error unmarshalling: %v", err)
	}
	if !reflect.DeepEqual(mconfig, mconfig2) {
		t.Fatalf("Stream configs not equal, got %+v vs %+v", mconfig2, mconfig)
	}
	checksum, err := os.ReadFile(metasum)
	if err != nil {
		t.Fatalf("Error reading metafile checksum: %v", err)
	}

	fs.mu.Lock()
	fs.hh.Reset()
	fs.hh.Write(buf)
	mychecksum := hex.EncodeToString(fs.hh.Sum(nil))
	fs.mu.Unlock()

	if mychecksum != string(checksum) {
		t.Fatalf("Checksums do not match, got %q vs %q", mychecksum, checksum)
	}

	// Now create a consumer. Same deal for them.
	oconfig := ConsumerConfig{
		DeliverSubject: "d",
		FilterSubject:  "foo",
		AckPolicy:      AckAll,
	}
	oname := "obs22"
	obs, err := fs.ConsumerStore(oname, &oconfig)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	ometafile := filepath.Join(fcfg.StoreDir, consumerDir, oname, JetStreamMetaFile)
	ometasum := filepath.Join(fcfg.StoreDir, consumerDir, oname, JetStreamMetaFileSum)

	// Test to make sure meta file and checksum are present.
	if _, err := os.Stat(ometafile); os.IsNotExist(err) {
		t.Fatalf("Expected consumer metafile %q to exist", ometafile)
	}
	if _, err := os.Stat(ometasum); os.IsNotExist(err) {
		t.Fatalf("Expected consumer metafile's checksum %q to exist", ometasum)
	}

	buf, err = os.ReadFile(ometafile)
	if err != nil {
		t.Fatalf("Error reading consumer metafile: %v", err)
	}

	var oconfig2 ConsumerConfig
	if err := json.Unmarshal(buf, &oconfig2); err != nil {
		t.Fatalf("Error unmarshalling: %v", err)
	}
	// Since we set name we will get that back now.
	oconfig.Name = oname
	if !reflect.DeepEqual(oconfig2, oconfig) {
		t.Fatalf("Consumer configs not equal, got %+v vs %+v", oconfig2, oconfig)
	}
	checksum, err = os.ReadFile(ometasum)
	if err != nil {
		t.Fatalf("Error reading consumer metafile checksum: %v", err)
	}

	hh := obs.(*consumerFileStore).hh
	hh.Reset()
	hh.Write(buf)
	mychecksum = hex.EncodeToString(hh.Sum(nil))
	if mychecksum != string(checksum) {
		t.Fatalf("Checksums do not match, got %q vs %q", mychecksum, checksum)
	}
}

func TestFileStoreWriteAndReadSameBlock(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		subj, msg := "foo", []byte("Hello World!")

		for i := uint64(1); i <= 10; i++ {
			fs.StoreMsg(subj, nil, msg)
			if _, err := fs.LoadMsg(i, nil); err != nil {
				t.Fatalf("Error loading %d: %v", i, err)
			}
		}
	})
}

func TestFileStoreAndRetrieveMultiBlock(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		subj, msg := "foo", []byte("Hello World!")
		storedMsgSize := fileStoreMsgSize(subj, nil, msg)

		fcfg.BlockSize = 4 * storedMsgSize
		created := time.Now()

		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		for i := 0; i < 20; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != 20 {
			t.Fatalf("Expected 20 msgs, got %d", state.Msgs)
		}
		fs.Stop()

		fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		var smv StoreMsg
		for i := uint64(1); i <= 20; i++ {
			if _, err := fs.LoadMsg(i, &smv); err != nil {
				t.Fatalf("Error loading %d: %v", i, err)
			}
		}
	})
}

func TestFileStoreCollapseDmap(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		subj, msg := "foo", []byte("Hello World!")
		storedMsgSize := fileStoreMsgSize(subj, nil, msg)

		fcfg.BlockSize = 4 * storedMsgSize
		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		for i := 0; i < 10; i++ {
			fs.StoreMsg(subj, nil, msg)
		}
		state := fs.State()
		if state.Msgs != 10 {
			t.Fatalf("Expected 10 msgs, got %d", state.Msgs)
		}

		checkDmapTotal := func(total int) {
			t.Helper()
			if nde := fs.dmapEntries(); nde != total {
				t.Fatalf("Expecting only %d entries, got %d", total, nde)
			}
		}

		checkFirstSeq := func(seq uint64) {
			t.Helper()
			state := fs.State()
			if state.FirstSeq != seq {
				t.Fatalf("Expected first seq to be %d, got %d", seq, state.FirstSeq)
			}
		}

		// Now remove some out of order, forming gaps and entries in dmaps.
		fs.RemoveMsg(2)
		checkFirstSeq(1)
		fs.RemoveMsg(4)
		checkFirstSeq(1)
		fs.RemoveMsg(8)
		checkFirstSeq(1)

		state = fs.State()
		if state.Msgs != 7 {
			t.Fatalf("Expected 7 msgs, got %d", state.Msgs)
		}

		checkDmapTotal(3)

		// Close gaps..
		fs.RemoveMsg(1)
		checkDmapTotal(2)
		checkFirstSeq(3)

		fs.RemoveMsg(3)
		checkDmapTotal(1)
		checkFirstSeq(5)

		fs.RemoveMsg(5)
		checkDmapTotal(1)
		checkFirstSeq(6)

		fs.RemoveMsg(7)
		checkDmapTotal(2)

		fs.RemoveMsg(6)
		checkDmapTotal(0)
	})
}

func TestFileStoreReadCache(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		fcfg.CacheExpire = 100 * time.Millisecond

		subj, msg := "foo.bar", make([]byte, 1024)
		storedMsgSize := fileStoreMsgSize(subj, nil, msg)

		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		toStore := 500
		totalBytes := uint64(toStore) * storedMsgSize

		for i := 0; i < toStore; i++ {
			fs.StoreMsg(subj, nil, msg)
		}

		// Wait for cache to go to zero.
		checkFor(t, time.Second, 10*time.Millisecond, func() error {
			if csz := fs.cacheSize(); csz != 0 {
				return fmt.Errorf("cache size not 0, got %s", friendlyBytes(int64(csz)))
			}
			return nil
		})

		fs.LoadMsg(1, nil)
		if csz := fs.cacheSize(); csz != totalBytes {
			t.Fatalf("Expected all messages to be cached, got %d vs %d", csz, totalBytes)
		}
		// Should expire and be removed.
		checkFor(t, time.Second, 10*time.Millisecond, func() error {
			if csz := fs.cacheSize(); csz != 0 {
				return fmt.Errorf("cache size not 0, got %s", friendlyBytes(int64(csz)))
			}
			return nil
		})
		if cls := fs.cacheLoads(); cls != 1 {
			t.Fatalf("Expected only 1 cache load, got %d", cls)
		}
		// Now make sure we do not reload cache if there is activity.
		fs.LoadMsg(1, nil)
		timeout := time.Now().Add(250 * time.Millisecond)
		for time.Now().Before(timeout) {
			if cls := fs.cacheLoads(); cls != 2 {
				t.Fatalf("cache loads not 2, got %d", cls)
			}
			time.Sleep(5 * time.Millisecond)
			fs.LoadMsg(1, nil) // register activity.
		}
	})
}

func TestFileStorePartialCacheExpiration(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		cexp := 10 * time.Millisecond
		fcfg.CacheExpire = cexp

		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		fs.StoreMsg("foo", nil, []byte("msg1"))

		// Should expire and be removed.
		time.Sleep(2 * cexp)
		fs.StoreMsg("bar", nil, []byte("msg2"))

		// Again wait for cache to expire.
		time.Sleep(2 * cexp)
		if _, err := fs.LoadMsg(1, nil); err != nil {
			t.Fatalf("Error loading message 1: %v", err)
		}
	})
}

func TestFileStorePartialIndexes(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		cexp := 10 * time.Millisecond
		fcfg.CacheExpire = cexp

		fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		toSend := 5
		for i := 0; i < toSend; i++ {
			fs.StoreMsg("foo", nil, []byte("ok-1"))
		}

		// Now wait until the cache expires, including the index.
		fs.mu.Lock()
		mb := fs.blks[0]
		fs.mu.Unlock()

		// Force idx to expire by resetting last remove ts.
		mb.mu.Lock()
		mb.llts = mb.llts - int64(defaultCacheBufferExpiration*2)
		mb.mu.Unlock()

		checkFor(t, time.Second, 10*time.Millisecond, func() error {
			mb.mu.Lock()
			defer mb.mu.Unlock()
			if mb.cache == nil || len(mb.cache.idx) == 0 {
				return nil
			}
			return fmt.Errorf("Index not empty")
		})

		// Create a partial cache by adding more msgs.
		for i := 0; i < toSend; i++ {
			fs.StoreMsg("foo", nil, []byte("ok-2"))
		}
		// If we now load a message from the second half and do not correctly
		// detect that the idx is partial, this will panic.
		if _, err := fs.LoadMsg(8, nil); err != nil {
			t.Fatalf("Error loading %d: %v", 8, err)
		}
	})
}

func TestFileStoreSnapshot(t *testing.T) {
	testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
		subj, msg := "foo", []byte("Hello Snappy!")
		scfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage}

		fs, err := newFileStoreWithCreated(fcfg, scfg, time.Now(), prf(&fcfg), nil)
		require_NoError(t, err)
		defer fs.Stop()

		toSend := 2233
		for i := 0; i < toSend; i++ {
			fs.StoreMsg(subj, nil, msg)
		}

		// Create a few consumers.
		o1, err := fs.ConsumerStore("o22", &ConsumerConfig{})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		o2, err := fs.ConsumerStore("o33", &ConsumerConfig{})
		if err != nil {
			t.Fatalf("Unexpected error: %v", err)
		}
		state := &ConsumerState{}
		state.Delivered.Consumer = 100
		state.Delivered.Stream = 100
		state.AckFloor.Consumer = 22
		state.AckFloor.Stream = 22

		if err := o1.Update(state); err != nil {
			t.Fatalf("Unexpected error updating state: %v", err)
		}
		state.AckFloor.Consumer = 33
		state.AckFloor.Stream = 33

		if err := o2.Update(state); err != nil {
			t.Fatalf("Unexpected error updating state: %v", err)
		}

		snapshot := func() []byte {
			t.Helper()
			r, err := fs.Snapshot(5*time.Second, true, true)
			if err != nil {
				t.Fatalf("Error creating snapshot")
			}
			snapshot, err := io.ReadAll(r.Reader)
			if err != nil {
				t.Fatalf("Error reading snapshot")
			}
			return snapshot
		}

		// This will unzip the snapshot and create a new filestore that will recover the state.
		// We will compare the states for this vs the original one.
1786 verifySnapshot := func(snap []byte) { 1787 t.Helper() 1788 r := bytes.NewReader(snap) 1789 tr := tar.NewReader(s2.NewReader(r)) 1790 1791 rstoreDir := t.TempDir() 1792 1793 for { 1794 hdr, err := tr.Next() 1795 if err == io.EOF { 1796 break // End of archive 1797 } 1798 if err != nil { 1799 t.Fatalf("Error getting next entry from snapshot: %v", err) 1800 } 1801 fpath := filepath.Join(rstoreDir, filepath.Clean(hdr.Name)) 1802 pdir := filepath.Dir(fpath) 1803 os.MkdirAll(pdir, 0755) 1804 fd, err := os.OpenFile(fpath, os.O_CREATE|os.O_RDWR, 0600) 1805 if err != nil { 1806 t.Fatalf("Error opening file[%s]: %v", fpath, err) 1807 } 1808 if _, err := io.Copy(fd, tr); err != nil { 1809 t.Fatalf("Error writing file[%s]: %v", fpath, err) 1810 } 1811 fd.Close() 1812 } 1813 1814 fcfg.StoreDir = rstoreDir 1815 fsr, err := newFileStoreWithCreated(fcfg, scfg, time.Now(), prf(&fcfg), nil) 1816 require_NoError(t, err) 1817 defer fsr.Stop() 1818 state := fs.State() 1819 rstate := fsr.State() 1820 1821 // FIXME(dlc) 1822 // Right now the upper layers in JetStream recover the consumers and do not expect 1823 // the lower layers to do that. So for now blank that out of our original state. 1824 // Will have more exhaustive tests in jetstream_test.go. 1825 state.Consumers = 0 1826 1827 // Just check the state. 1828 if !reflect.DeepEqual(rstate, state) { 1829 t.Fatalf("Restored state does not match:\n%+v\n\n%+v", rstate, state) 1830 } 1831 } 1832 1833 // Simple case first. 1834 snap := snapshot() 1835 verifySnapshot(snap) 1836 1837 // Remove first 100 messages. 1838 for i := 1; i <= 100; i++ { 1839 fs.RemoveMsg(uint64(i)) 1840 } 1841 1842 snap = snapshot() 1843 verifySnapshot(snap) 1844 1845 // Now sporadic messages inside the stream. 1846 total := int64(toSend - 100) 1847 // Delete 50 random messages. 1848 for i := 0; i < 50; i++ { 1849 seq := uint64(rand.Int63n(total) + 101) 1850 fs.RemoveMsg(seq) 1851 } 1852 1853 snap = snapshot() 1854 verifySnapshot(snap) 1855 1856 // Make sure compaction works with snapshots. 1857 fs.mu.RLock() 1858 for _, mb := range fs.blks { 1859 // Should not call compact on last msg block. 1860 if mb != fs.lmb { 1861 mb.mu.Lock() 1862 mb.compact() 1863 mb.mu.Unlock() 1864 } 1865 } 1866 fs.mu.RUnlock() 1867 1868 snap = snapshot() 1869 verifySnapshot(snap) 1870 1871 // Now check to make sure that we get the correct error when trying to delete or erase 1872 // a message when a snapshot is in progress and that closing the reader releases that condition. 1873 sr, err := fs.Snapshot(5*time.Second, false, true) 1874 if err != nil { 1875 t.Fatalf("Error creating snapshot") 1876 } 1877 if _, err := fs.RemoveMsg(122); err != ErrStoreSnapshotInProgress { 1878 t.Fatalf("Did not get the correct error on remove during snapshot: %v", err) 1879 } 1880 if _, err := fs.EraseMsg(122); err != ErrStoreSnapshotInProgress { 1881 t.Fatalf("Did not get the correct error on remove during snapshot: %v", err) 1882 } 1883 1884 // Now make sure we can do these when we close the reader and release the snapshot condition. 1885 sr.Reader.Close() 1886 checkFor(t, time.Second, 10*time.Millisecond, func() error { 1887 if _, err := fs.RemoveMsg(122); err != nil { 1888 return fmt.Errorf("Got an error on remove after snapshot: %v", err) 1889 } 1890 return nil 1891 }) 1892 1893 // Make sure if we do not read properly then it will close the writer and report an error. 
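		// Use a deliberately short 25ms deadline, then sleep past it so the snapshot writer
		// gives up; the subsequent Read should observe io.EOF instead of snapshot data.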
1894 sr, err = fs.Snapshot(25*time.Millisecond, false, false) 1895 if err != nil { 1896 t.Fatalf("Error creating snapshot") 1897 } 1898 1899 // Cause snapshot to timeout. 1900 time.Sleep(50 * time.Millisecond) 1901 // Read should fail 1902 var buf [32]byte 1903 if _, err := sr.Reader.Read(buf[:]); err != io.EOF { 1904 t.Fatalf("Expected read to produce an error, got none") 1905 } 1906 }) 1907 } 1908 1909 func TestFileStoreConsumer(t *testing.T) { 1910 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 1911 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 1912 require_NoError(t, err) 1913 defer fs.Stop() 1914 1915 o, err := fs.ConsumerStore("obs22", &ConsumerConfig{}) 1916 if err != nil { 1917 t.Fatalf("Unexpected error: %v", err) 1918 } 1919 if state, err := o.State(); err != nil || state.Delivered.Consumer != 0 { 1920 t.Fatalf("Unexpected state or error: %v", err) 1921 } 1922 1923 state := &ConsumerState{} 1924 1925 updateAndCheck := func() { 1926 t.Helper() 1927 if err := o.Update(state); err != nil { 1928 t.Fatalf("Unexpected error updating state: %v", err) 1929 } 1930 s2, err := o.State() 1931 if err != nil { 1932 t.Fatalf("Unexpected error getting state: %v", err) 1933 } 1934 if !reflect.DeepEqual(state, s2) { 1935 t.Fatalf("State is not the same: wanted %+v got %+v", state, s2) 1936 } 1937 } 1938 1939 shouldFail := func() { 1940 t.Helper() 1941 if err := o.Update(state); err == nil { 1942 t.Fatalf("Expected an error and got none") 1943 } 1944 } 1945 1946 state.Delivered.Consumer = 1 1947 state.Delivered.Stream = 22 1948 updateAndCheck() 1949 1950 state.Delivered.Consumer = 100 1951 state.Delivered.Stream = 122 1952 state.AckFloor.Consumer = 50 1953 state.AckFloor.Stream = 123 1954 // This should fail, bad state. 1955 shouldFail() 1956 // So should this. 1957 state.AckFloor.Consumer = 200 1958 state.AckFloor.Stream = 100 1959 shouldFail() 1960 1961 // Should succeed 1962 state.AckFloor.Consumer = 50 1963 state.AckFloor.Stream = 72 1964 updateAndCheck() 1965 1966 tn := time.Now().UnixNano() 1967 1968 // We should sanity check pending here as well, so will check if a pending value is below 1969 // ack floor or above delivered. 1970 state.Pending = map[uint64]*Pending{70: {70, tn}} 1971 shouldFail() 1972 state.Pending = map[uint64]*Pending{140: {140, tn}} 1973 shouldFail() 1974 state.Pending = map[uint64]*Pending{72: {72, tn}} // exact on floor should fail 1975 shouldFail() 1976 1977 // Put timestamps a second apart. 1978 // We will downsample to second resolution to save space. So setup our times 1979 // to reflect that. 1980 ago := time.Now().Add(-30 * time.Second).Truncate(time.Second) 1981 nt := func() *Pending { 1982 ago = ago.Add(time.Second) 1983 return &Pending{0, ago.UnixNano()} 1984 } 1985 // Should succeed. 1986 state.Pending = map[uint64]*Pending{75: nt(), 80: nt(), 83: nt(), 90: nt(), 111: nt()} 1987 updateAndCheck() 1988 1989 // Now do redlivery, but first with no pending. 1990 state.Pending = nil 1991 state.Redelivered = map[uint64]uint64{22: 3, 44: 8} 1992 updateAndCheck() 1993 1994 // All together. 1995 state.Pending = map[uint64]*Pending{75: nt(), 80: nt(), 83: nt(), 90: nt(), 111: nt()} 1996 updateAndCheck() 1997 1998 // Large one 1999 state.Delivered.Consumer = 10000 2000 state.Delivered.Stream = 10000 2001 state.AckFloor.Consumer = 100 2002 state.AckFloor.Stream = 100 2003 // Generate 8k pending. 
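		// Fill the map with unique random stream sequences in the range 101-9990 until it
		// holds 8192 entries, exercising encode/decode on a large pending map.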
2004 state.Pending = make(map[uint64]*Pending) 2005 for len(state.Pending) < 8192 { 2006 seq := uint64(rand.Intn(9890) + 101) 2007 if _, ok := state.Pending[seq]; !ok { 2008 state.Pending[seq] = nt() 2009 } 2010 } 2011 updateAndCheck() 2012 2013 state.Pending = nil 2014 state.AckFloor.Consumer = 10000 2015 state.AckFloor.Stream = 10000 2016 updateAndCheck() 2017 }) 2018 } 2019 2020 func TestFileStoreConsumerEncodeDecodeRedelivered(t *testing.T) { 2021 state := &ConsumerState{} 2022 2023 state.Delivered.Consumer = 100 2024 state.Delivered.Stream = 100 2025 state.AckFloor.Consumer = 50 2026 state.AckFloor.Stream = 50 2027 2028 state.Redelivered = map[uint64]uint64{122: 3, 144: 8} 2029 buf := encodeConsumerState(state) 2030 2031 rstate, err := decodeConsumerState(buf) 2032 if err != nil { 2033 t.Fatalf("Unexpected error: %v", err) 2034 } 2035 if !reflect.DeepEqual(state, rstate) { 2036 t.Fatalf("States do not match: %+v vs %+v", state, rstate) 2037 } 2038 } 2039 2040 func TestFileStoreConsumerEncodeDecodePendingBelowStreamAckFloor(t *testing.T) { 2041 state := &ConsumerState{} 2042 2043 state.Delivered.Consumer = 1192 2044 state.Delivered.Stream = 10185 2045 state.AckFloor.Consumer = 1189 2046 state.AckFloor.Stream = 10815 2047 2048 now := time.Now().Round(time.Second).Add(-10 * time.Second).UnixNano() 2049 state.Pending = map[uint64]*Pending{ 2050 10782: {1190, now}, 2051 10810: {1191, now + int64(time.Second)}, 2052 10815: {1192, now + int64(2*time.Second)}, 2053 } 2054 buf := encodeConsumerState(state) 2055 2056 rstate, err := decodeConsumerState(buf) 2057 if err != nil { 2058 t.Fatalf("Unexpected error: %v", err) 2059 } 2060 if len(rstate.Pending) != 3 { 2061 t.Fatalf("Invalid pending: %v", rstate.Pending) 2062 } 2063 for k, v := range state.Pending { 2064 rv, ok := rstate.Pending[k] 2065 if !ok { 2066 t.Fatalf("Did not find sseq=%v", k) 2067 } 2068 if !reflect.DeepEqual(v, rv) { 2069 t.Fatalf("Pending for sseq=%v should be %+v, got %+v", k, v, rv) 2070 } 2071 } 2072 state.Pending, rstate.Pending = nil, nil 2073 if !reflect.DeepEqual(*state, *rstate) { 2074 t.Fatalf("States do not match: %+v vs %+v", state, rstate) 2075 } 2076 } 2077 2078 func TestFileStoreWriteFailures(t *testing.T) { 2079 // This test should be run inside an environment where this directory 2080 // has a limited size. 2081 // E.g. Docker 2082 // docker run -ti --tmpfs /jswf_test:rw,size=32k --rm -v ~/Development/go/src:/go/src -w /go/src/github.com/nats-io/nats-server/ golang:1.21 /bin/bash 2083 tdir := filepath.Join("/", "jswf_test") 2084 if stat, err := os.Stat(tdir); err != nil || !stat.IsDir() { 2085 t.SkipNow() 2086 } 2087 2088 storeDir := filepath.Join(tdir, JetStreamStoreDir) 2089 os.MkdirAll(storeDir, 0755) 2090 2091 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2092 fcfg.StoreDir = storeDir 2093 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage} 2094 created := time.Now() 2095 2096 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 2097 require_NoError(t, err) 2098 defer fs.Stop() 2099 2100 subj, msg := "foo", []byte("Hello Write Failures!") 2101 2102 var lseq uint64 2103 // msz about will be ~54 bytes, so if limit is 32k trying to send 1000 will fail at some point. 
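		// Record the sequence of the first store attempt that fails; the state checks below
		// expect only the writes that succeeded to be reflected.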
2104 for i := 1; i <= 1000; i++ { 2105 if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil { 2106 lseq = uint64(i) 2107 break 2108 } 2109 } 2110 if lseq == 0 { 2111 t.Fatalf("Expected to get a failure but did not") 2112 } 2113 2114 state := fs.State() 2115 2116 if state.LastSeq != lseq-1 { 2117 t.Fatalf("Expected last seq to be %d, got %d\n", lseq-1, state.LastSeq) 2118 } 2119 if state.Msgs != lseq-1 { 2120 t.Fatalf("Expected total msgs to be %d, got %d\n", lseq-1, state.Msgs) 2121 } 2122 if _, err := fs.LoadMsg(lseq, nil); err == nil { 2123 t.Fatalf("Expected error loading seq that failed, got none") 2124 } 2125 // Loading should still work. 2126 if _, err := fs.LoadMsg(1, nil); err != nil { 2127 t.Fatalf("Unexpected error: %v", err) 2128 } 2129 2130 // Make sure we recover same state. 2131 fs.Stop() 2132 2133 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 2134 require_NoError(t, err) 2135 defer fs.Stop() 2136 2137 state2 := fs.State() 2138 2139 // Ignore lost state. 2140 state.Lost, state2.Lost = nil, nil 2141 if !reflect.DeepEqual(state2, state) { 2142 t.Fatalf("Expected recovered state to be the same\n%+v\nvs\n%+v\n", state2, state) 2143 } 2144 2145 // We should still fail here. 2146 for i := 1; i <= 100; i++ { 2147 _, _, err = fs.StoreMsg(subj, nil, msg) 2148 if err != nil { 2149 break 2150 } 2151 } 2152 require_Error(t, err) 2153 2154 lseq = fs.State().LastSeq + 1 2155 2156 // Purge should help. 2157 if _, err := fs.Purge(); err != nil { 2158 t.Fatalf("Unexpected error: %v", err) 2159 } 2160 // Wait for purge to complete its out of band processing. 2161 time.Sleep(50 * time.Millisecond) 2162 2163 // Check we will fail again in same spot. 2164 for i := 1; i <= 1000; i++ { 2165 if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil { 2166 if i != int(lseq) { 2167 t.Fatalf("Expected to fail after purge about the same spot, wanted %d got %d", lseq, i) 2168 } 2169 break 2170 } 2171 } 2172 }) 2173 } 2174 2175 func TestFileStorePerf(t *testing.T) { 2176 // Comment out to run, holding place for now. 
2177 t.SkipNow() 2178 2179 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2180 fcfg.AsyncFlush = true 2181 2182 subj, msg := "foo", make([]byte, 1024-33) 2183 for i := 0; i < len(msg); i++ { 2184 msg[i] = 'D' 2185 } 2186 storedMsgSize := fileStoreMsgSize(subj, nil, msg) 2187 2188 // 5GB 2189 toStore := 5 * 1024 * 1024 * 1024 / storedMsgSize 2190 2191 fmt.Printf("storing %d msgs of %s each, totalling %s\n", 2192 toStore, 2193 friendlyBytes(int64(storedMsgSize)), 2194 friendlyBytes(int64(toStore*storedMsgSize)), 2195 ) 2196 2197 created := time.Now() 2198 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil) 2199 require_NoError(t, err) 2200 defer fs.Stop() 2201 2202 start := time.Now() 2203 for i := 0; i < int(toStore); i++ { 2204 fs.StoreMsg(subj, nil, msg) 2205 } 2206 fs.Stop() 2207 2208 tt := time.Since(start) 2209 fmt.Printf("time to store is %v\n", tt) 2210 fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds()) 2211 fmt.Printf("%s per sec\n", friendlyBytes(int64(float64(toStore*storedMsgSize)/tt.Seconds()))) 2212 2213 fmt.Printf("Filesystem cache flush, paused 5 seconds.\n\n") 2214 time.Sleep(5 * time.Second) 2215 2216 fmt.Printf("Restoring..\n") 2217 start = time.Now() 2218 fcfg.AsyncFlush = false 2219 fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil) 2220 require_NoError(t, err) 2221 defer fs.Stop() 2222 2223 fmt.Printf("time to restore is %v\n\n", time.Since(start)) 2224 2225 fmt.Printf("LOAD: reading %d msgs of %s each, totalling %s\n", 2226 toStore, 2227 friendlyBytes(int64(storedMsgSize)), 2228 friendlyBytes(int64(toStore*storedMsgSize)), 2229 ) 2230 2231 var smv StoreMsg 2232 start = time.Now() 2233 for i := uint64(1); i <= toStore; i++ { 2234 if _, err := fs.LoadMsg(i, &smv); err != nil { 2235 t.Fatalf("Error loading %d: %v", i, err) 2236 } 2237 } 2238 2239 tt = time.Since(start) 2240 fmt.Printf("time to read all back messages is %v\n", tt) 2241 fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds()) 2242 fmt.Printf("%s per sec\n", friendlyBytes(int64(float64(toStore*storedMsgSize)/tt.Seconds()))) 2243 2244 // Do again to test skip for hash.. 2245 fmt.Printf("\nSKIP CHECKSUM: reading %d msgs of %s each, totalling %s\n", 2246 toStore, 2247 friendlyBytes(int64(storedMsgSize)), 2248 friendlyBytes(int64(toStore*storedMsgSize)), 2249 ) 2250 2251 start = time.Now() 2252 for i := uint64(1); i <= toStore; i++ { 2253 if _, err := fs.LoadMsg(i, &smv); err != nil { 2254 t.Fatalf("Error loading %d: %v", i, err) 2255 } 2256 } 2257 2258 tt = time.Since(start) 2259 fmt.Printf("time to read all back messages is %v\n", tt) 2260 fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds()) 2261 fmt.Printf("%s per sec\n", friendlyBytes(int64(float64(toStore*storedMsgSize)/tt.Seconds()))) 2262 2263 fs.Stop() 2264 2265 fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil) 2266 require_NoError(t, err) 2267 defer fs.Stop() 2268 2269 fmt.Printf("\nremoving [in order] %d msgs of %s each, totalling %s\n", 2270 toStore, 2271 friendlyBytes(int64(storedMsgSize)), 2272 friendlyBytes(int64(toStore*storedMsgSize)), 2273 ) 2274 2275 start = time.Now() 2276 // For reverse order. 
2277 //for i := toStore; i > 0; i-- { 2278 for i := uint64(1); i <= toStore; i++ { 2279 fs.RemoveMsg(i) 2280 } 2281 fs.Stop() 2282 2283 tt = time.Since(start) 2284 fmt.Printf("time to remove all messages is %v\n", tt) 2285 fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds()) 2286 fmt.Printf("%s per sec\n", friendlyBytes(int64(float64(toStore*storedMsgSize)/tt.Seconds()))) 2287 2288 fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil) 2289 require_NoError(t, err) 2290 defer fs.Stop() 2291 2292 state := fs.State() 2293 if state.Msgs != 0 { 2294 t.Fatalf("Expected no msgs, got %d", state.Msgs) 2295 } 2296 if state.Bytes != 0 { 2297 t.Fatalf("Expected no bytes, got %d", state.Bytes) 2298 } 2299 }) 2300 } 2301 2302 func TestFileStoreReadBackMsgPerf(t *testing.T) { 2303 // Comment out to run, holding place for now. 2304 t.SkipNow() 2305 2306 subj := "foo" 2307 msg := []byte("ABCDEFGH") // Smaller shows problems more. 2308 2309 storedMsgSize := fileStoreMsgSize(subj, nil, msg) 2310 2311 // Make sure we store 2 blocks. 2312 toStore := defaultLargeBlockSize * 2 / storedMsgSize 2313 2314 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2315 fmt.Printf("storing %d msgs of %s each, totalling %s\n", 2316 toStore, 2317 friendlyBytes(int64(storedMsgSize)), 2318 friendlyBytes(int64(toStore*storedMsgSize)), 2319 ) 2320 2321 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2322 require_NoError(t, err) 2323 defer fs.Stop() 2324 2325 start := time.Now() 2326 for i := 0; i < int(toStore); i++ { 2327 fs.StoreMsg(subj, nil, msg) 2328 } 2329 2330 tt := time.Since(start) 2331 fmt.Printf("time to store is %v\n", tt) 2332 fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds()) 2333 fmt.Printf("%s per sec\n", friendlyBytes(int64(float64(toStore*storedMsgSize)/tt.Seconds()))) 2334 2335 // We should not have cached here with no reads. 2336 // Pick something towards end of the block. 2337 index := defaultLargeBlockSize/storedMsgSize - 22 2338 start = time.Now() 2339 fs.LoadMsg(index, nil) 2340 fmt.Printf("Time to load first msg [%d] = %v\n", index, time.Since(start)) 2341 2342 start = time.Now() 2343 fs.LoadMsg(index+2, nil) 2344 fmt.Printf("Time to load second msg [%d] = %v\n", index+2, time.Since(start)) 2345 }) 2346 } 2347 2348 // This test is testing an upper level stream with a message or byte limit. 2349 // Even though this is 1, any limit would trigger same behavior once the limit was reached 2350 // and we were adding and removing. 2351 func TestFileStoreStoreLimitRemovePerf(t *testing.T) { 2352 // Comment out to run, holding place for now. 
2353 t.SkipNow() 2354 2355 subj, msg := "foo", make([]byte, 1024-33) 2356 for i := 0; i < len(msg); i++ { 2357 msg[i] = 'D' 2358 } 2359 storedMsgSize := fileStoreMsgSize(subj, nil, msg) 2360 2361 // 1GB 2362 toStore := 1 * 1024 * 1024 * 1024 / storedMsgSize 2363 2364 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2365 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2366 require_NoError(t, err) 2367 defer fs.Stop() 2368 2369 fs.RegisterStorageUpdates(func(md, bd int64, seq uint64, subj string) {}) 2370 2371 fmt.Printf("storing and removing (limit 1) %d msgs of %s each, totalling %s\n", 2372 toStore, 2373 friendlyBytes(int64(storedMsgSize)), 2374 friendlyBytes(int64(toStore*storedMsgSize)), 2375 ) 2376 2377 start := time.Now() 2378 for i := 0; i < int(toStore); i++ { 2379 seq, _, err := fs.StoreMsg(subj, nil, msg) 2380 if err != nil { 2381 t.Fatalf("Unexpected error storing message: %v", err) 2382 } 2383 if i > 0 { 2384 fs.RemoveMsg(seq - 1) 2385 } 2386 } 2387 fs.Stop() 2388 2389 tt := time.Since(start) 2390 fmt.Printf("time to store and remove all messages is %v\n", tt) 2391 fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds()) 2392 fmt.Printf("%s per sec\n", friendlyBytes(int64(float64(toStore*storedMsgSize)/tt.Seconds()))) 2393 }) 2394 } 2395 2396 func TestFileStorePubPerfWithSmallBlkSize(t *testing.T) { 2397 // Comment out to run, holding place for now. 2398 t.SkipNow() 2399 2400 subj, msg := "foo", make([]byte, 1024-33) 2401 for i := 0; i < len(msg); i++ { 2402 msg[i] = 'D' 2403 } 2404 storedMsgSize := fileStoreMsgSize(subj, nil, msg) 2405 2406 // 1GB 2407 toStore := 1 * 1024 * 1024 * 1024 / storedMsgSize 2408 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2409 fmt.Printf("storing %d msgs of %s each, totalling %s\n", 2410 toStore, 2411 friendlyBytes(int64(storedMsgSize)), 2412 friendlyBytes(int64(toStore*storedMsgSize)), 2413 ) 2414 2415 fcfg.BlockSize = FileStoreMinBlkSize 2416 2417 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2418 require_NoError(t, err) 2419 defer fs.Stop() 2420 2421 start := time.Now() 2422 for i := 0; i < int(toStore); i++ { 2423 fs.StoreMsg(subj, nil, msg) 2424 } 2425 fs.Stop() 2426 2427 tt := time.Since(start) 2428 fmt.Printf("time to store is %v\n", tt) 2429 fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds()) 2430 fmt.Printf("%s per sec\n", friendlyBytes(int64(float64(toStore*storedMsgSize)/tt.Seconds()))) 2431 }) 2432 } 2433 2434 // Saw this manifest from a restart test with max delivered set for JetStream. 2435 func TestFileStoreConsumerRedeliveredLost(t *testing.T) { 2436 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2437 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2438 require_NoError(t, err) 2439 defer fs.Stop() 2440 2441 cfg := &ConsumerConfig{AckPolicy: AckExplicit} 2442 o, err := fs.ConsumerStore("o22", cfg) 2443 if err != nil { 2444 t.Fatalf("Unexpected error: %v", err) 2445 } 2446 2447 restartConsumer := func() { 2448 t.Helper() 2449 o.Stop() 2450 time.Sleep(20 * time.Millisecond) // Wait for all things to settle. 2451 o, err = fs.ConsumerStore("o22", cfg) 2452 if err != nil { 2453 t.Fatalf("Unexpected error: %v", err) 2454 } 2455 // Make sure we recovered Redelivered. 
2456 state, err := o.State() 2457 if err != nil { 2458 t.Fatalf("Unexpected error: %v", err) 2459 } 2460 if state == nil { 2461 t.Fatalf("Did not recover state") 2462 } 2463 if len(state.Redelivered) == 0 { 2464 t.Fatalf("Did not recover redelivered") 2465 } 2466 } 2467 2468 ts := time.Now().UnixNano() 2469 o.UpdateDelivered(1, 1, 1, ts) 2470 o.UpdateDelivered(2, 1, 2, ts) 2471 o.UpdateDelivered(3, 1, 3, ts) 2472 o.UpdateDelivered(4, 1, 4, ts) 2473 o.UpdateDelivered(5, 2, 1, ts) 2474 2475 restartConsumer() 2476 2477 o.UpdateDelivered(6, 2, 2, ts) 2478 o.UpdateDelivered(7, 3, 1, ts) 2479 2480 restartConsumer() 2481 if state, _ := o.State(); len(state.Pending) != 3 { 2482 t.Fatalf("Did not recover pending correctly") 2483 } 2484 2485 o.UpdateAcks(7, 3) 2486 o.UpdateAcks(6, 2) 2487 2488 restartConsumer() 2489 o.UpdateAcks(4, 1) 2490 2491 state, _ := o.State() 2492 if len(state.Pending) != 0 { 2493 t.Fatalf("Did not clear pending correctly") 2494 } 2495 if len(state.Redelivered) != 0 { 2496 t.Fatalf("Did not clear redelivered correctly") 2497 } 2498 }) 2499 } 2500 2501 func TestFileStoreConsumerFlusher(t *testing.T) { 2502 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2503 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2504 require_NoError(t, err) 2505 defer fs.Stop() 2506 2507 o, err := fs.ConsumerStore("o22", &ConsumerConfig{}) 2508 if err != nil { 2509 t.Fatalf("Unexpected error: %v", err) 2510 } 2511 // Get the underlying impl. 2512 oc := o.(*consumerFileStore) 2513 // Wait for flusher to be running. 2514 checkFor(t, time.Second, 20*time.Millisecond, func() error { 2515 if !oc.inFlusher() { 2516 return fmt.Errorf("Flusher not running") 2517 } 2518 return nil 2519 }) 2520 // Stop and make sure the flusher goes away 2521 o.Stop() 2522 // Wait for flusher to stop. 2523 checkFor(t, time.Second, 20*time.Millisecond, func() error { 2524 if oc.inFlusher() { 2525 return fmt.Errorf("Flusher still running") 2526 } 2527 return nil 2528 }) 2529 }) 2530 } 2531 2532 func TestFileStoreConsumerDeliveredUpdates(t *testing.T) { 2533 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2534 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2535 require_NoError(t, err) 2536 defer fs.Stop() 2537 2538 // Simple consumer, no ack policy configured. 
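		// Without an ack policy the store should treat deliveries as implicitly acknowledged:
		// the checks below expect AckFloor to track Delivered and Pending to stay empty.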
2539 o, err := fs.ConsumerStore("o22", &ConsumerConfig{}) 2540 if err != nil { 2541 t.Fatalf("Unexpected error: %v", err) 2542 } 2543 defer o.Stop() 2544 2545 testDelivered := func(dseq, sseq uint64) { 2546 t.Helper() 2547 ts := time.Now().UnixNano() 2548 if err := o.UpdateDelivered(dseq, sseq, 1, ts); err != nil { 2549 t.Fatalf("Unexpected error: %v", err) 2550 } 2551 state, err := o.State() 2552 if err != nil { 2553 t.Fatalf("Error getting state: %v", err) 2554 } 2555 if state == nil { 2556 t.Fatalf("No state available") 2557 } 2558 expected := SequencePair{dseq, sseq} 2559 if state.Delivered != expected { 2560 t.Fatalf("Unexpected state, wanted %+v, got %+v", expected, state.Delivered) 2561 } 2562 if state.AckFloor != expected { 2563 t.Fatalf("Unexpected ack floor state, wanted %+v, got %+v", expected, state.AckFloor) 2564 } 2565 if len(state.Pending) != 0 { 2566 t.Fatalf("Did not expect any pending, got %d pending", len(state.Pending)) 2567 } 2568 } 2569 2570 testDelivered(1, 100) 2571 testDelivered(2, 110) 2572 testDelivered(5, 130) 2573 2574 // If we try to do an ack this should err since we are not configured with ack policy. 2575 if err := o.UpdateAcks(1, 100); err != ErrNoAckPolicy { 2576 t.Fatalf("Expected a no ack policy error on update acks, got %v", err) 2577 } 2578 // Also if we do an update with a delivery count of anything but 1 here should also give same error. 2579 ts := time.Now().UnixNano() 2580 if err := o.UpdateDelivered(5, 130, 2, ts); err != ErrNoAckPolicy { 2581 t.Fatalf("Expected a no ack policy error on update delivered with dc > 1, got %v", err) 2582 } 2583 }) 2584 } 2585 2586 func TestFileStoreConsumerDeliveredAndAckUpdates(t *testing.T) { 2587 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2588 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2589 require_NoError(t, err) 2590 defer fs.Stop() 2591 2592 // Simple consumer, no ack policy configured. 2593 o, err := fs.ConsumerStore("o22", &ConsumerConfig{AckPolicy: AckExplicit}) 2594 if err != nil { 2595 t.Fatalf("Unexpected error: %v", err) 2596 } 2597 defer o.Stop() 2598 2599 // Track pending. 
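		// Local counter mirroring how many deliveries we expect to remain pending; it is
		// compared against len(state.Pending) after each delivered/ack update below.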
2600 var pending int 2601 2602 testDelivered := func(dseq, sseq uint64) { 2603 t.Helper() 2604 ts := time.Now().Round(time.Second).UnixNano() 2605 if err := o.UpdateDelivered(dseq, sseq, 1, ts); err != nil { 2606 t.Fatalf("Unexpected error: %v", err) 2607 } 2608 pending++ 2609 state, err := o.State() 2610 if err != nil { 2611 t.Fatalf("Error getting state: %v", err) 2612 } 2613 if state == nil { 2614 t.Fatalf("No state available") 2615 } 2616 expected := SequencePair{dseq, sseq} 2617 if state.Delivered != expected { 2618 t.Fatalf("Unexpected delivered state, wanted %+v, got %+v", expected, state.Delivered) 2619 } 2620 if len(state.Pending) != pending { 2621 t.Fatalf("Expected %d pending, got %d pending", pending, len(state.Pending)) 2622 } 2623 } 2624 2625 testDelivered(1, 100) 2626 testDelivered(2, 110) 2627 testDelivered(3, 130) 2628 testDelivered(4, 150) 2629 testDelivered(5, 165) 2630 2631 testBadAck := func(dseq, sseq uint64) { 2632 t.Helper() 2633 if err := o.UpdateAcks(dseq, sseq); err == nil { 2634 t.Fatalf("Expected error but got none") 2635 } 2636 } 2637 testBadAck(3, 101) 2638 testBadAck(1, 1) 2639 2640 testAck := func(dseq, sseq, dflr, sflr uint64) { 2641 t.Helper() 2642 if err := o.UpdateAcks(dseq, sseq); err != nil { 2643 t.Fatalf("Unexpected error: %v", err) 2644 } 2645 pending-- 2646 state, err := o.State() 2647 if err != nil { 2648 t.Fatalf("Error getting state: %v", err) 2649 } 2650 if state == nil { 2651 t.Fatalf("No state available") 2652 } 2653 if len(state.Pending) != pending { 2654 t.Fatalf("Expected %d pending, got %d pending", pending, len(state.Pending)) 2655 } 2656 eflr := SequencePair{dflr, sflr} 2657 if state.AckFloor != eflr { 2658 t.Fatalf("Unexpected ack floor state, wanted %+v, got %+v", eflr, state.AckFloor) 2659 } 2660 } 2661 2662 testAck(1, 100, 1, 109) 2663 testAck(3, 130, 1, 109) 2664 testAck(2, 110, 3, 149) // We do not track explicit state on previous stream floors, so we take last known -1 2665 testAck(5, 165, 3, 149) 2666 testAck(4, 150, 5, 165) 2667 2668 testDelivered(6, 170) 2669 testDelivered(7, 171) 2670 testDelivered(8, 172) 2671 testDelivered(9, 173) 2672 testDelivered(10, 200) 2673 2674 testAck(7, 171, 5, 165) 2675 testAck(8, 172, 5, 165) 2676 2677 state, err := o.State() 2678 if err != nil { 2679 t.Fatalf("Unexpected error getting state: %v", err) 2680 } 2681 o.Stop() 2682 2683 o, err = fs.ConsumerStore("o22", &ConsumerConfig{AckPolicy: AckExplicit}) 2684 if err != nil { 2685 t.Fatalf("Unexpected error: %v", err) 2686 } 2687 defer o.Stop() 2688 2689 nstate, err := o.State() 2690 if err != nil { 2691 t.Fatalf("Unexpected error getting state: %v", err) 2692 } 2693 if !reflect.DeepEqual(nstate, state) { 2694 t.Fatalf("States don't match! NEW %+v OLD %+v", nstate, state) 2695 } 2696 }) 2697 } 2698 2699 func TestFileStoreStreamStateDeleted(t *testing.T) { 2700 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2701 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2702 require_NoError(t, err) 2703 defer fs.Stop() 2704 2705 subj, toStore := "foo", uint64(10) 2706 for i := uint64(1); i <= toStore; i++ { 2707 msg := []byte(fmt.Sprintf("[%08d] Hello World!", i)) 2708 if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil { 2709 t.Fatalf("Error storing msg: %v", err) 2710 } 2711 } 2712 state := fs.State() 2713 if len(state.Deleted) != 0 { 2714 t.Fatalf("Expected deleted to be empty") 2715 } 2716 // Now remove some interior messages. 
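		// Removing every other sequence (2, 4, 6, 8) creates interior deletes that State()
		// should report, in order, via Deleted.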
2717 var expected []uint64 2718 for seq := uint64(2); seq < toStore; seq += 2 { 2719 fs.RemoveMsg(seq) 2720 expected = append(expected, seq) 2721 } 2722 state = fs.State() 2723 if !reflect.DeepEqual(state.Deleted, expected) { 2724 t.Fatalf("Expected deleted to be %+v, got %+v\n", expected, state.Deleted) 2725 } 2726 // Now fill the gap by deleting 1 and 3 2727 fs.RemoveMsg(1) 2728 fs.RemoveMsg(3) 2729 expected = expected[2:] 2730 state = fs.State() 2731 if !reflect.DeepEqual(state.Deleted, expected) { 2732 t.Fatalf("Expected deleted to be %+v, got %+v\n", expected, state.Deleted) 2733 } 2734 if state.FirstSeq != 5 { 2735 t.Fatalf("Expected first seq to be 5, got %d", state.FirstSeq) 2736 } 2737 fs.Purge() 2738 if state = fs.State(); len(state.Deleted) != 0 { 2739 t.Fatalf("Expected no deleted after purge, got %+v\n", state.Deleted) 2740 } 2741 }) 2742 } 2743 2744 // We have reports that sometimes under load a stream could complain about a storage directory 2745 // not being empty. 2746 func TestFileStoreStreamDeleteDirNotEmpty(t *testing.T) { 2747 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2748 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2749 require_NoError(t, err) 2750 defer fs.Stop() 2751 2752 subj, toStore := "foo", uint64(10) 2753 for i := uint64(1); i <= toStore; i++ { 2754 msg := []byte(fmt.Sprintf("[%08d] Hello World!", i)) 2755 if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil { 2756 t.Fatalf("Error storing msg: %v", err) 2757 } 2758 } 2759 2760 ready := make(chan bool) 2761 go func() { 2762 g := filepath.Join(fcfg.StoreDir, "g") 2763 ready <- true 2764 for i := 0; i < 100; i++ { 2765 os.WriteFile(g, []byte("OK"), defaultFilePerms) 2766 } 2767 }() 2768 2769 <-ready 2770 if err := fs.Delete(); err != nil { 2771 t.Fatalf("Delete returned an error: %v", err) 2772 } 2773 }) 2774 } 2775 2776 func TestFileStoreConsumerPerf(t *testing.T) { 2777 // Comment out to run, holding place for now. 2778 t.SkipNow() 2779 2780 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2781 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2782 require_NoError(t, err) 2783 defer fs.Stop() 2784 2785 o, err := fs.ConsumerStore("o22", &ConsumerConfig{AckPolicy: AckExplicit}) 2786 if err != nil { 2787 t.Fatalf("Unexpected error: %v", err) 2788 } 2789 // Get the underlying impl. 2790 oc := o.(*consumerFileStore) 2791 // Wait for flusher to br running 2792 checkFor(t, time.Second, 20*time.Millisecond, func() error { 2793 if !oc.inFlusher() { 2794 return fmt.Errorf("not in flusher") 2795 } 2796 return nil 2797 }) 2798 2799 // Stop flusher for this benchmark since we will invoke directly. 
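		// Detach the quit channel under the lock before closing it so the background flusher
		// exits; the checkFor below waits until it reports as stopped.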
2800 oc.mu.Lock() 2801 qch := oc.qch 2802 oc.qch = nil 2803 oc.mu.Unlock() 2804 close(qch) 2805 2806 checkFor(t, time.Second, 20*time.Millisecond, func() error { 2807 if oc.inFlusher() { 2808 return fmt.Errorf("still in flusher") 2809 } 2810 return nil 2811 }) 2812 2813 toStore := uint64(1_000_000) 2814 2815 start := time.Now() 2816 2817 ts := start.UnixNano() 2818 2819 for i := uint64(1); i <= toStore; i++ { 2820 if err := o.UpdateDelivered(i, i, 1, ts); err != nil { 2821 t.Fatalf("Unexpected error: %v", err) 2822 } 2823 } 2824 tt := time.Since(start) 2825 fmt.Printf("time to update %d is %v\n", toStore, tt) 2826 fmt.Printf("%.0f updates/sec\n", float64(toStore)/tt.Seconds()) 2827 2828 start = time.Now() 2829 oc.mu.Lock() 2830 buf, err := oc.encodeState() 2831 oc.mu.Unlock() 2832 if err != nil { 2833 t.Fatalf("Error encoding state: %v", err) 2834 } 2835 fmt.Printf("time to encode %d bytes is %v\n", len(buf), time.Since(start)) 2836 start = time.Now() 2837 oc.writeState(buf) 2838 fmt.Printf("time to write is %v\n", time.Since(start)) 2839 }) 2840 } 2841 2842 // Reported by Ivan. 2843 func TestFileStoreStreamDeleteCacheBug(t *testing.T) { 2844 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2845 fcfg.CacheExpire = 50 * time.Millisecond 2846 2847 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 2848 require_NoError(t, err) 2849 defer fs.Stop() 2850 2851 subj, msg := "foo", []byte("Hello World") 2852 2853 if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil { 2854 t.Fatalf("Unexpected error: %v", err) 2855 } 2856 if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil { 2857 t.Fatalf("Unexpected error: %v", err) 2858 } 2859 if _, err := fs.EraseMsg(1); err != nil { 2860 t.Fatalf("Got an error on remove of %d: %v", 1, err) 2861 } 2862 time.Sleep(100 * time.Millisecond) 2863 if _, err := fs.LoadMsg(2, nil); err != nil { 2864 t.Fatalf("Unexpected error looking up msg: %v", err) 2865 } 2866 }) 2867 } 2868 2869 // rip 2870 func TestFileStoreStreamFailToRollBug(t *testing.T) { 2871 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2872 fcfg.BlockSize = 512 2873 2874 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage, MaxBytes: 300}, time.Now(), prf(&fcfg), nil) 2875 require_NoError(t, err) 2876 defer fs.Stop() 2877 2878 // Make sure we properly roll underlying blocks. 2879 n, msg := 200, bytes.Repeat([]byte("ABC"), 33) // ~100bytes 2880 for i := 0; i < n; i++ { 2881 if _, _, err := fs.StoreMsg("zzz", nil, msg); err != nil { 2882 t.Fatalf("Unexpected error: %v", err) 2883 } 2884 } 2885 2886 // Grab some info for introspection. 2887 fs.mu.RLock() 2888 numBlks := len(fs.blks) 2889 var index uint32 2890 var blkSize int64 2891 if numBlks > 0 { 2892 mb := fs.blks[0] 2893 mb.mu.RLock() 2894 index = mb.index 2895 if fi, _ := os.Stat(mb.mfn); fi != nil { 2896 blkSize = fi.Size() 2897 } 2898 mb.mu.RUnlock() 2899 } 2900 fs.mu.RUnlock() 2901 2902 if numBlks != 1 { 2903 t.Fatalf("Expected only one block, got %d", numBlks) 2904 } 2905 if index < 60 { 2906 t.Fatalf("Expected a block index > 60, got %d", index) 2907 } 2908 if blkSize > 512 { 2909 t.Fatalf("Expected block to be <= 512, got %d", blkSize) 2910 } 2911 }) 2912 } 2913 2914 // We had a case where a consumer state had a redelivered record that had seq of 0. 2915 // This was causing the server to panic. 
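// decodeConsumerState should accept the hard-coded state below without returning an error or a nil result.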
2916 func TestFileStoreBadConsumerState(t *testing.T) { 2917 bs := []byte("\x16\x02\x01\x01\x03\x02\x01\x98\xf4\x8a\x8a\f\x01\x03\x86\xfa\n\x01\x00\x01") 2918 if cs, err := decodeConsumerState(bs); err != nil || cs == nil { 2919 t.Fatalf("Expected to not throw error, got %v and %+v", err, cs) 2920 } 2921 } 2922 2923 func TestFileStoreExpireMsgsOnStart(t *testing.T) { 2924 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 2925 fcfg.BlockSize = 8 * 1024 2926 ttl := 250 * time.Millisecond 2927 cfg := StreamConfig{Name: "ORDERS", Subjects: []string{"orders.*"}, Storage: FileStorage, MaxAge: ttl} 2928 var fs *fileStore 2929 2930 startFS := func() *fileStore { 2931 t.Helper() 2932 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 2933 require_NoError(t, err) 2934 return fs 2935 } 2936 2937 newFS := func() *fileStore { 2938 t.Helper() 2939 if fs != nil { 2940 fs.Stop() 2941 fs = nil 2942 } 2943 removeDir(t, fcfg.StoreDir) 2944 return startFS() 2945 } 2946 2947 restartFS := func(delay time.Duration) *fileStore { 2948 if fs != nil { 2949 fs.Stop() 2950 fs = nil 2951 time.Sleep(delay) 2952 } 2953 fs = startFS() 2954 return fs 2955 } 2956 2957 fs = newFS() 2958 defer fs.Stop() 2959 2960 msg := bytes.Repeat([]byte("ABC"), 33) // ~100bytes 2961 loadMsgs := func(n int) { 2962 t.Helper() 2963 for i := 1; i <= n; i++ { 2964 if _, _, err := fs.StoreMsg(fmt.Sprintf("orders.%d", i%10), nil, msg); err != nil { 2965 t.Fatalf("Unexpected error: %v", err) 2966 } 2967 } 2968 } 2969 2970 checkState := func(msgs, first, last uint64) { 2971 t.Helper() 2972 if fs == nil { 2973 t.Fatalf("No fs") 2974 return 2975 } 2976 state := fs.State() 2977 if state.Msgs != msgs { 2978 t.Fatalf("Expected %d msgs, got %d", msgs, state.Msgs) 2979 } 2980 if state.FirstSeq != first { 2981 t.Fatalf("Expected %d as first, got %d", first, state.FirstSeq) 2982 } 2983 if state.LastSeq != last { 2984 t.Fatalf("Expected %d as last, got %d", last, state.LastSeq) 2985 } 2986 } 2987 2988 checkNumBlks := func(expected int) { 2989 t.Helper() 2990 fs.mu.RLock() 2991 n := len(fs.blks) 2992 fs.mu.RUnlock() 2993 if n != expected { 2994 t.Fatalf("Expected %d msg blks, got %d", expected, n) 2995 } 2996 } 2997 2998 // Check the filtered subject state and make sure that is tracked properly. 2999 checkFiltered := func(subject string, ss SimpleState) { 3000 t.Helper() 3001 fss := fs.FilteredState(1, subject) 3002 if fss != ss { 3003 t.Fatalf("Expected FilteredState of %+v, got %+v", ss, fss) 3004 } 3005 } 3006 3007 // Make sure state on disk matches (e.g. writeIndexInfo properly called) 3008 checkBlkState := func(index int) { 3009 t.Helper() 3010 fs.mu.RLock() 3011 if index >= len(fs.blks) { 3012 t.Fatalf("Out of range, wanted %d but only %d blks", index, len(fs.blks)) 3013 } 3014 fs.mu.RUnlock() 3015 } 3016 3017 lastSeqForBlk := func() uint64 { 3018 t.Helper() 3019 fs.mu.RLock() 3020 defer fs.mu.RUnlock() 3021 if len(fs.blks) == 0 { 3022 t.Fatalf("No blocks?") 3023 } 3024 mb := fs.blks[0] 3025 mb.mu.RLock() 3026 defer mb.mu.RUnlock() 3027 return mb.last.seq 3028 } 3029 3030 // Actual testing here. 3031 3032 loadMsgs(500) 3033 restartFS(ttl + 100*time.Millisecond) 3034 checkState(0, 501, 500) 3035 // We actually hold onto the last one now to remember our starting sequence. 3036 checkNumBlks(1) 3037 3038 // Now check partial expires and the fss tracking state. 3039 // Small numbers is to keep them in one block. 
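		// The two batches are written ~100ms apart, so restarting after ~175ms means only the
		// first batch has exceeded the 250ms MaxAge and roughly half of the messages should expire.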
3040 fs = newFS() 3041 loadMsgs(10) 3042 time.Sleep(100 * time.Millisecond) 3043 loadMsgs(10) 3044 checkFiltered("orders.*", SimpleState{Msgs: 20, First: 1, Last: 20}) 3045 3046 restartFS(ttl - 100*time.Millisecond + 25*time.Millisecond) // Just want half 3047 checkState(10, 11, 20) 3048 checkNumBlks(1) 3049 checkFiltered("orders.*", SimpleState{Msgs: 10, First: 11, Last: 20}) 3050 checkFiltered("orders.5", SimpleState{Msgs: 1, First: 15, Last: 15}) 3051 checkBlkState(0) 3052 3053 fs = newFS() 3054 loadMsgs(5) 3055 time.Sleep(100 * time.Millisecond) 3056 loadMsgs(15) 3057 restartFS(ttl - 100*time.Millisecond + 25*time.Millisecond) // Just want half 3058 checkState(15, 6, 20) 3059 checkFiltered("orders.*", SimpleState{Msgs: 15, First: 6, Last: 20}) 3060 checkFiltered("orders.5", SimpleState{Msgs: 2, First: 10, Last: 20}) 3061 3062 // Now we want to test that if the end of a msg block is all deletes msgs that we do the right thing. 3063 fs = newFS() 3064 loadMsgs(150) 3065 time.Sleep(100 * time.Millisecond) 3066 loadMsgs(100) 3067 3068 checkNumBlks(5) 3069 3070 // Now delete 10 messages from the end of the first block which we will expire on restart. 3071 // We will expire up to seq 100, so delete 91-100. 3072 lseq := lastSeqForBlk() 3073 for seq := lseq; seq > lseq-10; seq-- { 3074 removed, err := fs.RemoveMsg(seq) 3075 if err != nil || !removed { 3076 t.Fatalf("Error removing message: %v", err) 3077 } 3078 } 3079 restartFS(ttl - 100*time.Millisecond + 25*time.Millisecond) // Just want half 3080 checkState(100, 151, 250) 3081 checkNumBlks(3) // We should only have 3 blks left. 3082 checkBlkState(0) 3083 3084 // Now make sure that we properly clean up any internal dmap entries (sparse) when expiring. 3085 fs = newFS() 3086 loadMsgs(10) 3087 // Remove some in sparse fashion, adding to dmap. 3088 fs.RemoveMsg(2) 3089 fs.RemoveMsg(4) 3090 fs.RemoveMsg(6) 3091 time.Sleep(100 * time.Millisecond) 3092 loadMsgs(10) 3093 restartFS(ttl - 100*time.Millisecond + 25*time.Millisecond) // Just want half 3094 checkState(10, 11, 20) 3095 checkNumBlks(1) 3096 checkBlkState(0) 3097 3098 // Make sure expiring a block with tail deleted messages removes the message block etc. 3099 fs = newFS() 3100 loadMsgs(7) 3101 time.Sleep(100 * time.Millisecond) 3102 loadMsgs(3) 3103 fs.RemoveMsg(8) 3104 fs.RemoveMsg(9) 3105 fs.RemoveMsg(10) 3106 restartFS(ttl - 100*time.Millisecond + 25*time.Millisecond) 3107 checkState(0, 11, 10) 3108 3109 fs.Stop() 3110 // Not for start per se but since we have all the test tooling here check that Compact() does right thing as well. 3111 fs = newFS() 3112 defer fs.Stop() 3113 loadMsgs(100) 3114 checkFiltered("orders.*", SimpleState{Msgs: 100, First: 1, Last: 100}) 3115 checkFiltered("orders.5", SimpleState{Msgs: 10, First: 5, Last: 95}) 3116 // Check that Compact keeps fss updated, does dmap etc. 
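		// Compact(51) purges everything below sequence 51, so messages 51-100 remain and the
		// per-subject filtered counts should drop by half.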
3117 fs.Compact(51) 3118 checkFiltered("orders.*", SimpleState{Msgs: 50, First: 51, Last: 100}) 3119 checkFiltered("orders.5", SimpleState{Msgs: 5, First: 55, Last: 95}) 3120 checkBlkState(0) 3121 }) 3122 } 3123 3124 func TestFileStoreSparseCompaction(t *testing.T) { 3125 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3126 fcfg.BlockSize = 1024 * 1024 3127 cfg := StreamConfig{Name: "KV", Subjects: []string{"kv.>"}, Storage: FileStorage} 3128 3129 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3130 require_NoError(t, err) 3131 defer fs.Stop() 3132 3133 msg := bytes.Repeat([]byte("ABC"), 33) // ~100bytes 3134 loadMsgs := func(n int) { 3135 t.Helper() 3136 for i := 1; i <= n; i++ { 3137 if _, _, err := fs.StoreMsg(fmt.Sprintf("kv.%d", i%10), nil, msg); err != nil { 3138 t.Fatalf("Unexpected error: %v", err) 3139 } 3140 } 3141 } 3142 3143 checkState := func(msgs, first, last uint64) { 3144 t.Helper() 3145 if fs == nil { 3146 t.Fatalf("No fs") 3147 return 3148 } 3149 state := fs.State() 3150 if state.Msgs != msgs { 3151 t.Fatalf("Expected %d msgs, got %d", msgs, state.Msgs) 3152 } 3153 if state.FirstSeq != first { 3154 t.Fatalf("Expected %d as first, got %d", first, state.FirstSeq) 3155 } 3156 if state.LastSeq != last { 3157 t.Fatalf("Expected %d as last, got %d", last, state.LastSeq) 3158 } 3159 } 3160 3161 deleteMsgs := func(seqs ...uint64) { 3162 t.Helper() 3163 for _, seq := range seqs { 3164 removed, err := fs.RemoveMsg(seq) 3165 if err != nil || !removed { 3166 t.Fatalf("Got an error on remove of %d: %v", seq, err) 3167 } 3168 } 3169 } 3170 3171 eraseMsgs := func(seqs ...uint64) { 3172 t.Helper() 3173 for _, seq := range seqs { 3174 removed, err := fs.EraseMsg(seq) 3175 if err != nil || !removed { 3176 t.Fatalf("Got an error on erase of %d: %v", seq, err) 3177 } 3178 } 3179 } 3180 3181 compact := func() { 3182 t.Helper() 3183 var ssb, ssa StreamState 3184 fs.FastState(&ssb) 3185 tb, ub, _ := fs.Utilization() 3186 3187 fs.mu.RLock() 3188 if len(fs.blks) == 0 { 3189 t.Fatalf("No blocks?") 3190 } 3191 mb := fs.blks[0] 3192 fs.mu.RUnlock() 3193 3194 mb.mu.Lock() 3195 mb.compact() 3196 mb.mu.Unlock() 3197 3198 fs.FastState(&ssa) 3199 if !reflect.DeepEqual(ssb, ssa) { 3200 t.Fatalf("States do not match; %+v vs %+v", ssb, ssa) 3201 } 3202 ta, ua, _ := fs.Utilization() 3203 if ub != ua { 3204 t.Fatalf("Expected used to be the same, got %d vs %d", ub, ua) 3205 } 3206 if ta >= tb { 3207 t.Fatalf("Expected total after to be less then before, got %d vs %d", tb, ta) 3208 } 3209 } 3210 3211 // Actual testing here. 3212 loadMsgs(1000) 3213 checkState(1000, 1, 1000) 3214 3215 // Now delete a few messages. 3216 deleteMsgs(1) 3217 compact() 3218 3219 deleteMsgs(1000, 999, 998, 997) 3220 compact() 3221 3222 eraseMsgs(500, 502, 504, 506, 508, 510) 3223 compact() 3224 }) 3225 } 3226 3227 func TestFileStoreSparseCompactionWithInteriorDeletes(t *testing.T) { 3228 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3229 cfg := StreamConfig{Name: "KV", Subjects: []string{"kv.>"}, Storage: FileStorage} 3230 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3231 require_NoError(t, err) 3232 defer fs.Stop() 3233 3234 for i := 1; i <= 1000; i++ { 3235 if _, _, err := fs.StoreMsg(fmt.Sprintf("kv.%d", i%10), nil, []byte("OK")); err != nil { 3236 t.Fatalf("Unexpected error: %v", err) 3237 } 3238 } 3239 3240 // Now do interior deletes. 
3241 for _, seq := range []uint64{500, 600, 700, 800} { 3242 removed, err := fs.RemoveMsg(seq) 3243 if err != nil || !removed { 3244 t.Fatalf("Got an error on remove of %d: %v", seq, err) 3245 } 3246 } 3247 3248 _, err = fs.LoadMsg(900, nil) 3249 if err != nil { 3250 t.Fatalf("Unexpected error: %v", err) 3251 } 3252 3253 // Do compact by hand, make sure we can still access msgs past the interior deletes. 3254 fs.mu.RLock() 3255 lmb := fs.lmb 3256 lmb.dirtyCloseWithRemove(false) 3257 lmb.compact() 3258 fs.mu.RUnlock() 3259 3260 if _, err = fs.LoadMsg(900, nil); err != nil { 3261 t.Fatalf("Unexpected error: %v", err) 3262 } 3263 }) 3264 } 3265 3266 // When messages span multiple blocks and we want to purge but keep some amount, say 1, we would remove all. 3267 // This is because we would not break out of iterator across more message blocks. 3268 // Issue #2622 3269 func TestFileStorePurgeExKeepOneBug(t *testing.T) { 3270 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3271 fcfg.BlockSize = 128 3272 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 3273 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3274 require_NoError(t, err) 3275 defer fs.Stop() 3276 3277 fill := bytes.Repeat([]byte("X"), 128) 3278 3279 fs.StoreMsg("A", nil, []byte("META")) 3280 fs.StoreMsg("B", nil, fill) 3281 fs.StoreMsg("A", nil, []byte("META")) 3282 fs.StoreMsg("B", nil, fill) 3283 3284 if fss := fs.FilteredState(1, "A"); fss.Msgs != 2 { 3285 t.Fatalf("Expected to find 2 `A` msgs, got %d", fss.Msgs) 3286 } 3287 3288 n, err := fs.PurgeEx("A", 0, 1) 3289 if err != nil { 3290 t.Fatalf("Unexpected error: %v", err) 3291 } 3292 if n != 1 { 3293 t.Fatalf("Expected PurgeEx to remove 1 `A` msgs, got %d", n) 3294 } 3295 if fss := fs.FilteredState(1, "A"); fss.Msgs != 1 { 3296 t.Fatalf("Expected to find 1 `A` msgs, got %d", fss.Msgs) 3297 } 3298 }) 3299 } 3300 3301 func TestFileStoreFilteredPendingBug(t *testing.T) { 3302 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3303 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 3304 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3305 require_NoError(t, err) 3306 defer fs.Stop() 3307 3308 fs.StoreMsg("foo", nil, []byte("msg")) 3309 fs.StoreMsg("bar", nil, []byte("msg")) 3310 fs.StoreMsg("baz", nil, []byte("msg")) 3311 3312 fs.mu.Lock() 3313 mb := fs.lmb 3314 fs.mu.Unlock() 3315 3316 total, f, l := mb.filteredPending("foo", false, 3) 3317 if total != 0 { 3318 t.Fatalf("Expected total of 0 but got %d", total) 3319 } 3320 if f != 0 || l != 0 { 3321 t.Fatalf("Expected first and last to be 0 as well, but got %d %d", f, l) 3322 } 3323 }) 3324 } 3325 3326 // Test to optimize the selectMsgBlock with lots of blocks. 3327 func TestFileStoreFetchPerf(t *testing.T) { 3328 // Comment out to run. 3329 t.SkipNow() 3330 3331 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3332 fcfg.BlockSize = 8192 3333 fcfg.AsyncFlush = true 3334 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 3335 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3336 require_NoError(t, err) 3337 defer fs.Stop() 3338 3339 // Will create 25k msg blocks. 
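		// Each message carries ~1800 bytes of payload against the 8KB block size, so the
		// 100k messages should spread across roughly 25k blocks.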
3340 n, subj, msg := 100_000, "zzz", bytes.Repeat([]byte("ABC"), 600) 3341 for i := 0; i < n; i++ { 3342 if _, _, err := fs.StoreMsg(subj, nil, msg); err != nil { 3343 t.Fatalf("Unexpected error: %v", err) 3344 } 3345 } 3346 3347 // Time how long it takes us to load all messages. 3348 var smv StoreMsg 3349 now := time.Now() 3350 for i := 0; i < n; i++ { 3351 _, err := fs.LoadMsg(uint64(i), &smv) 3352 if err != nil { 3353 t.Fatalf("Unexpected error looking up seq %d: %v", i, err) 3354 } 3355 } 3356 fmt.Printf("Elapsed to load all messages is %v\n", time.Since(now)) 3357 }) 3358 } 3359 3360 // For things like raft log when we compact and have a message block that could reclaim > 50% of space for block we want to do that. 3361 // https://github.com/nats-io/nats-server/issues/2936 3362 func TestFileStoreCompactReclaimHeadSpace(t *testing.T) { 3363 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3364 fcfg.BlockSize = 4 * 1024 * 1024 3365 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 3366 created := time.Now() 3367 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 3368 require_NoError(t, err) 3369 defer fs.Stop() 3370 3371 // Create random bytes for payload to test for corruption vs repeated. 3372 msg := make([]byte, 64*1024) 3373 crand.Read(msg) 3374 3375 // This gives us ~63 msgs in first and ~37 in second. 3376 n, subj := 100, "z" 3377 for i := 0; i < n; i++ { 3378 _, _, err := fs.StoreMsg(subj, nil, msg) 3379 require_NoError(t, err) 3380 } 3381 3382 checkNumBlocks := func(n int) { 3383 t.Helper() 3384 fs.mu.RLock() 3385 defer fs.mu.RUnlock() 3386 if len(fs.blks) != n { 3387 t.Fatalf("Expected to have %d blocks, got %d", n, len(fs.blks)) 3388 } 3389 } 3390 3391 getBlock := func(index int) *msgBlock { 3392 t.Helper() 3393 fs.mu.RLock() 3394 defer fs.mu.RUnlock() 3395 return fs.blks[index] 3396 } 3397 3398 // Check that we did right thing and actually reclaimed since > 50% 3399 checkBlock := func(mb *msgBlock) { 3400 t.Helper() 3401 3402 mb.mu.RLock() 3403 nbytes, rbytes, mfn := mb.bytes, mb.rbytes, mb.mfn 3404 fseq, lseq := mb.first.seq, mb.last.seq 3405 mb.mu.RUnlock() 3406 3407 // Check that sizes match as long as we are not doing compression. 3408 if fcfg.Compression == NoCompression { 3409 // Check rbytes then the actual file as well. 3410 if nbytes != rbytes { 3411 t.Fatalf("Expected to reclaim and have bytes == rbytes, got %d vs %d", nbytes, rbytes) 3412 } 3413 file, err := os.Open(mfn) 3414 require_NoError(t, err) 3415 defer file.Close() 3416 fi, err := file.Stat() 3417 require_NoError(t, err) 3418 if rbytes != uint64(fi.Size()) { 3419 t.Fatalf("Expected to rbytes == fi.Size, got %d vs %d", rbytes, fi.Size()) 3420 } 3421 } 3422 3423 // Make sure we can pull messages and that they are ok. 3424 var smv StoreMsg 3425 sm, err := fs.LoadMsg(fseq, &smv) 3426 require_NoError(t, err) 3427 if !bytes.Equal(sm.msg, msg) { 3428 t.Fatalf("Msgs don't match, original %q vs %q", msg, sm.msg) 3429 } 3430 sm, err = fs.LoadMsg(lseq, &smv) 3431 require_NoError(t, err) 3432 if !bytes.Equal(sm.msg, msg) { 3433 t.Fatalf("Msgs don't match, original %q vs %q", msg, sm.msg) 3434 } 3435 } 3436 3437 checkNumBlocks(2) 3438 _, err = fs.Compact(33) 3439 require_NoError(t, err) 3440 3441 checkNumBlocks(2) 3442 checkBlock(getBlock(0)) 3443 checkBlock(getBlock(1)) 3444 3445 _, err = fs.Compact(85) 3446 require_NoError(t, err) 3447 3448 checkNumBlocks(1) 3449 checkBlock(getBlock(0)) 3450 3451 // Make sure we can write. 
3452 _, _, err = fs.StoreMsg(subj, nil, msg) 3453 require_NoError(t, err) 3454 3455 checkNumBlocks(1) 3456 checkBlock(getBlock(0)) 3457 3458 // Stop and start again. 3459 fs.Stop() 3460 3461 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 3462 require_NoError(t, err) 3463 defer fs.Stop() 3464 3465 checkNumBlocks(1) 3466 checkBlock(getBlock(0)) 3467 3468 // Make sure we can write. 3469 _, _, err = fs.StoreMsg(subj, nil, msg) 3470 require_NoError(t, err) 3471 }) 3472 } 3473 3474 func TestFileStoreRememberLastMsgTime(t *testing.T) { 3475 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3476 var fs *fileStore 3477 cfg := StreamConfig{Name: "TEST", Storage: FileStorage, MaxAge: 1 * time.Second} 3478 3479 getFS := func() *fileStore { 3480 t.Helper() 3481 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3482 require_NoError(t, err) 3483 return fs 3484 } 3485 restartFS := func() { 3486 t.Helper() 3487 fs.Stop() 3488 fs = getFS() 3489 } 3490 3491 msg := bytes.Repeat([]byte("X"), 2*1024*1024) 3492 3493 // Get first one. 3494 fs = getFS() 3495 defer fs.Stop() 3496 3497 seq, ts, err := fs.StoreMsg("foo", nil, msg) 3498 require_NoError(t, err) 3499 // We will test that last msg time survives from delete, purge and expires after restart. 3500 removed, err := fs.RemoveMsg(seq) 3501 require_NoError(t, err) 3502 require_True(t, removed) 3503 3504 lt := time.Unix(0, ts).UTC() 3505 require_True(t, lt == fs.State().LastTime) 3506 3507 // Restart 3508 restartFS() 3509 3510 // Test that last time survived. 3511 require_True(t, lt == fs.State().LastTime) 3512 3513 seq, ts, err = fs.StoreMsg("foo", nil, msg) 3514 require_NoError(t, err) 3515 3516 var smv StoreMsg 3517 _, err = fs.LoadMsg(seq, &smv) 3518 require_NoError(t, err) 3519 3520 fs.Purge() 3521 3522 // Restart 3523 restartFS() 3524 3525 lt = time.Unix(0, ts).UTC() 3526 require_True(t, lt == fs.State().LastTime) 3527 3528 _, _, err = fs.StoreMsg("foo", nil, msg) 3529 require_NoError(t, err) 3530 seq, ts, err = fs.StoreMsg("foo", nil, msg) 3531 require_NoError(t, err) 3532 3533 require_True(t, seq == 4) 3534 3535 // Wait til messages expire. 3536 checkFor(t, 5*time.Second, time.Second, func() error { 3537 state := fs.State() 3538 if state.Msgs == 0 { 3539 return nil 3540 } 3541 return fmt.Errorf("Still has %d msgs", state.Msgs) 3542 }) 3543 3544 // Restart 3545 restartFS() 3546 3547 lt = time.Unix(0, ts).UTC() 3548 require_True(t, lt == fs.State().LastTime) 3549 3550 // Now make sure we retain the true last seq. 
3551 _, _, err = fs.StoreMsg("foo", nil, msg) 3552 require_NoError(t, err) 3553 seq, ts, err = fs.StoreMsg("foo", nil, msg) 3554 require_NoError(t, err) 3555 3556 require_True(t, seq == 6) 3557 removed, err = fs.RemoveMsg(seq) 3558 require_NoError(t, err) 3559 require_True(t, removed) 3560 3561 removed, err = fs.RemoveMsg(seq - 1) 3562 require_NoError(t, err) 3563 require_True(t, removed) 3564 3565 // Restart 3566 restartFS() 3567 3568 lt = time.Unix(0, ts).UTC() 3569 require_True(t, lt == fs.State().LastTime) 3570 require_True(t, seq == 6) 3571 }) 3572 } 3573 3574 func (fs *fileStore) getFirstBlock() *msgBlock { 3575 fs.mu.RLock() 3576 defer fs.mu.RUnlock() 3577 if len(fs.blks) == 0 { 3578 return nil 3579 } 3580 return fs.blks[0] 3581 } 3582 3583 func TestFileStoreRebuildStateDmapAccountingBug(t *testing.T) { 3584 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3585 fcfg.BlockSize = 1024 * 1024 3586 3587 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 3588 require_NoError(t, err) 3589 defer fs.Stop() 3590 3591 for i := 0; i < 100; i++ { 3592 _, _, err = fs.StoreMsg("foo", nil, nil) 3593 require_NoError(t, err) 3594 } 3595 // Delete 2-40. 3596 for i := 2; i <= 40; i++ { 3597 _, err := fs.RemoveMsg(uint64(i)) 3598 require_NoError(t, err) 3599 } 3600 3601 mb := fs.getFirstBlock() 3602 require_True(t, mb != nil) 3603 3604 check := func() { 3605 t.Helper() 3606 mb.mu.RLock() 3607 defer mb.mu.RUnlock() 3608 dmapLen := uint64(mb.dmap.Size()) 3609 if mb.msgs != (mb.last.seq-mb.first.seq+1)-dmapLen { 3610 t.Fatalf("Consistency check failed: %d != %d -> last %d first %d len(dmap) %d", 3611 mb.msgs, (mb.last.seq-mb.first.seq+1)-dmapLen, mb.last.seq, mb.first.seq, dmapLen) 3612 } 3613 } 3614 3615 check() 3616 3617 mb.mu.Lock() 3618 mb.compact() 3619 mb.mu.Unlock() 3620 3621 // Now delete first. 3622 _, err = fs.RemoveMsg(1) 3623 require_NoError(t, err) 3624 3625 mb.mu.Lock() 3626 _, _, err = mb.rebuildStateLocked() 3627 mb.mu.Unlock() 3628 require_NoError(t, err) 3629 3630 check() 3631 }) 3632 } 3633 3634 func TestFileStorePurgeExWithSubject(t *testing.T) { 3635 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3636 fcfg.BlockSize = 1000 3637 cfg := StreamConfig{Name: "TEST", Subjects: []string{"foo.>"}, Storage: FileStorage} 3638 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3639 require_NoError(t, err) 3640 defer fs.Stop() 3641 3642 payload := make([]byte, 20) 3643 total := 200 3644 for i := 0; i < total; i++ { 3645 _, _, err = fs.StoreMsg("foo.1", nil, payload) 3646 require_NoError(t, err) 3647 } 3648 _, _, err = fs.StoreMsg("foo.2", nil, []byte("xxxxxx")) 3649 require_NoError(t, err) 3650 3651 // This should purge all. 3652 p, err := fs.PurgeEx("foo.1", 1, 0) 3653 require_NoError(t, err) 3654 require_True(t, int(p) == total) 3655 require_True(t, int(p) == total) 3656 require_True(t, fs.State().Msgs == 1) 3657 require_True(t, fs.State().FirstSeq == 201) 3658 }) 3659 } 3660 3661 // When the N.idx file is shorter than the previous write we could fail to recover the idx properly. 3662 // For instance, with encryption and an expiring stream that has no messages, when a restart happens the decrypt will fail 3663 // since their are extra bytes, and this could lead to a stream sequence reset to zero. 3664 // 3665 // NOTE: We do not use idx files anymore, but keeping test. 
3666 func TestFileStoreShortIndexWriteBug(t *testing.T) {
3667 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
3668 // Shows up in encrypted mode, but could affect non-encrypted mode as well.
3669 cfg := StreamConfig{Name: "TEST", Storage: FileStorage, MaxAge: time.Second}
3670 created := time.Now()
3671 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
3672 require_NoError(t, err)
3673 defer fs.Stop()
3674 
3675 for i := 0; i < 100; i++ {
3676 _, _, err = fs.StoreMsg("foo", nil, nil)
3677 require_NoError(t, err)
3678 }
3679 // Wait until all messages go away.
3680 checkFor(t, 5*time.Second, 200*time.Millisecond, func() error {
3681 if state := fs.State(); state.Msgs != 0 {
3682 return fmt.Errorf("Expected no msgs, got %d", state.Msgs)
3683 }
3684 return nil
3685 })
3686 
3687 if state := fs.State(); state.FirstSeq != 101 {
3688 t.Fatalf("Expected first sequence of 101 vs %d", state.FirstSeq)
3689 }
3690 
3691 // Now restart.
3692 fs.Stop()
3693 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
3694 require_NoError(t, err)
3695 defer fs.Stop()
3696 
3697 if state := fs.State(); state.FirstSeq != 101 || state.LastSeq != 100 {
3698 t.Fatalf("Expected first sequence of 101 vs %d", state.FirstSeq)
3699 }
3700 })
3701 }
3702 
3703 func TestFileStoreDoubleCompactWithWriteInBetweenEncryptedBug(t *testing.T) {
3704 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
3705 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil)
3706 require_NoError(t, err)
3707 defer fs.Stop()
3708 
3709 subj, msg := "foo", []byte("ouch")
3710 for i := 0; i < 5; i++ {
3711 fs.StoreMsg(subj, nil, msg)
3712 }
3713 _, err = fs.Compact(5)
3714 require_NoError(t, err)
3715 
3716 if state := fs.State(); state.LastSeq != 5 {
3717 t.Fatalf("Expected last sequence to be 5 but got %d", state.LastSeq)
3718 }
3719 for i := 0; i < 5; i++ {
3720 fs.StoreMsg(subj, nil, msg)
3721 }
3722 _, err = fs.Compact(10)
3723 require_NoError(t, err)
3724 
3725 if state := fs.State(); state.LastSeq != 10 {
3726 t.Fatalf("Expected last sequence to be 10 but got %d", state.LastSeq)
3727 }
3728 })
3729 }
3730 
3731 // When we kept the empty block for tracking sequence, we needed to reset the bek
3732 // counter when encrypted for subsequent writes to be correct. The bek in place could
3733 // possibly still have a non-zero counter from previous writes.
3734 // Happens when all messages expire and they are flushed, and then subsequent writes occur.
3735 func TestFileStoreEncryptedKeepIndexNeedBekResetBug(t *testing.T) {
3736 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
3737 ttl := 1 * time.Second
3738 cfg := StreamConfig{Name: "zzz", Storage: FileStorage, MaxAge: ttl}
3739 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil)
3740 require_NoError(t, err)
3741 defer fs.Stop()
3742 
3743 subj, msg := "foo", []byte("ouch")
3744 for i := 0; i < 5; i++ {
3745 fs.StoreMsg(subj, nil, msg)
3746 }
3747 
3748 // Want to go to 0.
3749 // This will leave the marker.
3750 checkFor(t, 5*time.Second, ttl, func() error {
3751 if state := fs.State(); state.Msgs != 0 {
3752 return fmt.Errorf("Expected no msgs, got %d", state.Msgs)
3753 }
3754 return nil
3755 })
3756 
3757 // Now write additional messages.
3758 for i := 0; i < 5; i++ {
3759 fs.StoreMsg(subj, nil, msg)
3760 }
3761 
3762 // Make sure the buffer is cleared.
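// Dropping the last block's write cache here forces the LoadMsg below to go
// back to disk (and, for the encrypted permutations, to decrypt the block),
// which is what exposes a bek counter that was not reset.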
3763 fs.mu.RLock()
3764 mb := fs.lmb
3765 fs.mu.RUnlock()
3766 mb.mu.Lock()
3767 mb.clearCacheAndOffset()
3768 mb.mu.Unlock()
3769 
3770 // Now make sure we can read.
3771 var smv StoreMsg
3772 _, err = fs.LoadMsg(10, &smv)
3773 require_NoError(t, err)
3774 })
3775 }
3776 
3777 func (fs *fileStore) reportMeta() (hasPSIM, hasAnyFSS bool) {
3778 fs.mu.RLock()
3779 defer fs.mu.RUnlock()
3780 
3781 hasPSIM = fs.psim != nil
3782 for _, mb := range fs.blks {
3783 mb.mu.RLock()
3784 hasAnyFSS = hasAnyFSS || mb.fss != nil
3785 mb.mu.RUnlock()
3786 if hasAnyFSS {
3787 break
3788 }
3789 }
3790 return hasPSIM, hasAnyFSS
3791 }
3792 
3793 func TestFileStoreExpireSubjectMeta(t *testing.T) {
3794 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
3795 fcfg.BlockSize = 1024
3796 fcfg.CacheExpire = time.Second
3797 fcfg.SyncInterval = time.Second
3798 cfg := StreamConfig{Name: "zzz", Subjects: []string{"kv.>"}, Storage: FileStorage, MaxMsgsPer: 1}
3799 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil)
3800 require_NoError(t, err)
3801 defer fs.Stop()
3802 
3803 ns := 100
3804 for i := 1; i <= ns; i++ {
3805 subj := fmt.Sprintf("kv.%d", i)
3806 _, _, err := fs.StoreMsg(subj, nil, []byte("value"))
3807 require_NoError(t, err)
3808 }
3809 
3810 // Test that on restart we do not have extensive metadata but do have the correct number of subjects/keys.
3811 // That is the only thing really needed for store state / stream info.
3812 fs.Stop()
3813 fs, err = newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil)
3814 require_NoError(t, err)
3815 defer fs.Stop()
3816 
3817 var ss StreamState
3818 fs.FastState(&ss)
3819 if ss.NumSubjects != ns {
3820 t.Fatalf("Expected NumSubjects of %d, got %d", ns, ss.NumSubjects)
3821 }
3822 
3823 // Make sure we clear mb fss meta.
3824 checkFor(t, 10*time.Second, 500*time.Millisecond, func() error {
3825 if _, hasAnyFSS := fs.reportMeta(); hasAnyFSS {
3826 return fmt.Errorf("Still have mb fss state")
3827 }
3828 return nil
3829 })
3830 
3831 // LoadLast, which is what KV uses, should load meta and succeed.
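// Note that the load may transiently rebuild the per-subject (fss) state for
// the block it touches, so afterwards we expect that state to be expired away
// again, just like above.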
3832 _, err = fs.LoadLastMsg("kv.22", nil) 3833 require_NoError(t, err) 3834 // Make sure we clear mb fss meta 3835 checkFor(t, 10*time.Second, 500*time.Millisecond, func() error { 3836 if _, hasAnyFSS := fs.reportMeta(); hasAnyFSS { 3837 return fmt.Errorf("Still have mb fss state") 3838 } 3839 return nil 3840 }) 3841 }) 3842 } 3843 3844 func TestFileStoreMaxMsgsPerSubject(t *testing.T) { 3845 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3846 fcfg.BlockSize = 128 3847 fcfg.CacheExpire = time.Second 3848 cfg := StreamConfig{Name: "zzz", Subjects: []string{"kv.>"}, Storage: FileStorage, MaxMsgsPer: 1} 3849 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3850 require_NoError(t, err) 3851 defer fs.Stop() 3852 3853 ns := 100 3854 for i := 1; i <= ns; i++ { 3855 subj := fmt.Sprintf("kv.%d", i) 3856 _, _, err := fs.StoreMsg(subj, nil, []byte("value")) 3857 require_NoError(t, err) 3858 } 3859 3860 for i := 1; i <= ns; i++ { 3861 subj := fmt.Sprintf("kv.%d", i) 3862 _, _, err := fs.StoreMsg(subj, nil, []byte("value")) 3863 require_NoError(t, err) 3864 } 3865 3866 if state := fs.State(); state.Msgs != 100 || state.FirstSeq != 101 || state.LastSeq != 200 || len(state.Deleted) != 0 { 3867 t.Fatalf("Bad state: %+v", state) 3868 } 3869 3870 if nb := fs.numMsgBlocks(); nb != 34 { 3871 t.Fatalf("Expected 34 blocks, got %d", nb) 3872 } 3873 }) 3874 } 3875 3876 // Testing the case in https://github.com/nats-io/nats-server/issues/4247 3877 func TestFileStoreMaxMsgsAndMaxMsgsPerSubject(t *testing.T) { 3878 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3879 fcfg.BlockSize = 128 3880 fcfg.CacheExpire = time.Second 3881 cfg := StreamConfig{ 3882 Name: "zzz", 3883 Subjects: []string{"kv.>"}, 3884 Storage: FileStorage, 3885 Discard: DiscardNew, MaxMsgs: 100, // Total stream policy 3886 DiscardNewPer: true, MaxMsgsPer: 1, // Per-subject policy 3887 } 3888 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3889 require_NoError(t, err) 3890 defer fs.Stop() 3891 3892 for i := 1; i <= 101; i++ { 3893 subj := fmt.Sprintf("kv.%d", i) 3894 _, _, err := fs.StoreMsg(subj, nil, []byte("value")) 3895 if i == 101 { 3896 // The 101th iteration should fail because MaxMsgs is set to 3897 // 100 and the policy is DiscardNew. 3898 require_Error(t, err) 3899 } else { 3900 require_NoError(t, err) 3901 } 3902 } 3903 3904 for i := 1; i <= 100; i++ { 3905 subj := fmt.Sprintf("kv.%d", i) 3906 _, _, err := fs.StoreMsg(subj, nil, []byte("value")) 3907 // All of these iterations should fail because MaxMsgsPer is set 3908 // to 1 and DiscardNewPer is set to true, forcing us to reject 3909 // cases where there is already a message on this subject. 3910 require_Error(t, err) 3911 } 3912 3913 if state := fs.State(); state.Msgs != 100 || state.FirstSeq != 1 || state.LastSeq != 100 || len(state.Deleted) != 0 { 3914 // There should be 100 messages exactly, as the 101st subject 3915 // should have been rejected in the first loop, and any duplicates 3916 // on the other subjects should have been rejected in the second loop. 
3917 t.Fatalf("Bad state: %+v", state) 3918 } 3919 }) 3920 } 3921 3922 func TestFileStoreSubjectStateCacheExpiration(t *testing.T) { 3923 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3924 fcfg.BlockSize = 32 3925 fcfg.CacheExpire = time.Second 3926 fcfg.SyncInterval = time.Second 3927 cfg := StreamConfig{Name: "zzz", Subjects: []string{"kv.>"}, Storage: FileStorage, MaxMsgsPer: 2} 3928 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 3929 require_NoError(t, err) 3930 defer fs.Stop() 3931 3932 for i := 1; i <= 100; i++ { 3933 subj := fmt.Sprintf("kv.foo.%d", i) 3934 _, _, err := fs.StoreMsg(subj, nil, []byte("value")) 3935 require_NoError(t, err) 3936 } 3937 for i := 1; i <= 100; i++ { 3938 subj := fmt.Sprintf("kv.bar.%d", i) 3939 _, _, err := fs.StoreMsg(subj, nil, []byte("value")) 3940 require_NoError(t, err) 3941 } 3942 3943 // Make sure we clear mb fss meta before asking for SubjectState. 3944 checkFor(t, 10*time.Second, 500*time.Millisecond, func() error { 3945 if _, hasAnyFSS := fs.reportMeta(); hasAnyFSS { 3946 return fmt.Errorf("Still have mb fss state") 3947 } 3948 return nil 3949 }) 3950 3951 if fss := fs.SubjectsState("kv.bar.>"); len(fss) != 100 { 3952 t.Fatalf("Expected 100 entries but got %d", len(fss)) 3953 } 3954 3955 fss := fs.SubjectsState("kv.bar.99") 3956 if len(fss) != 1 { 3957 t.Fatalf("Expected 1 entry but got %d", len(fss)) 3958 } 3959 expected := SimpleState{Msgs: 1, First: 199, Last: 199} 3960 if ss := fss["kv.bar.99"]; ss != expected { 3961 t.Fatalf("Bad subject state, expected %+v but got %+v", expected, ss) 3962 } 3963 3964 // Now add one to end and check as well for non-wildcard. 3965 _, _, err = fs.StoreMsg("kv.foo.1", nil, []byte("value22")) 3966 require_NoError(t, err) 3967 3968 if state := fs.State(); state.Msgs != 201 { 3969 t.Fatalf("Expected 201 msgs but got %+v", state) 3970 } 3971 3972 fss = fs.SubjectsState("kv.foo.1") 3973 if len(fss) != 1 { 3974 t.Fatalf("Expected 1 entry but got %d", len(fss)) 3975 } 3976 expected = SimpleState{Msgs: 2, First: 1, Last: 201} 3977 if ss := fss["kv.foo.1"]; ss != expected { 3978 t.Fatalf("Bad subject state, expected %+v but got %+v", expected, ss) 3979 } 3980 }) 3981 } 3982 3983 func TestFileStoreEncrypted(t *testing.T) { 3984 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 3985 created := time.Now() 3986 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil) 3987 require_NoError(t, err) 3988 defer fs.Stop() 3989 3990 subj, msg := "foo", []byte("aes ftw") 3991 for i := 0; i < 50; i++ { 3992 fs.StoreMsg(subj, nil, msg) 3993 } 3994 3995 o, err := fs.ConsumerStore("o22", &ConsumerConfig{}) 3996 require_NoError(t, err) 3997 3998 state := &ConsumerState{} 3999 state.Delivered.Consumer = 22 4000 state.Delivered.Stream = 22 4001 state.AckFloor.Consumer = 11 4002 state.AckFloor.Stream = 11 4003 err = o.Update(state) 4004 require_NoError(t, err) 4005 4006 fs.Stop() 4007 fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil) 4008 require_NoError(t, err) 4009 defer fs.Stop() 4010 4011 // Now make sure we can read. 
4012 var smv StoreMsg 4013 sm, err := fs.LoadMsg(10, &smv) 4014 require_NoError(t, err) 4015 require_True(t, string(sm.msg) == "aes ftw") 4016 4017 o, err = fs.ConsumerStore("o22", &ConsumerConfig{}) 4018 require_NoError(t, err) 4019 rstate, err := o.State() 4020 require_NoError(t, err) 4021 4022 if rstate.Delivered != state.Delivered || rstate.AckFloor != state.AckFloor { 4023 t.Fatalf("Bad recovered consumer state, expected %+v got %+v", state, rstate) 4024 } 4025 }) 4026 } 4027 4028 // Make sure we do not go through block loads when we know no subjects will exists, e.g. raft. 4029 func TestFileStoreNoFSSWhenNoSubjects(t *testing.T) { 4030 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4031 created := time.Now() 4032 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil) 4033 require_NoError(t, err) 4034 defer fs.Stop() 4035 4036 n, msg := 100, []byte("raft state") 4037 for i := 0; i < n; i++ { 4038 _, _, err := fs.StoreMsg(_EMPTY_, nil, msg) 4039 require_NoError(t, err) 4040 } 4041 4042 state := fs.State() 4043 require_True(t, state.Msgs == uint64(n)) 4044 4045 fs.Stop() 4046 fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, created, prf(&fcfg), nil) 4047 require_NoError(t, err) 4048 defer fs.Stop() 4049 4050 // Make sure we did not load the block trying to generate fss. 4051 fs.mu.RLock() 4052 mb := fs.blks[0] 4053 fs.mu.RUnlock() 4054 4055 mb.mu.Lock() 4056 defer mb.mu.Unlock() 4057 4058 if mb.cloads > 0 { 4059 t.Fatalf("Expected no cache loads but got %d", mb.cloads) 4060 } 4061 if mb.fss != nil { 4062 t.Fatalf("Expected fss to be nil") 4063 } 4064 }) 4065 } 4066 4067 func TestFileStoreNoFSSBugAfterRemoveFirst(t *testing.T) { 4068 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4069 fcfg.BlockSize = 8 * 1024 * 1024 4070 fcfg.CacheExpire = 200 * time.Millisecond 4071 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo.bar.*"}, Storage: FileStorage} 4072 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4073 require_NoError(t, err) 4074 defer fs.Stop() 4075 4076 n, msg := 100, bytes.Repeat([]byte("ZZZ"), 33) // ~100bytes 4077 for i := 0; i < n; i++ { 4078 subj := fmt.Sprintf("foo.bar.%d", i) 4079 _, _, err := fs.StoreMsg(subj, nil, msg) 4080 require_NoError(t, err) 4081 } 4082 4083 state := fs.State() 4084 require_True(t, state.Msgs == uint64(n)) 4085 4086 // Let fss expire. 4087 time.Sleep(250 * time.Millisecond) 4088 4089 _, err = fs.RemoveMsg(1) 4090 require_NoError(t, err) 4091 4092 sm, _, err := fs.LoadNextMsg("foo.>", true, 1, nil) 4093 require_NoError(t, err) 4094 require_True(t, sm.subj == "foo.bar.1") 4095 4096 // Make sure mb.fss does not have the entry for foo.bar.0 4097 fs.mu.Lock() 4098 mb := fs.blks[0] 4099 fs.mu.Unlock() 4100 mb.mu.RLock() 4101 ss := mb.fss["foo.bar.0"] 4102 mb.mu.RUnlock() 4103 4104 if ss != nil { 4105 t.Fatalf("Expected no state for %q, but got %+v\n", "foo.bar.0", ss) 4106 } 4107 }) 4108 } 4109 4110 // NOTE: We do not use fss files anymore, but leaving test in place. 
4111 func TestFileStoreNoFSSAfterRecover(t *testing.T) { 4112 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4113 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage} 4114 created := time.Now() 4115 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 4116 require_NoError(t, err) 4117 defer fs.Stop() 4118 4119 n, msg := 100, []byte("no fss for you!") 4120 for i := 0; i < n; i++ { 4121 _, _, err := fs.StoreMsg(_EMPTY_, nil, msg) 4122 require_NoError(t, err) 4123 } 4124 4125 state := fs.State() 4126 require_True(t, state.Msgs == uint64(n)) 4127 4128 fs.Stop() 4129 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 4130 require_NoError(t, err) 4131 defer fs.Stop() 4132 4133 // Make sure we did not load the block trying to generate fss. 4134 fs.mu.RLock() 4135 mb := fs.blks[0] 4136 fs.mu.RUnlock() 4137 4138 mb.mu.Lock() 4139 defer mb.mu.Unlock() 4140 4141 if mb.fss != nil { 4142 t.Fatalf("Expected no fss post recover") 4143 } 4144 }) 4145 } 4146 4147 func TestFileStoreFSSCloseAndKeepOnExpireOnRecoverBug(t *testing.T) { 4148 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4149 ttl := 100 * time.Millisecond 4150 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage, MaxAge: ttl} 4151 created := time.Now() 4152 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 4153 require_NoError(t, err) 4154 defer fs.Stop() 4155 4156 _, _, err = fs.StoreMsg("foo", nil, nil) 4157 require_NoError(t, err) 4158 4159 fs.Stop() 4160 time.Sleep(2 * ttl) 4161 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 4162 require_NoError(t, err) 4163 defer fs.Stop() 4164 4165 if state := fs.State(); state.NumSubjects != 0 { 4166 t.Fatalf("Expected no subjects with no messages, got %d", state.NumSubjects) 4167 } 4168 }) 4169 } 4170 4171 func TestFileStoreExpireOnRecoverSubjectAccounting(t *testing.T) { 4172 const msgLen = 19 4173 msg := bytes.Repeat([]byte("A"), msgLen) 4174 4175 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4176 fcfg.BlockSize = 100 4177 ttl := 200 * time.Millisecond 4178 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage, MaxAge: ttl} 4179 created := time.Now() 4180 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 4181 require_NoError(t, err) 4182 defer fs.Stop() 4183 4184 // These are in first block. 4185 fs.StoreMsg("A", nil, msg) 4186 fs.StoreMsg("B", nil, msg) 4187 time.Sleep(ttl / 2) 4188 // This one in 2nd block. 4189 fs.StoreMsg("C", nil, msg) 4190 4191 fs.Stop() 4192 time.Sleep(ttl/2 + 10*time.Millisecond) 4193 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 4194 require_NoError(t, err) 4195 defer fs.Stop() 4196 4197 // Make sure we take into account PSIM when throwing a whole block away. 4198 if state := fs.State(); state.NumSubjects != 1 { 4199 t.Fatalf("Expected 1 subject, got %d", state.NumSubjects) 4200 } 4201 }) 4202 } 4203 4204 func TestFileStoreFSSExpireNumPendingBug(t *testing.T) { 4205 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4206 cexp := 100 * time.Millisecond 4207 fcfg.CacheExpire = cexp 4208 cfg := StreamConfig{Name: "zzz", Subjects: []string{"KV.>"}, MaxMsgsPer: 1, Storage: FileStorage} 4209 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4210 require_NoError(t, err) 4211 defer fs.Stop() 4212 4213 // Let FSS meta expire. 
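// CacheExpire is only 100ms, so sleeping for twice that lets any cached
// per-subject (fss) state expire before the first write, making the
// FilteredState call below rebuild its counts from scratch.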
4214 time.Sleep(2 * cexp) 4215 4216 _, _, err = fs.StoreMsg("KV.X", nil, []byte("Y")) 4217 require_NoError(t, err) 4218 4219 if fss := fs.FilteredState(1, "KV.X"); fss.Msgs != 1 { 4220 t.Fatalf("Expected only 1 msg, got %d", fss.Msgs) 4221 } 4222 }) 4223 } 4224 4225 // https://github.com/nats-io/nats-server/issues/3484 4226 func TestFileStoreFilteredFirstMatchingBug(t *testing.T) { 4227 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4228 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo.>"}, Storage: FileStorage} 4229 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4230 require_NoError(t, err) 4231 defer fs.Stop() 4232 4233 _, _, err = fs.StoreMsg("foo.foo", nil, []byte("A")) 4234 require_NoError(t, err) 4235 4236 _, _, err = fs.StoreMsg("foo.foo", nil, []byte("B")) 4237 require_NoError(t, err) 4238 4239 _, _, err = fs.StoreMsg("foo.foo", nil, []byte("C")) 4240 require_NoError(t, err) 4241 4242 fs.mu.RLock() 4243 mb := fs.lmb 4244 fs.mu.RUnlock() 4245 4246 mb.mu.Lock() 4247 // Simulate swapping out the fss state and reading it back in with only one subject 4248 // present in the block. 4249 if mb.fss != nil { 4250 mb.fss = nil 4251 } 4252 // Now load info back in. 4253 mb.generatePerSubjectInfo() 4254 mb.mu.Unlock() 4255 4256 // Now add in a different subject. 4257 _, _, err = fs.StoreMsg("foo.bar", nil, []byte("X")) 4258 require_NoError(t, err) 4259 4260 // Now see if a filtered load would incorrectly succeed. 4261 sm, _, err := fs.LoadNextMsg("foo.foo", false, 4, nil) 4262 if err == nil || sm != nil { 4263 t.Fatalf("Loaded filtered message with wrong subject, wanted %q got %q", "foo.foo", sm.subj) 4264 } 4265 }) 4266 } 4267 4268 func TestFileStoreOutOfSpaceRebuildState(t *testing.T) { 4269 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4270 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 4271 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4272 require_NoError(t, err) 4273 defer fs.Stop() 4274 4275 _, _, err = fs.StoreMsg("foo", nil, []byte("A")) 4276 require_NoError(t, err) 4277 4278 _, _, err = fs.StoreMsg("bar", nil, []byte("B")) 4279 require_NoError(t, err) 4280 4281 // Grab state. 4282 state := fs.State() 4283 ss := fs.SubjectsState(">") 4284 4285 // Set mock out of space error to trip. 4286 fs.mu.RLock() 4287 mb := fs.lmb 4288 fs.mu.RUnlock() 4289 4290 mb.mu.Lock() 4291 mb.mockWriteErr = true 4292 mb.mu.Unlock() 4293 4294 _, _, err = fs.StoreMsg("baz", nil, []byte("C")) 4295 require_Error(t, err, errors.New("mock write error")) 4296 4297 nstate := fs.State() 4298 nss := fs.SubjectsState(">") 4299 4300 if !reflect.DeepEqual(state, nstate) { 4301 t.Fatalf("State expected to be\n %+v\nvs\n %+v", state, nstate) 4302 } 4303 4304 if !reflect.DeepEqual(ss, nss) { 4305 t.Fatalf("Subject state expected to be\n %+v\nvs\n %+v", ss, nss) 4306 } 4307 }) 4308 } 4309 4310 func TestFileStoreRebuildStateProperlyWithMaxMsgsPerSubject(t *testing.T) { 4311 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4312 fcfg.BlockSize = 4096 4313 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo", "bar", "baz"}, Storage: FileStorage, MaxMsgsPer: 1} 4314 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4315 require_NoError(t, err) 4316 defer fs.Stop() 4317 4318 // Send one to baz at beginning. 
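// With MaxMsgsPer set to 1, each new write to "foo" or "bar" below displaces
// the previous one, so no matter how many rounds we run, only one message per
// subject (three in total, including "baz") should survive.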
4319 _, _, err = fs.StoreMsg("baz", nil, nil)
4320 require_NoError(t, err)
4321 
4322 ns := 1000
4323 for i := 1; i <= ns; i++ {
4324 _, _, err := fs.StoreMsg("foo", nil, nil)
4325 require_NoError(t, err)
4326 _, _, err = fs.StoreMsg("bar", nil, nil)
4327 require_NoError(t, err)
4328 }
4329 
4330 var ss StreamState
4331 fs.FastState(&ss)
4332 if ss.NumSubjects != 3 {
4333 t.Fatalf("Expected NumSubjects of 3, got %d", ss.NumSubjects)
4334 }
4335 if ss.Msgs != 3 {
4336 t.Fatalf("Expected NumMsgs of 3, got %d", ss.Msgs)
4337 }
4338 })
4339 }
4340 
4341 func TestFileStoreUpdateMaxMsgsPerSubject(t *testing.T) {
4342 cfg := StreamConfig{
4343 Name: "TEST",
4344 Storage: FileStorage,
4345 Subjects: []string{"foo"},
4346 MaxMsgsPer: 10,
4347 }
4348 
4349 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
4350 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil)
4351 require_NoError(t, err)
4352 defer fs.Stop()
4353 
4354 // Make sure this is honored on an update.
4355 cfg.MaxMsgsPer = 50
4356 err = fs.UpdateConfig(&cfg)
4357 require_NoError(t, err)
4358 
4359 numStored := 22
4360 for i := 0; i < numStored; i++ {
4361 _, _, err = fs.StoreMsg("foo", nil, nil)
4362 require_NoError(t, err)
4363 }
4364 
4365 ss := fs.SubjectsState("foo")["foo"]
4366 if ss.Msgs != uint64(numStored) {
4367 t.Fatalf("Expected to have %d stored, got %d", numStored, ss.Msgs)
4368 }
4369 
4370 // Now make sure we truncate if setting to a lower value.
4371 cfg.MaxMsgsPer = 10
4372 err = fs.UpdateConfig(&cfg)
4373 require_NoError(t, err)
4374 
4375 ss = fs.SubjectsState("foo")["foo"]
4376 if ss.Msgs != 10 {
4377 t.Fatalf("Expected to have %d stored, got %d", 10, ss.Msgs)
4378 }
4379 })
4380 }
4381 
4382 func TestFileStoreBadFirstAndFailedExpireAfterRestart(t *testing.T) {
4383 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) {
4384 fcfg.BlockSize = 256
4385 ttl := time.Second
4386 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage, MaxAge: ttl}
4387 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil)
4388 require_NoError(t, err)
4389 defer fs.Stop()
4390 
4391 // With block size of 256 and subject and message below, seq 8 starts new block.
4392 // Will double check and fail the test if that is not the case since the test depends on this.
4393 subj, msg := "foo", []byte("ZZ")
4394 // These are all instant and will expire after 1 sec.
4395 start := time.Now()
4396 for i := 0; i < 7; i++ {
4397 _, _, err := fs.StoreMsg(subj, nil, msg)
4398 require_NoError(t, err)
4399 }
4400 
4401 // Put two more after a delay.
4402 time.Sleep(1500 * time.Millisecond)
4403 seq, _, err := fs.StoreMsg(subj, nil, msg)
4404 require_NoError(t, err)
4405 _, _, err = fs.StoreMsg(subj, nil, msg)
4406 require_NoError(t, err)
4407 
4408 // Make sure that sequence 8 is first in second block, and break test if that is not true.
4409 fs.mu.RLock()
4410 lmb := fs.lmb
4411 fs.mu.RUnlock()
4412 lmb.mu.RLock()
4413 first := lmb.first.seq
4414 lmb.mu.RUnlock()
4415 require_True(t, first == 8)
4416 
4417 // Instantly remove first one from second block.
4418 // On restart this will trigger expire on recover which will set fs.FirstSeq to the deleted one.
4419 fs.RemoveMsg(seq)
4420 
4421 // Stop the filestore and wait until the first block expires.
4422 fs.Stop() 4423 time.Sleep(ttl - time.Since(start) + (time.Second)) 4424 fs, err = newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4425 require_NoError(t, err) 4426 defer fs.Stop() 4427 4428 // Check that state is correct for first message which should be 9 and have a proper timestamp. 4429 var state StreamState 4430 fs.FastState(&state) 4431 ts := state.FirstTime 4432 require_True(t, state.Msgs == 1) 4433 require_True(t, state.FirstSeq == 9) 4434 require_True(t, !state.FirstTime.IsZero()) 4435 4436 // Wait and make sure expire timer is still working properly. 4437 time.Sleep(2 * ttl) 4438 fs.FastState(&state) 4439 require_Equal(t, state.Msgs, 0) 4440 require_Equal(t, state.FirstSeq, 10) 4441 require_Equal(t, state.LastSeq, 9) 4442 require_Equal(t, state.LastTime, ts) 4443 }) 4444 } 4445 4446 func TestFileStoreCompactAllWithDanglingLMB(t *testing.T) { 4447 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4448 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage} 4449 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4450 require_NoError(t, err) 4451 defer fs.Stop() 4452 4453 subj, msg := "foo", []byte("ZZ") 4454 for i := 0; i < 100; i++ { 4455 _, _, err := fs.StoreMsg(subj, nil, msg) 4456 require_NoError(t, err) 4457 } 4458 4459 fs.RemoveMsg(100) 4460 purged, err := fs.Compact(100) 4461 require_NoError(t, err) 4462 require_True(t, purged == 99) 4463 4464 _, _, err = fs.StoreMsg(subj, nil, msg) 4465 require_NoError(t, err) 4466 }) 4467 } 4468 4469 func TestFileStoreStateWithBlkFirstDeleted(t *testing.T) { 4470 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4471 fcfg.BlockSize = 4096 4472 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage} 4473 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4474 require_NoError(t, err) 4475 defer fs.Stop() 4476 4477 subj, msg := "foo", []byte("Hello World") 4478 toStore := 500 4479 for i := 0; i < toStore; i++ { 4480 _, _, err := fs.StoreMsg(subj, nil, msg) 4481 require_NoError(t, err) 4482 } 4483 4484 // Delete some messages from the beginning of an interior block. 4485 fs.mu.RLock() 4486 require_True(t, len(fs.blks) > 2) 4487 fseq := fs.blks[1].first.seq 4488 fs.mu.RUnlock() 4489 4490 // Now start from first seq of second blk and delete 10 msgs 4491 for seq := fseq; seq < fseq+10; seq++ { 4492 removed, err := fs.RemoveMsg(seq) 4493 require_NoError(t, err) 4494 require_True(t, removed) 4495 } 4496 4497 // This bug was in normal detailed state. But check fast state too. 4498 var fstate StreamState 4499 fs.FastState(&fstate) 4500 require_True(t, fstate.NumDeleted == 10) 4501 state := fs.State() 4502 require_True(t, state.NumDeleted == 10) 4503 }) 4504 } 4505 4506 func TestFileStoreMsgBlkFailOnKernelFaultLostDataReporting(t *testing.T) { 4507 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4508 fcfg.BlockSize = 4096 4509 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage} 4510 created := time.Now() 4511 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 4512 require_NoError(t, err) 4513 defer fs.Stop() 4514 4515 subj, msg := "foo", []byte("Hello World") 4516 toStore := 500 4517 for i := 0; i < toStore; i++ { 4518 _, _, err := fs.StoreMsg(subj, nil, msg) 4519 require_NoError(t, err) 4520 } 4521 4522 // We want to make sure all of the scenarios report lost data properly. 
4523 // Will run 3 scenarios, 1st block, last block, interior block.
4524 // The new system does not detect byzantine behavior by default on creating the store.
4525 // A LoadMsg() or checkMsgs() call will be needed now.
4526 
4527 // First block
4528 fs.mu.RLock()
4529 require_True(t, len(fs.blks) > 0)
4530 mfn := fs.blks[0].mfn
4531 fs.mu.RUnlock()
4532 
4533 fs.Stop()
4534 
4535 require_NoError(t, os.Remove(mfn))
4536 
4537 // Restart.
4538 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
4539 require_NoError(t, err)
4540 defer fs.Stop()
4541 
4542 _, err = fs.LoadMsg(1, nil)
4543 require_Error(t, err, errNoBlkData)
4544 
4545 // Load will rebuild fs itself asynchronously.
4546 checkFor(t, time.Second, 50*time.Millisecond, func() error {
4547 if state := fs.State(); state.Lost != nil {
4548 return nil
4549 }
4550 return errors.New("no ld yet")
4551 })
4552 
4553 state := fs.State()
4554 require_True(t, state.FirstSeq == 94)
4555 require_True(t, state.Lost != nil)
4556 require_True(t, len(state.Lost.Msgs) == 93)
4557 
4558 // Last block
4559 fs.mu.RLock()
4560 require_True(t, len(fs.blks) > 0)
4561 require_True(t, fs.lmb != nil)
4562 mfn = fs.lmb.mfn
4563 fs.mu.RUnlock()
4564 
4565 fs.Stop()
4566 
4567 require_NoError(t, os.Remove(mfn))
4568 
4569 // Restart.
4570 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
4571 require_NoError(t, err)
4572 defer fs.Stop()
4573 
4574 state = fs.State()
4575 require_True(t, state.FirstSeq == 94)
4576 require_True(t, state.LastSeq == 500) // Make sure we do not lose last seq.
4577 require_True(t, state.NumDeleted == 35) // These are interiors
4578 require_True(t, state.Lost != nil)
4579 require_True(t, len(state.Lost.Msgs) == 35)
4580 
4581 // Interior block.
4582 fs.mu.RLock()
4583 require_True(t, len(fs.blks) > 3)
4584 mfn = fs.blks[len(fs.blks)-3].mfn
4585 fs.mu.RUnlock()
4586 
4587 fs.Stop()
4588 
4589 require_NoError(t, os.Remove(mfn))
4590 
4591 // Restart.
4592 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil)
4593 require_NoError(t, err)
4594 defer fs.Stop()
4595 
4596 // Need checkMsgs to catch interior one.
4597 require_True(t, fs.checkMsgs() != nil)
4598 
4599 state = fs.State()
4600 require_True(t, state.FirstSeq == 94)
4601 require_True(t, state.LastSeq == 500) // Make sure we do not lose last seq.
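// NumDeleted now covers both the 35 messages lost with the tail block above
// and the 93 lost from the removed interior block, hence 128 below.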
4602 require_True(t, state.NumDeleted == 128) 4603 require_True(t, state.Lost != nil) 4604 require_True(t, len(state.Lost.Msgs) == 93) 4605 }) 4606 } 4607 4608 func TestFileStoreAllFilteredStateWithDeleted(t *testing.T) { 4609 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4610 fcfg.BlockSize = 1024 4611 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage} 4612 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4613 require_NoError(t, err) 4614 defer fs.Stop() 4615 4616 subj, msg := "foo", []byte("Hello World") 4617 for i := 0; i < 100; i++ { 4618 _, _, err := fs.StoreMsg(subj, nil, msg) 4619 require_NoError(t, err) 4620 } 4621 4622 remove := func(seqs ...uint64) { 4623 for _, seq := range seqs { 4624 ok, err := fs.RemoveMsg(seq) 4625 require_NoError(t, err) 4626 require_True(t, ok) 4627 } 4628 } 4629 4630 checkFilteredState := func(start, msgs, first, last int) { 4631 fss := fs.FilteredState(uint64(start), _EMPTY_) 4632 if fss.Msgs != uint64(msgs) { 4633 t.Fatalf("Expected %d msgs, got %d", msgs, fss.Msgs) 4634 } 4635 if fss.First != uint64(first) { 4636 t.Fatalf("Expected %d to be first, got %d", first, fss.First) 4637 } 4638 if fss.Last != uint64(last) { 4639 t.Fatalf("Expected %d to be last, got %d", last, fss.Last) 4640 } 4641 } 4642 4643 checkFilteredState(1, 100, 1, 100) 4644 remove(2) 4645 checkFilteredState(2, 98, 3, 100) 4646 remove(3, 4, 5) 4647 checkFilteredState(2, 95, 6, 100) 4648 checkFilteredState(6, 95, 6, 100) 4649 remove(8, 10, 12, 14, 16, 18) 4650 checkFilteredState(7, 88, 7, 100) 4651 4652 // Now check when purged that we return first and last sequences properly. 4653 fs.Purge() 4654 checkFilteredState(0, 0, 101, 100) 4655 }) 4656 } 4657 4658 func TestFileStoreStreamTruncateResetMultiBlock(t *testing.T) { 4659 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4660 fcfg.BlockSize = 128 4661 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage} 4662 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4663 require_NoError(t, err) 4664 defer fs.Stop() 4665 4666 subj, msg := "foo", []byte("Hello World") 4667 for i := 0; i < 1000; i++ { 4668 _, _, err := fs.StoreMsg(subj, nil, msg) 4669 require_NoError(t, err) 4670 } 4671 fs.syncBlocks() 4672 require_True(t, fs.numMsgBlocks() == 500) 4673 4674 // Reset everything 4675 require_NoError(t, fs.Truncate(0)) 4676 require_True(t, fs.numMsgBlocks() == 0) 4677 4678 state := fs.State() 4679 require_Equal(t, state.Msgs, 0) 4680 require_Equal(t, state.Bytes, 0) 4681 require_Equal(t, state.FirstSeq, 0) 4682 require_Equal(t, state.LastSeq, 0) 4683 require_Equal(t, state.NumSubjects, 0) 4684 require_Equal(t, state.NumDeleted, 0) 4685 4686 for i := 0; i < 1000; i++ { 4687 _, _, err := fs.StoreMsg(subj, nil, msg) 4688 require_NoError(t, err) 4689 } 4690 fs.syncBlocks() 4691 4692 state = fs.State() 4693 require_Equal(t, state.Msgs, 1000) 4694 require_Equal(t, state.Bytes, 44000) 4695 require_Equal(t, state.FirstSeq, 1) 4696 require_Equal(t, state.LastSeq, 1000) 4697 require_Equal(t, state.NumSubjects, 1) 4698 require_Equal(t, state.NumDeleted, 0) 4699 }) 4700 } 4701 4702 func TestFileStoreStreamCompactMultiBlockSubjectInfo(t *testing.T) { 4703 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4704 fcfg.BlockSize = 128 4705 cfg := StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage} 4706 fs, err := newFileStoreWithCreated(fcfg, cfg, 
time.Now(), prf(&fcfg), nil) 4707 require_NoError(t, err) 4708 defer fs.Stop() 4709 4710 for i := 0; i < 1000; i++ { 4711 subj := fmt.Sprintf("foo.%d", i) 4712 _, _, err := fs.StoreMsg(subj, nil, []byte("Hello World")) 4713 require_NoError(t, err) 4714 } 4715 require_True(t, fs.numMsgBlocks() == 500) 4716 4717 // Compact such that we know we throw blocks away from the beginning. 4718 deleted, err := fs.Compact(501) 4719 require_NoError(t, err) 4720 require_True(t, deleted == 500) 4721 require_True(t, fs.numMsgBlocks() == 250) 4722 4723 // Make sure we adjusted for subjects etc. 4724 state := fs.State() 4725 require_True(t, state.NumSubjects == 500) 4726 }) 4727 } 4728 4729 func TestFileStoreSubjectsTotals(t *testing.T) { 4730 // No need for all permutations here. 4731 storeDir := t.TempDir() 4732 fcfg := FileStoreConfig{StoreDir: storeDir} 4733 fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Subjects: []string{"*.*"}, Storage: FileStorage}) 4734 require_NoError(t, err) 4735 defer fs.Stop() 4736 4737 fmap := make(map[int]int) 4738 bmap := make(map[int]int) 4739 4740 var m map[int]int 4741 var ft string 4742 4743 for i := 0; i < 10_000; i++ { 4744 // Flip coin for prefix 4745 if rand.Intn(2) == 0 { 4746 ft, m = "foo", fmap 4747 } else { 4748 ft, m = "bar", bmap 4749 } 4750 dt := rand.Intn(100) 4751 subj := fmt.Sprintf("%s.%d", ft, dt) 4752 m[dt]++ 4753 4754 _, _, err := fs.StoreMsg(subj, nil, []byte("Hello World")) 4755 require_NoError(t, err) 4756 } 4757 4758 // Now test SubjectsTotal 4759 for dt, total := range fmap { 4760 subj := fmt.Sprintf("foo.%d", dt) 4761 m := fs.SubjectsTotals(subj) 4762 if m[subj] != uint64(total) { 4763 t.Fatalf("Expected %q to have %d total, got %d", subj, total, m[subj]) 4764 } 4765 } 4766 4767 // Check fmap. 4768 if st := fs.SubjectsTotals("foo.*"); len(st) != len(fmap) { 4769 t.Fatalf("Expected %d subjects for %q, got %d", len(fmap), "foo.*", len(st)) 4770 } else { 4771 expected := 0 4772 for _, n := range fmap { 4773 expected += n 4774 } 4775 received := uint64(0) 4776 for _, n := range st { 4777 received += n 4778 } 4779 if received != uint64(expected) { 4780 t.Fatalf("Expected %d total but got %d", expected, received) 4781 } 4782 } 4783 4784 // Check bmap. 4785 if st := fs.SubjectsTotals("bar.*"); len(st) != len(bmap) { 4786 t.Fatalf("Expected %d subjects for %q, got %d", len(bmap), "bar.*", len(st)) 4787 } else { 4788 expected := 0 4789 for _, n := range bmap { 4790 expected += n 4791 } 4792 received := uint64(0) 4793 for _, n := range st { 4794 received += n 4795 } 4796 if received != uint64(expected) { 4797 t.Fatalf("Expected %d total but got %d", expected, received) 4798 } 4799 } 4800 4801 // All with pwc match. 
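// "pwc" is the partial wildcard token '*', so "*.*" should match every
// subject we stored and the number of entries must equal the combined size
// of the foo and bar maps.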
4802 if st, expected := fs.SubjectsTotals("*.*"), len(bmap)+len(fmap); len(st) != expected { 4803 t.Fatalf("Expected %d subjects for %q, got %d", expected, "*.*", len(st)) 4804 } 4805 } 4806 4807 func TestFileStoreConsumerStoreEncodeAfterRestart(t *testing.T) { 4808 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4809 fs, err := newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 4810 require_NoError(t, err) 4811 defer fs.Stop() 4812 4813 o, err := fs.ConsumerStore("o22", &ConsumerConfig{AckPolicy: AckExplicit}) 4814 require_NoError(t, err) 4815 4816 state := &ConsumerState{} 4817 state.Delivered.Consumer = 22 4818 state.Delivered.Stream = 22 4819 state.AckFloor.Consumer = 11 4820 state.AckFloor.Stream = 11 4821 err = o.Update(state) 4822 require_NoError(t, err) 4823 4824 fs.Stop() 4825 4826 fs, err = newFileStoreWithCreated(fcfg, StreamConfig{Name: "zzz", Storage: FileStorage}, time.Now(), prf(&fcfg), nil) 4827 require_NoError(t, err) 4828 defer fs.Stop() 4829 4830 o, err = fs.ConsumerStore("o22", &ConsumerConfig{AckPolicy: AckExplicit}) 4831 require_NoError(t, err) 4832 4833 if o.(*consumerFileStore).state.Delivered != state.Delivered { 4834 t.Fatalf("Consumer state is wrong %+v vs %+v", o.(*consumerFileStore).state, state) 4835 } 4836 if o.(*consumerFileStore).state.AckFloor != state.AckFloor { 4837 t.Fatalf("Consumer state is wrong %+v vs %+v", o.(*consumerFileStore).state, state) 4838 } 4839 }) 4840 } 4841 4842 func TestFileStoreNumPendingLargeNumBlks(t *testing.T) { 4843 // No need for all permutations here. 4844 storeDir := t.TempDir() 4845 fcfg := FileStoreConfig{ 4846 StoreDir: storeDir, 4847 BlockSize: 128, // Small on purpose to create alot of blks. 4848 } 4849 fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Subjects: []string{"zzz"}, Storage: FileStorage}) 4850 require_NoError(t, err) 4851 defer fs.Stop() 4852 4853 subj, msg := "zzz", bytes.Repeat([]byte("X"), 100) 4854 numMsgs := 10_000 4855 4856 for i := 0; i < numMsgs; i++ { 4857 fs.StoreMsg(subj, nil, msg) 4858 } 4859 4860 start := time.Now() 4861 total, _ := fs.NumPending(4000, "zzz", false) 4862 require_LessThan(t, time.Since(start), 15*time.Millisecond) 4863 require_Equal(t, total, 6001) 4864 4865 start = time.Now() 4866 total, _ = fs.NumPending(6000, "zzz", false) 4867 require_LessThan(t, time.Since(start), 25*time.Millisecond) 4868 require_Equal(t, total, 4001) 4869 4870 // Now delete a message in first half and second half. 4871 fs.RemoveMsg(1000) 4872 fs.RemoveMsg(9000) 4873 4874 start = time.Now() 4875 total, _ = fs.NumPending(4000, "zzz", false) 4876 require_LessThan(t, time.Since(start), 50*time.Millisecond) 4877 require_Equal(t, total, 6000) 4878 4879 start = time.Now() 4880 total, _ = fs.NumPending(6000, "zzz", false) 4881 require_LessThan(t, time.Since(start), 50*time.Millisecond) 4882 require_Equal(t, total, 4000) 4883 } 4884 4885 func TestFileStoreSkipMsgAndNumBlocks(t *testing.T) { 4886 // No need for all permutations here. 4887 storeDir := t.TempDir() 4888 fcfg := FileStoreConfig{ 4889 StoreDir: storeDir, 4890 BlockSize: 128, // Small on purpose to create alot of blks. 
4891 } 4892 fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Subjects: []string{"zzz"}, Storage: FileStorage}) 4893 require_NoError(t, err) 4894 defer fs.Stop() 4895 4896 subj, msg := "zzz", bytes.Repeat([]byte("X"), 100) 4897 numMsgs := 10_000 4898 4899 fs.StoreMsg(subj, nil, msg) 4900 for i := 0; i < numMsgs; i++ { 4901 fs.SkipMsg() 4902 } 4903 fs.StoreMsg(subj, nil, msg) 4904 require_True(t, fs.numMsgBlocks() == 2) 4905 } 4906 4907 func TestFileStoreRestoreEncryptedWithNoKeyFuncFails(t *testing.T) { 4908 // No need for all permutations here. 4909 fcfg := FileStoreConfig{StoreDir: t.TempDir(), Cipher: AES} 4910 cfg := StreamConfig{Name: "zzz", Subjects: []string{"zzz"}, Storage: FileStorage} 4911 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4912 require_NoError(t, err) 4913 defer fs.Stop() 4914 4915 subj, msg := "zzz", bytes.Repeat([]byte("X"), 100) 4916 numMsgs := 100 4917 for i := 0; i < numMsgs; i++ { 4918 fs.StoreMsg(subj, nil, msg) 4919 } 4920 4921 fs.Stop() 4922 4923 // Make sure if we try to restore with no prf (key) that it fails. 4924 _, err = newFileStoreWithCreated(fcfg, cfg, time.Now(), nil, nil) 4925 require_Error(t, err, errNoMainKey) 4926 } 4927 4928 func TestFileStoreInitialFirstSeq(t *testing.T) { 4929 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 4930 cfg := StreamConfig{Name: "zzz", Storage: FileStorage, FirstSeq: 1000} 4931 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 4932 require_NoError(t, err) 4933 defer fs.Stop() 4934 4935 seq, _, err := fs.StoreMsg("A", nil, []byte("OK")) 4936 require_NoError(t, err) 4937 if seq != 1000 { 4938 t.Fatalf("Message should have been sequence 1000 but was %d", seq) 4939 } 4940 4941 seq, _, err = fs.StoreMsg("B", nil, []byte("OK")) 4942 require_NoError(t, err) 4943 if seq != 1001 { 4944 t.Fatalf("Message should have been sequence 1001 but was %d", seq) 4945 } 4946 4947 var state StreamState 4948 fs.FastState(&state) 4949 switch { 4950 case state.Msgs != 2: 4951 t.Fatalf("Expected 2 messages, got %d", state.Msgs) 4952 case state.FirstSeq != 1000: 4953 t.Fatalf("Expected first seq 1000, got %d", state.FirstSeq) 4954 case state.LastSeq != 1001: 4955 t.Fatalf("Expected last seq 1001, got %d", state.LastSeq) 4956 } 4957 }) 4958 } 4959 4960 func TestFileStoreRecaluclateFirstForSubjBug(t *testing.T) { 4961 fs, err := newFileStore(FileStoreConfig{StoreDir: t.TempDir()}, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 4962 require_NoError(t, err) 4963 defer fs.Stop() 4964 4965 fs.StoreMsg("foo", nil, nil) // 1 4966 fs.StoreMsg("bar", nil, nil) // 2 4967 fs.StoreMsg("foo", nil, nil) // 3 4968 4969 // Now remove first 2.. 4970 fs.RemoveMsg(1) 4971 fs.RemoveMsg(2) 4972 4973 // Now grab first (and only) block. 4974 fs.mu.RLock() 4975 mb := fs.blks[0] 4976 fs.mu.RUnlock() 4977 4978 // Since we lazy update the first, simulate that we have not updated it as of yet. 4979 ss := &SimpleState{Msgs: 1, First: 1, Last: 3, firstNeedsUpdate: true} 4980 4981 mb.mu.Lock() 4982 defer mb.mu.Unlock() 4983 4984 // Flush the cache. 4985 mb.clearCacheAndOffset() 4986 // Now call with start sequence of 1, the old one 4987 // This will panic without the fix. 4988 mb.recalculateFirstForSubj("foo", 1, ss) 4989 // Make sure it was update properly. 
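// After the recalculation the subject's first sequence should have moved past
// the two removed messages to sequence 3, with firstNeedsUpdate cleared.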
4990 require_True(t, *ss == SimpleState{Msgs: 1, First: 3, Last: 3, firstNeedsUpdate: false}) 4991 } 4992 4993 func TestFileStoreKeepWithDeletedMsgsBug(t *testing.T) { 4994 fs, err := newFileStore(FileStoreConfig{StoreDir: t.TempDir()}, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 4995 require_NoError(t, err) 4996 defer fs.Stop() 4997 4998 msg := bytes.Repeat([]byte("A"), 19) 4999 for i := 0; i < 5; i++ { 5000 fs.StoreMsg("A", nil, msg) 5001 fs.StoreMsg("B", nil, msg) 5002 } 5003 5004 n, err := fs.PurgeEx("A", 0, 0) 5005 require_NoError(t, err) 5006 require_True(t, n == 5) 5007 5008 // Purge with keep. 5009 n, err = fs.PurgeEx(_EMPTY_, 0, 2) 5010 require_NoError(t, err) 5011 require_True(t, n == 3) 5012 } 5013 5014 func TestFileStoreRestartWithExpireAndLockingBug(t *testing.T) { 5015 sd := t.TempDir() 5016 scfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 5017 fs, err := newFileStore(FileStoreConfig{StoreDir: sd}, scfg) 5018 require_NoError(t, err) 5019 defer fs.Stop() 5020 5021 // 20 total 5022 msg := []byte("HELLO WORLD") 5023 for i := 0; i < 10; i++ { 5024 fs.StoreMsg("A", nil, msg) 5025 fs.StoreMsg("B", nil, msg) 5026 } 5027 fs.Stop() 5028 5029 // Now change config underneath of so we will do expires at startup. 5030 scfg.MaxMsgs = 15 5031 scfg.MaxMsgsPer = 2 5032 newCfg := FileStreamInfo{Created: fs.cfg.Created, StreamConfig: scfg} 5033 5034 // Replace 5035 fs.cfg = newCfg 5036 require_NoError(t, fs.writeStreamMeta()) 5037 5038 fs, err = newFileStore(FileStoreConfig{StoreDir: sd}, scfg) 5039 require_NoError(t, err) 5040 defer fs.Stop() 5041 } 5042 5043 // Test that loads from lmb under lots of writes do not return errPartialCache. 5044 func TestFileStoreErrPartialLoad(t *testing.T) { 5045 fs, err := newFileStore(FileStoreConfig{StoreDir: t.TempDir()}, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 5046 require_NoError(t, err) 5047 defer fs.Stop() 5048 5049 put := func(num int) { 5050 for i := 0; i < num; i++ { 5051 fs.StoreMsg("Z", nil, []byte("ZZZZZZZZZZZZZ")) 5052 } 5053 } 5054 5055 put(100) 5056 5057 // Dump cache of lmb. 5058 clearCache := func() { 5059 fs.mu.RLock() 5060 lmb := fs.lmb 5061 fs.mu.RUnlock() 5062 lmb.mu.Lock() 5063 lmb.clearCache() 5064 lmb.mu.Unlock() 5065 } 5066 clearCache() 5067 5068 qch := make(chan struct{}) 5069 defer close(qch) 5070 5071 for i := 0; i < 10; i++ { 5072 go func() { 5073 for { 5074 select { 5075 case <-qch: 5076 return 5077 default: 5078 put(5) 5079 } 5080 } 5081 }() 5082 } 5083 5084 time.Sleep(100 * time.Millisecond) 5085 5086 var smv StoreMsg 5087 for i := 0; i < 10_000; i++ { 5088 fs.mu.RLock() 5089 lmb := fs.lmb 5090 fs.mu.RUnlock() 5091 lmb.mu.Lock() 5092 first, last := fs.lmb.first.seq, fs.lmb.last.seq 5093 if i%100 == 0 { 5094 lmb.clearCache() 5095 } 5096 lmb.mu.Unlock() 5097 5098 if spread := int(last - first); spread > 0 { 5099 seq := first + uint64(rand.Intn(spread)) 5100 _, err = fs.LoadMsg(seq, &smv) 5101 require_NoError(t, err) 5102 } 5103 } 5104 } 5105 5106 func TestFileStoreErrPartialLoadOnSyncClose(t *testing.T) { 5107 fs, err := newFileStore( 5108 FileStoreConfig{StoreDir: t.TempDir(), BlockSize: 500}, 5109 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}, 5110 ) 5111 require_NoError(t, err) 5112 defer fs.Stop() 5113 5114 // This yields an internal record length of 50 bytes. So 10 msgs per blk. 5115 msgLen := 19 5116 msg := bytes.Repeat([]byte("A"), msgLen) 5117 5118 // Load up half the block. 
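// Five ~50 byte records fill roughly half of the 500 byte block, so the last
// block still has room when we simulate the sync timer closing it below.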
5119 for _, subj := range []string{"A", "B", "C", "D", "E"} { 5120 fs.StoreMsg(subj, nil, msg) 5121 } 5122 5123 // Now simulate the sync timer closing the last block. 5124 fs.mu.RLock() 5125 lmb := fs.lmb 5126 fs.mu.RUnlock() 5127 require_True(t, lmb != nil) 5128 5129 lmb.mu.Lock() 5130 lmb.expireCacheLocked() 5131 lmb.dirtyCloseWithRemove(false) 5132 lmb.mu.Unlock() 5133 5134 fs.StoreMsg("Z", nil, msg) 5135 _, err = fs.LoadMsg(1, nil) 5136 require_NoError(t, err) 5137 } 5138 5139 func TestFileStoreSyncIntervals(t *testing.T) { 5140 fcfg := FileStoreConfig{StoreDir: t.TempDir(), SyncInterval: 250 * time.Millisecond} 5141 fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 5142 require_NoError(t, err) 5143 defer fs.Stop() 5144 5145 checkSyncFlag := func(expected bool) { 5146 fs.mu.RLock() 5147 lmb := fs.lmb 5148 fs.mu.RUnlock() 5149 lmb.mu.RLock() 5150 syncNeeded := lmb.needSync 5151 lmb.mu.RUnlock() 5152 if syncNeeded != expected { 5153 t.Fatalf("Expected needSync to be %v", expected) 5154 } 5155 } 5156 5157 checkSyncFlag(false) 5158 fs.StoreMsg("Z", nil, []byte("hello")) 5159 checkSyncFlag(true) 5160 time.Sleep(400 * time.Millisecond) 5161 checkSyncFlag(false) 5162 fs.Stop() 5163 5164 // Now check always 5165 fcfg.SyncInterval = 10 * time.Second 5166 fcfg.SyncAlways = true 5167 fs, err = newFileStore(fcfg, StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 5168 require_NoError(t, err) 5169 defer fs.Stop() 5170 5171 checkSyncFlag(false) 5172 fs.StoreMsg("Z", nil, []byte("hello")) 5173 checkSyncFlag(false) 5174 } 5175 5176 // https://github.com/nats-io/nats-server/issues/4529 5177 // Run this wuth --race and you will see the unlocked access that probably caused this. 5178 func TestFileStoreRecalcFirstSequenceBug(t *testing.T) { 5179 fcfg := FileStoreConfig{StoreDir: t.TempDir()} 5180 fs, err := newFileStore(fcfg, StreamConfig{Name: "zzz", Subjects: []string{"*"}, MaxMsgsPer: 2, Storage: FileStorage}) 5181 require_NoError(t, err) 5182 defer fs.Stop() 5183 5184 msg := bytes.Repeat([]byte("A"), 22) 5185 5186 for _, subj := range []string{"A", "A", "B", "B"} { 5187 fs.StoreMsg(subj, nil, msg) 5188 } 5189 // Make sure the buffer is cleared. 5190 clearLMBCache := func() { 5191 fs.mu.RLock() 5192 mb := fs.lmb 5193 fs.mu.RUnlock() 5194 mb.mu.Lock() 5195 mb.clearCacheAndOffset() 5196 mb.mu.Unlock() 5197 } 5198 5199 clearLMBCache() 5200 5201 // Do first here. 
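// One goroutine keeps storing to "A" (the per-subject limit is 2) while the
// other repeatedly loads the last "A" and clears the lmb cache, so both sides
// race over recalculating the subject's first sequence; run with the race
// detector to see the original unlocked access.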
5202 fs.StoreMsg("A", nil, msg) 5203 5204 var wg sync.WaitGroup 5205 start := make(chan bool) 5206 5207 wg.Add(1) 5208 go func() { 5209 defer wg.Done() 5210 <-start 5211 for i := 0; i < 1_000; i++ { 5212 fs.LoadLastMsg("A", nil) 5213 clearLMBCache() 5214 } 5215 }() 5216 5217 wg.Add(1) 5218 go func() { 5219 defer wg.Done() 5220 <-start 5221 for i := 0; i < 1_000; i++ { 5222 fs.StoreMsg("A", nil, msg) 5223 } 5224 }() 5225 5226 close(start) 5227 wg.Wait() 5228 } 5229 5230 /////////////////////////////////////////////////////////////////////////// 5231 // New WAL based architecture tests 5232 /////////////////////////////////////////////////////////////////////////// 5233 5234 func TestFileStoreFullStateBasics(t *testing.T) { 5235 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 5236 fcfg.BlockSize = 100 5237 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 5238 created := time.Now() 5239 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5240 require_NoError(t, err) 5241 defer fs.Stop() 5242 5243 // This yields an internal record length of 50 bytes. So 2 msgs per blk. 5244 subj, msgLen, recLen := "A", 19, uint64(50) 5245 msgA := bytes.Repeat([]byte("A"), msgLen) 5246 msgZ := bytes.Repeat([]byte("Z"), msgLen) 5247 5248 // Send 2 msgs and stop, check for presence of our full state file. 5249 fs.StoreMsg(subj, nil, msgA) 5250 fs.StoreMsg(subj, nil, msgZ) 5251 require_True(t, fs.numMsgBlocks() == 1) 5252 5253 // Make sure there is a full state file after we do a stop. 5254 fs.Stop() 5255 5256 sfile := filepath.Join(fcfg.StoreDir, msgDir, streamStreamStateFile) 5257 if _, err := os.Stat(sfile); err != nil { 5258 t.Fatalf("Expected stream state file but got %v", err) 5259 } 5260 5261 // Read it in and make sure len > 0. 5262 buf, err := os.ReadFile(sfile) 5263 require_NoError(t, err) 5264 require_True(t, len(buf) > 0) 5265 5266 // Now make sure we recover properly. 5267 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5268 require_NoError(t, err) 5269 defer fs.Stop() 5270 5271 // Make sure there are no old idx or fss files. 5272 matches, err := filepath.Glob(filepath.Join(fcfg.StoreDir, msgDir, "%d.fss")) 5273 require_NoError(t, err) 5274 require_True(t, len(matches) == 0) 5275 matches, err = filepath.Glob(filepath.Join(fcfg.StoreDir, msgDir, "%d.idx")) 5276 require_NoError(t, err) 5277 require_True(t, len(matches) == 0) 5278 5279 state := fs.State() 5280 require_Equal(t, state.Msgs, 2) 5281 require_Equal(t, state.FirstSeq, 1) 5282 require_Equal(t, state.LastSeq, 2) 5283 5284 // Now make sure we can read in values. 5285 var smv StoreMsg 5286 sm, err := fs.LoadMsg(1, &smv) 5287 require_NoError(t, err) 5288 require_True(t, bytes.Equal(sm.msg, msgA)) 5289 5290 sm, err = fs.LoadMsg(2, &smv) 5291 require_NoError(t, err) 5292 require_True(t, bytes.Equal(sm.msg, msgZ)) 5293 5294 // Now add in 1 more here to split the lmb. 5295 fs.StoreMsg(subj, nil, msgZ) 5296 5297 // Now stop the filestore and replace the old stream state and make sure we recover correctly. 5298 fs.Stop() 5299 5300 // Regrab the stream state 5301 buf, err = os.ReadFile(sfile) 5302 require_NoError(t, err) 5303 5304 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5305 require_NoError(t, err) 5306 defer fs.Stop() 5307 5308 // Add in one more. 5309 fs.StoreMsg(subj, nil, msgZ) 5310 fs.Stop() 5311 5312 // Put old stream state back with only 3. 
5313 err = os.WriteFile(sfile, buf, defaultFilePerms) 5314 require_NoError(t, err) 5315 5316 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5317 require_NoError(t, err) 5318 defer fs.Stop() 5319 5320 state = fs.State() 5321 require_Equal(t, state.Msgs, 4) 5322 require_Equal(t, state.Bytes, 4*recLen) 5323 require_Equal(t, state.FirstSeq, 1) 5324 require_Equal(t, state.LastSeq, 4) 5325 require_Equal(t, fs.numMsgBlocks(), 2) 5326 5327 // Make sure we are tracking subjects correctly. 5328 fs.mu.RLock() 5329 info, _ := fs.psim.Find(stringToBytes(subj)) 5330 psi := *info 5331 fs.mu.RUnlock() 5332 5333 require_Equal(t, psi.total, 4) 5334 require_Equal(t, psi.fblk, 1) 5335 require_Equal(t, psi.lblk, 2) 5336 5337 // Store 1 more 5338 fs.StoreMsg(subj, nil, msgA) 5339 fs.Stop() 5340 // Put old stream state back with only 3. 5341 err = os.WriteFile(sfile, buf, defaultFilePerms) 5342 require_NoError(t, err) 5343 5344 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5345 require_NoError(t, err) 5346 defer fs.Stop() 5347 5348 state = fs.State() 5349 require_Equal(t, state.Msgs, 5) 5350 require_Equal(t, state.FirstSeq, 1) 5351 require_Equal(t, state.LastSeq, 5) 5352 require_Equal(t, fs.numMsgBlocks(), 3) 5353 // Make sure we are tracking subjects correctly. 5354 fs.mu.RLock() 5355 info, _ = fs.psim.Find(stringToBytes(subj)) 5356 psi = *info 5357 fs.mu.RUnlock() 5358 require_Equal(t, psi.total, 5) 5359 require_Equal(t, psi.fblk, 1) 5360 require_Equal(t, psi.lblk, 3) 5361 }) 5362 } 5363 5364 func TestFileStoreFullStatePurge(t *testing.T) { 5365 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 5366 fcfg.BlockSize = 132 // Leave room for tombstones. 5367 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 5368 created := time.Now() 5369 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5370 require_NoError(t, err) 5371 defer fs.Stop() 5372 5373 // This yields an internal record length of 50 bytes. So 2 msgs per blk. 5374 subj, msg := "A", bytes.Repeat([]byte("A"), 19) 5375 5376 // Should be 2 per block, so 5 blocks. 5377 for i := 0; i < 10; i++ { 5378 fs.StoreMsg(subj, nil, msg) 5379 } 5380 n, err := fs.Purge() 5381 require_NoError(t, err) 5382 require_Equal(t, n, 10) 5383 state := fs.State() 5384 fs.Stop() 5385 5386 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5387 require_NoError(t, err) 5388 defer fs.Stop() 5389 5390 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5391 t.Fatalf("Restore state after purge does not match:\n%+v\n%+v", 5392 state, newState) 5393 } 5394 5395 // Add in more 10 more total, some B some C. 5396 for i := 0; i < 5; i++ { 5397 fs.StoreMsg("B", nil, msg) 5398 fs.StoreMsg("C", nil, msg) 5399 } 5400 5401 n, err = fs.PurgeEx("B", 0, 0) 5402 require_NoError(t, err) 5403 require_Equal(t, n, 5) 5404 5405 state = fs.State() 5406 fs.Stop() 5407 5408 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5409 require_NoError(t, err) 5410 defer fs.Stop() 5411 5412 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5413 t.Fatalf("Restore state after purge does not match:\n%+v\n%+v", 5414 state, newState) 5415 } 5416 5417 // Purge with keep. 5418 n, err = fs.PurgeEx(_EMPTY_, 0, 2) 5419 require_NoError(t, err) 5420 require_Equal(t, n, 3) 5421 5422 state = fs.State() 5423 5424 // Do some quick checks here, keep had a bug. 
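// A keep of 2 retains the two newest surviving messages. After the earlier
// PurgeEx("B") those are the "C" messages at sequences 18 and 20, so the
// first sequence lands on 18 with 19 already gone.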
5425 require_Equal(t, state.Msgs, 2) 5426 require_Equal(t, state.FirstSeq, 18) 5427 require_Equal(t, state.LastSeq, 20) 5428 5429 fs.Stop() 5430 5431 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5432 require_NoError(t, err) 5433 defer fs.Stop() 5434 5435 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5436 t.Fatalf("Restore state after purge does not match:\n%+v\n%+v", 5437 state, newState) 5438 } 5439 5440 // Make sure we can survive a purge with no full stream state and have the correct first sequence. 5441 // This used to be provided by the idx file and is now tombstones and the full stream state snapshot. 5442 n, err = fs.Purge() 5443 require_NoError(t, err) 5444 require_Equal(t, n, 2) 5445 state = fs.State() 5446 fs.Stop() 5447 5448 sfile := filepath.Join(fcfg.StoreDir, msgDir, streamStreamStateFile) 5449 os.Remove(sfile) 5450 5451 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5452 require_NoError(t, err) 5453 defer fs.Stop() 5454 5455 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5456 t.Fatalf("Restore state after purge does not match:\n%+v\n%+v", 5457 state, newState) 5458 } 5459 }) 5460 } 5461 5462 func TestFileStoreFullStateTestUserRemoveWAL(t *testing.T) { 5463 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 5464 fcfg.BlockSize = 132 // Leave room for tombstones. 5465 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 5466 created := time.Now() 5467 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5468 require_NoError(t, err) 5469 defer fs.Stop() 5470 5471 // This yields an internal record length of 50 bytes. So 2 msgs per blk. 5472 msgLen := 19 5473 msgA := bytes.Repeat([]byte("A"), msgLen) 5474 msgZ := bytes.Repeat([]byte("Z"), msgLen) 5475 5476 // Store 2 msgs and delete first. 5477 fs.StoreMsg("A", nil, msgA) 5478 fs.StoreMsg("Z", nil, msgZ) 5479 fs.RemoveMsg(1) 5480 5481 // Check we can load things properly since the block will have a tombstone now for seq 1. 5482 sm, err := fs.LoadMsg(2, nil) 5483 require_NoError(t, err) 5484 require_True(t, bytes.Equal(sm.msg, msgZ)) 5485 5486 require_Equal(t, fs.numMsgBlocks(), 1) 5487 state := fs.State() 5488 fs.Stop() 5489 5490 // Grab the state from this stop. 5491 sfile := filepath.Join(fcfg.StoreDir, msgDir, streamStreamStateFile) 5492 buf, err := os.ReadFile(sfile) 5493 require_NoError(t, err) 5494 5495 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5496 require_NoError(t, err) 5497 defer fs.Stop() 5498 5499 // Check we can load things properly since the block will have a tombstone now for seq 1. 5500 _, err = fs.LoadMsg(2, nil) 5501 require_NoError(t, err) 5502 _, err = fs.LoadMsg(1, nil) 5503 require_Error(t, err, ErrStoreMsgNotFound) 5504 5505 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5506 t.Fatalf("Restore state does not match:\n%+v\n%+v", 5507 state, newState) 5508 } 5509 require_True(t, !state.FirstTime.IsZero()) 5510 5511 // Store 2 more msgs and delete 2 & 4. 
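// The new msgs land at seqs 3 and 4; after removing 2 and 4 only seq 3 remains, and those
// removals must survive restarts via the tombstones written below.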
5512 fs.StoreMsg("A", nil, msgA) 5513 fs.StoreMsg("Z", nil, msgZ) 5514 fs.RemoveMsg(2) 5515 fs.RemoveMsg(4) 5516 5517 state = fs.State() 5518 require_Equal(t, len(state.Deleted), state.NumDeleted) 5519 fs.Stop() 5520 5521 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5522 require_NoError(t, err) 5523 defer fs.Stop() 5524 5525 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5526 t.Fatalf("Restore state does not match:\n%+v\n%+v", 5527 state, newState) 5528 } 5529 require_True(t, !state.FirstTime.IsZero()) 5530 5531 // Now close again and put back old stream state. 5532 // This will test that we can remember user deletes by placing tombstones in the lmb/wal. 5533 fs.Stop() 5534 err = os.WriteFile(sfile, buf, defaultFilePerms) 5535 require_NoError(t, err) 5536 5537 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5538 require_NoError(t, err) 5539 defer fs.Stop() 5540 5541 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5542 t.Fatalf("Restore state does not match:\n%+v\n%+v", 5543 state, newState) 5544 } 5545 require_True(t, !state.FirstTime.IsZero()) 5546 }) 5547 } 5548 5549 func TestFileStoreFullStateTestSysRemovals(t *testing.T) { 5550 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 5551 fcfg.BlockSize = 100 5552 cfg := StreamConfig{ 5553 Name: "zzz", 5554 Subjects: []string{"*"}, 5555 MaxMsgs: 10, 5556 MaxMsgsPer: 1, 5557 Storage: FileStorage, 5558 } 5559 created := time.Now() 5560 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5561 require_NoError(t, err) 5562 defer fs.Stop() 5563 5564 // This yields an internal record length of 50 bytes. So 2 msgs per blk. 5565 msgLen := 19 5566 msg := bytes.Repeat([]byte("A"), msgLen) 5567 5568 for _, subj := range []string{"A", "B", "A", "B"} { 5569 fs.StoreMsg(subj, nil, msg) 5570 } 5571 5572 state := fs.State() 5573 require_Equal(t, state.Msgs, 2) 5574 require_Equal(t, state.FirstSeq, 3) 5575 require_Equal(t, state.LastSeq, 4) 5576 fs.Stop() 5577 5578 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5579 require_NoError(t, err) 5580 defer fs.Stop() 5581 5582 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5583 t.Fatalf("Restore state after purge does not match:\n%+v\n%+v", 5584 state, newState) 5585 } 5586 5587 for _, subj := range []string{"C", "D", "E", "F", "G", "H", "I", "J"} { 5588 fs.StoreMsg(subj, nil, msg) 5589 } 5590 5591 state = fs.State() 5592 require_Equal(t, state.Msgs, 10) 5593 require_Equal(t, state.FirstSeq, 3) 5594 require_Equal(t, state.LastSeq, 12) 5595 fs.Stop() 5596 5597 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5598 require_NoError(t, err) 5599 defer fs.Stop() 5600 5601 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5602 t.Fatalf("Restore state after purge does not match:\n%+v\n%+v", 5603 state, newState) 5604 } 5605 5606 // Goes over limit 5607 fs.StoreMsg("ZZZ", nil, msg) 5608 5609 state = fs.State() 5610 require_Equal(t, state.Msgs, 10) 5611 require_Equal(t, state.FirstSeq, 4) 5612 require_Equal(t, state.LastSeq, 13) 5613 fs.Stop() 5614 5615 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5616 require_NoError(t, err) 5617 defer fs.Stop() 5618 5619 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5620 t.Fatalf("Restore state after purge does not match:\n%+v\n%+v", 5621 state, newState) 5622 } 5623 }) 5624 } 5625 5626 func TestFileStoreSelectBlockWithFirstSeqRemovals(t 
*testing.T) { 5627 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 5628 fcfg.BlockSize = 100 5629 cfg := StreamConfig{ 5630 Name: "zzz", 5631 Subjects: []string{"*"}, 5632 MaxMsgsPer: 1, 5633 Storage: FileStorage, 5634 } 5635 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 5636 require_NoError(t, err) 5637 defer fs.Stop() 5638 5639 // This yields an internal record length of 50 bytes. So 2 msgs per blk. 5640 msgLen := 19 5641 msg := bytes.Repeat([]byte("A"), msgLen) 5642 5643 subjects := "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+@$^" 5644 // We need over 32 blocks to kick in binary search. So 32*2+1 (65) msgs to get 33 blocks. 5645 for i := 0; i < 32*2+1; i++ { 5646 subj := string(subjects[i]) 5647 fs.StoreMsg(subj, nil, msg) 5648 } 5649 require_Equal(t, fs.numMsgBlocks(), 33) 5650 5651 // Now we want to delete the first msg of each block to move the first sequence. 5652 // Want to do this via system removes, not user initiated moves. 5653 for i := 0; i < len(subjects); i += 2 { 5654 subj := string(subjects[i]) 5655 fs.StoreMsg(subj, nil, msg) 5656 } 5657 5658 var ss StreamState 5659 fs.FastState(&ss) 5660 5661 // We want to make sure that select always returns an index and a non-nil mb. 5662 for seq := ss.FirstSeq; seq <= ss.LastSeq; seq++ { 5663 fs.mu.RLock() 5664 index, mb := fs.selectMsgBlockWithIndex(seq) 5665 fs.mu.RUnlock() 5666 require_True(t, index >= 0) 5667 require_True(t, mb != nil) 5668 require_Equal(t, (seq-1)/2, uint64(index)) 5669 } 5670 }) 5671 } 5672 5673 func TestFileStoreMsgBlockHolesAndIndexing(t *testing.T) { 5674 fs, err := newFileStore( 5675 FileStoreConfig{StoreDir: t.TempDir()}, 5676 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage, MaxMsgsPer: 1}, 5677 ) 5678 require_NoError(t, err) 5679 defer fs.Stop() 5680 5681 // Grab the message block by hand and manipulate at that level. 5682 mb := fs.getFirstBlock() 5683 writeMsg := func(subj string, seq uint64) { 5684 rl := fileStoreMsgSize(subj, nil, []byte(subj)) 5685 require_NoError(t, mb.writeMsgRecord(rl, seq, subj, nil, []byte(subj), time.Now().UnixNano(), true)) 5686 fs.rebuildState(nil) 5687 } 5688 readMsg := func(seq uint64, expectedSubj string) { 5689 // Clear cache so we load back in from disk and need to properly process any holes. 5690 ld, tombs, err := mb.rebuildState() 5691 require_NoError(t, err) 5692 require_Equal(t, ld, nil) 5693 require_Equal(t, len(tombs), 0) 5694 fs.rebuildState(nil) 5695 sm, _, err := mb.fetchMsg(seq, nil) 5696 require_NoError(t, err) 5697 require_Equal(t, sm.subj, expectedSubj) 5698 require_True(t, bytes.Equal(sm.buf[:len(expectedSubj)], []byte(expectedSubj))) 5699 } 5700 5701 writeMsg("A", 2) 5702 require_Equal(t, mb.first.seq, 2) 5703 require_Equal(t, mb.last.seq, 2) 5704 5705 writeMsg("B", 4) 5706 require_Equal(t, mb.first.seq, 2) 5707 require_Equal(t, mb.last.seq, 4) 5708 5709 writeMsg("C", 12) 5710 5711 readMsg(4, "B") 5712 require_True(t, mb.dmap.Exists(3)) 5713 5714 readMsg(12, "C") 5715 readMsg(2, "A") 5716 5717 // Check that we get deleted for the right ones etc. 
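// Seqs below mb.first.seq (like 1 here) are simply before the block and are not tracked in the
// dmap; holes at or after first.seq must show up as explicit dmap entries.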
5718 checkDeleted := func(seq uint64) { 5719 _, _, err = mb.fetchMsg(seq, nil) 5720 require_Error(t, err, ErrStoreMsgNotFound, errDeletedMsg) 5721 mb.mu.RLock() 5722 shouldExist, exists := seq >= mb.first.seq, mb.dmap.Exists(seq) 5723 mb.mu.RUnlock() 5724 if shouldExist { 5725 require_True(t, exists) 5726 } 5727 } 5728 checkDeleted(1) 5729 checkDeleted(3) 5730 for seq := 5; seq < 12; seq++ { 5731 checkDeleted(uint64(seq)) 5732 } 5733 } 5734 5735 func TestFileStoreMsgBlockCompactionAndHoles(t *testing.T) { 5736 fs, err := newFileStore( 5737 FileStoreConfig{StoreDir: t.TempDir()}, 5738 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage, MaxMsgsPer: 1}, 5739 ) 5740 require_NoError(t, err) 5741 defer fs.Stop() 5742 5743 msg := bytes.Repeat([]byte("Z"), 1024) 5744 for _, subj := range []string{"A", "B", "C", "D", "E", "F", "G", "H", "I", "J"} { 5745 fs.StoreMsg(subj, nil, msg) 5746 } 5747 // Leave first one but delete the rest. 5748 for seq := uint64(2); seq < 10; seq++ { 5749 fs.RemoveMsg(seq) 5750 } 5751 require_Equal(t, fs.numMsgBlocks(), 1) 5752 mb := fs.getFirstBlock() 5753 require_NotNil(t, mb) 5754 5755 _, ub, _ := fs.Utilization() 5756 5757 // Do compaction, should remove all excess now. 5758 mb.mu.Lock() 5759 mb.compact() 5760 mb.mu.Unlock() 5761 5762 ta, ua, _ := fs.Utilization() 5763 require_Equal(t, ub, ua) 5764 require_Equal(t, ta, ua) 5765 } 5766 5767 func TestFileStoreRemoveLastNoDoubleTombstones(t *testing.T) { 5768 fs, err := newFileStore( 5769 FileStoreConfig{StoreDir: t.TempDir()}, 5770 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage, MaxMsgsPer: 1}, 5771 ) 5772 require_NoError(t, err) 5773 defer fs.Stop() 5774 5775 fs.StoreMsg("A", nil, []byte("hello")) 5776 fs.mu.Lock() 5777 fs.removeMsgViaLimits(1) 5778 fs.mu.Unlock() 5779 5780 require_Equal(t, fs.numMsgBlocks(), 1) 5781 mb := fs.getFirstBlock() 5782 require_NotNil(t, mb) 5783 mb.loadMsgs() 5784 rbytes, _, err := fs.Utilization() 5785 require_NoError(t, err) 5786 require_Equal(t, rbytes, emptyRecordLen) 5787 } 5788 5789 func TestFileStoreFullStateMultiBlockPastWAL(t *testing.T) { 5790 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 5791 fcfg.BlockSize = 100 5792 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage} 5793 created := time.Now() 5794 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5795 require_NoError(t, err) 5796 defer fs.Stop() 5797 5798 // This yields an internal record length of 50 bytes. So 2 msgs per blk. 5799 msgLen := 19 5800 msgA := bytes.Repeat([]byte("A"), msgLen) 5801 msgZ := bytes.Repeat([]byte("Z"), msgLen) 5802 5803 // Store 2 msgs 5804 fs.StoreMsg("A", nil, msgA) 5805 fs.StoreMsg("B", nil, msgZ) 5806 require_Equal(t, fs.numMsgBlocks(), 1) 5807 fs.Stop() 5808 5809 // Grab the state from this stop. 5810 sfile := filepath.Join(fcfg.StoreDir, msgDir, streamStreamStateFile) 5811 buf, err := os.ReadFile(sfile) 5812 require_NoError(t, err) 5813 5814 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5815 require_NoError(t, err) 5816 defer fs.Stop() 5817 5818 // Store 6 more msgs. 5819 fs.StoreMsg("C", nil, msgA) 5820 fs.StoreMsg("D", nil, msgZ) 5821 fs.StoreMsg("E", nil, msgA) 5822 fs.StoreMsg("F", nil, msgZ) 5823 fs.StoreMsg("G", nil, msgA) 5824 fs.StoreMsg("H", nil, msgZ) 5825 require_Equal(t, fs.numMsgBlocks(), 4) 5826 state := fs.State() 5827 fs.Stop() 5828 5829 // Put back old stream state. 
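// The snapshot in buf was taken when only a single block existed; 4 blocks are now on disk.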
5830 // This will test that we properly walk multiple blocks past where we snapshotted state. 5831 err = os.WriteFile(sfile, buf, defaultFilePerms) 5832 require_NoError(t, err) 5833 5834 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5835 require_NoError(t, err) 5836 defer fs.Stop() 5837 5838 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5839 t.Fatalf("Restore state does not match:\n%+v\n%+v", 5840 state, newState) 5841 } 5842 require_True(t, !state.FirstTime.IsZero()) 5843 }) 5844 } 5845 5846 // This tests we can successfully recover without having to rebuild the whole stream from a mid block index.db marker 5847 // when the updated block has a removed entry. 5848 // Make sure this does not cause a recover of the full state. 5849 func TestFileStoreFullStateMidBlockPastWAL(t *testing.T) { 5850 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 5851 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage, MaxMsgsPer: 1} 5852 created := time.Now() 5853 fs, err := newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5854 require_NoError(t, err) 5855 defer fs.Stop() 5856 5857 // This yields an internal record length of 50 bytes. So 2 msgs per blk. 5858 msg := bytes.Repeat([]byte("Z"), 19) 5859 5860 // Store 5 msgs 5861 fs.StoreMsg("A", nil, msg) 5862 fs.StoreMsg("B", nil, msg) 5863 fs.StoreMsg("C", nil, msg) 5864 fs.StoreMsg("D", nil, msg) 5865 fs.StoreMsg("E", nil, msg) 5866 require_Equal(t, fs.numMsgBlocks(), 1) 5867 fs.Stop() 5868 5869 // Grab the state from this stop. 5870 sfile := filepath.Join(fcfg.StoreDir, msgDir, streamStreamStateFile) 5871 buf, err := os.ReadFile(sfile) 5872 require_NoError(t, err) 5873 5874 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5875 require_NoError(t, err) 5876 defer fs.Stop() 5877 5878 // Store 5 more messages, then remove seq 2, "B". 5879 fs.StoreMsg("F", nil, msg) 5880 fs.StoreMsg("G", nil, msg) 5881 fs.StoreMsg("H", nil, msg) 5882 fs.StoreMsg("I", nil, msg) 5883 fs.StoreMsg("J", nil, msg) 5884 fs.RemoveMsg(2) 5885 5886 require_Equal(t, fs.numMsgBlocks(), 1) 5887 state := fs.State() 5888 fs.Stop() 5889 5890 // Put back old stream state. 5891 // This will test that we properly walk multiple blocks past where we snapshotted state. 5892 err = os.WriteFile(sfile, buf, defaultFilePerms) 5893 require_NoError(t, err) 5894 5895 fs, err = newFileStoreWithCreated(fcfg, cfg, created, prf(&fcfg), nil) 5896 require_NoError(t, err) 5897 defer fs.Stop() 5898 5899 if newState := fs.State(); !reflect.DeepEqual(state, newState) { 5900 t.Fatalf("Restore state does not match:\n%+v\n%+v", 5901 state, newState) 5902 } 5903 // Check that index.db is still there. If we recover by raw data on a corrupt state we delete this. 5904 _, err = os.Stat(sfile) 5905 require_NoError(t, err) 5906 }) 5907 } 5908 5909 func TestFileStoreCompactingBlocksOnSync(t *testing.T) { 5910 testFileStoreAllPermutations(t, func(t *testing.T, fcfg FileStoreConfig) { 5911 fcfg.BlockSize = 1000 // 20 msgs per block. 5912 fcfg.SyncInterval = 100 * time.Millisecond 5913 cfg := StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage, MaxMsgsPer: 1} 5914 fs, err := newFileStoreWithCreated(fcfg, cfg, time.Now(), prf(&fcfg), nil) 5915 require_NoError(t, err) 5916 defer fs.Stop() 5917 5918 // This yields an internal record length of 50 bytes. So 20 msgs per blk. 
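// 20 distinct single-character subjects at 50 bytes per record exactly fill the first 1000-byte block.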
5919 msg := bytes.Repeat([]byte("Z"), 19) 5920 subjects := "ABCDEFGHIJKLMNOPQRST" 5921 for _, subj := range subjects { 5922 fs.StoreMsg(string(subj), nil, msg) 5923 } 5924 require_Equal(t, fs.numMsgBlocks(), 1) 5925 total, reported, err := fs.Utilization() 5926 require_NoError(t, err) 5927 5928 require_Equal(t, total, reported) 5929 5930 // Now start removing, since we are small this should not kick in any inline logic. 5931 // Remove all interior messages, leave 1 and 20. So write B-S 5932 for i := 1; i < 19; i++ { 5933 fs.StoreMsg(string(subjects[i]), nil, msg) 5934 } 5935 require_Equal(t, fs.numMsgBlocks(), 2) 5936 5937 blkUtil := func() (uint64, uint64) { 5938 fs.mu.RLock() 5939 fmb := fs.blks[0] 5940 fs.mu.RUnlock() 5941 fmb.mu.RLock() 5942 defer fmb.mu.RUnlock() 5943 return fmb.rbytes, fmb.bytes 5944 } 5945 5946 total, reported = blkUtil() 5947 require_Equal(t, reported, 100) 5948 // Raw bytes will be 1000, but due to compression could be less. 5949 if fcfg.Compression != NoCompression { 5950 require_True(t, total > reported) 5951 } else { 5952 require_Equal(t, total, 1000) 5953 } 5954 5955 // Make sure the sync interval when kicked in compacts down to rbytes == 100. 5956 checkFor(t, time.Second, 100*time.Millisecond, func() error { 5957 if total, reported := blkUtil(); total <= reported { 5958 return nil 5959 } 5960 return fmt.Errorf("Not compacted yet, raw %v vs reported %v", 5961 friendlyBytes(total), friendlyBytes(reported)) 5962 }) 5963 }) 5964 } 5965 5966 // Make sure a call to Compact() updates PSIM correctly. 5967 func TestFileStoreCompactAndPSIMWhenDeletingBlocks(t *testing.T) { 5968 fs, err := newFileStore( 5969 FileStoreConfig{StoreDir: t.TempDir(), BlockSize: 512}, 5970 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 5971 require_NoError(t, err) 5972 defer fs.Stop() 5973 5974 subj, msg := "A", bytes.Repeat([]byte("ABC"), 33) // ~100bytes 5975 5976 // Add in 10 As 5977 for i := 0; i < 10; i++ { 5978 fs.StoreMsg(subj, nil, msg) 5979 } 5980 require_Equal(t, fs.numMsgBlocks(), 4) 5981 5982 // Should leave 1. 5983 n, err := fs.Compact(10) 5984 require_NoError(t, err) 5985 require_Equal(t, n, 9) 5986 require_Equal(t, fs.numMsgBlocks(), 1) 5987 5988 fs.mu.RLock() 5989 info, _ := fs.psim.Find(stringToBytes(subj)) 5990 psi := *info 5991 fs.mu.RUnlock() 5992 5993 require_Equal(t, psi.total, 1) 5994 require_Equal(t, psi.fblk, psi.lblk) 5995 } 5996 5997 func TestFileStoreTrackSubjLenForPSIM(t *testing.T) { 5998 sd := t.TempDir() 5999 fs, err := newFileStore( 6000 FileStoreConfig{StoreDir: sd}, 6001 StreamConfig{Name: "zzz", Subjects: []string{">"}, Storage: FileStorage}) 6002 require_NoError(t, err) 6003 defer fs.Stop() 6004 6005 // Place 1000 msgs with varying subjects. 6006 // Make sure we track the subject length properly. 6007 smap := make(map[string]int, 1000) 6008 buf := make([]byte, 10) 6009 for i := 0; i < 1000; i++ { 6010 var b strings.Builder 6011 // 1-6 tokens. 6012 numTokens := rand.Intn(6) + 1 6013 for i := 0; i < numTokens; i++ { 6014 tlen := rand.Intn(4) + 2 6015 tok := buf[:tlen] 6016 crand.Read(tok) 6017 b.WriteString(hex.EncodeToString(tok)) 6018 if i != numTokens-1 { 6019 b.WriteString(".") 6020 } 6021 } 6022 subj := b.String() 6023 // Avoid dupes since will cause check to fail after we delete messages. 
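// With a dupe, removing one copy would delete the subject from smap while the other copy keeps
// it counted in fs.tsl, which would break check() below.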
6024 if _, ok := smap[subj]; ok { 6025 continue 6026 } 6027 smap[subj] = len(subj) 6028 fs.StoreMsg(subj, nil, nil) 6029 } 6030 6031 check := func() { 6032 t.Helper() 6033 var total int 6034 for _, slen := range smap { 6035 total += slen 6036 } 6037 fs.mu.RLock() 6038 tsl := fs.tsl 6039 fs.mu.RUnlock() 6040 require_Equal(t, tsl, total) 6041 } 6042 6043 check() 6044 6045 // Delete ~half 6046 var smv StoreMsg 6047 for i := 0; i < 500; i++ { 6048 seq := uint64(rand.Intn(1000) + 1) 6049 sm, err := fs.LoadMsg(seq, &smv) 6050 if err != nil { 6051 continue 6052 } 6053 fs.RemoveMsg(seq) 6054 delete(smap, sm.subj) 6055 } 6056 6057 check() 6058 6059 // Make sure we can recover same after restart. 6060 fs.Stop() 6061 fs, err = newFileStore( 6062 FileStoreConfig{StoreDir: sd}, 6063 StreamConfig{Name: "zzz", Subjects: []string{">"}, Storage: FileStorage}) 6064 require_NoError(t, err) 6065 defer fs.Stop() 6066 6067 check() 6068 6069 // Drain the rest through purge. 6070 fs.Purge() 6071 smap = nil 6072 check() 6073 } 6074 6075 // This was used to make sure our estimate was correct, but not needed normally. 6076 func TestFileStoreLargeFullStatePSIM(t *testing.T) { 6077 sd := t.TempDir() 6078 fs, err := newFileStore( 6079 FileStoreConfig{StoreDir: sd}, 6080 StreamConfig{Name: "zzz", Subjects: []string{">"}, Storage: FileStorage}) 6081 require_NoError(t, err) 6082 defer fs.Stop() 6083 6084 buf := make([]byte, 20) 6085 for i := 0; i < 100_000; i++ { 6086 var b strings.Builder 6087 // 1-6 tokens. 6088 numTokens := rand.Intn(6) + 1 6089 for i := 0; i < numTokens; i++ { 6090 tlen := rand.Intn(8) + 2 6091 tok := buf[:tlen] 6092 crand.Read(tok) 6093 b.WriteString(hex.EncodeToString(tok)) 6094 if i != numTokens-1 { 6095 b.WriteString(".") 6096 } 6097 } 6098 subj := b.String() 6099 fs.StoreMsg(subj, nil, nil) 6100 } 6101 fs.Stop() 6102 } 6103 6104 func TestFileStoreLargeFullStateMetaCleanup(t *testing.T) { 6105 sd := t.TempDir() 6106 fs, err := newFileStore( 6107 FileStoreConfig{StoreDir: sd}, 6108 StreamConfig{Name: "zzz", Subjects: []string{">"}, Storage: FileStorage}) 6109 require_NoError(t, err) 6110 defer fs.Stop() 6111 6112 subj, msg := "foo.bar.baz", bytes.Repeat([]byte("ABC"), 33) // ~100bytes 6113 for i := 0; i < 1000; i++ { 6114 fs.StoreMsg(subj, nil, nil) 6115 } 6116 fs.Stop() 6117 6118 mdir := filepath.Join(sd, msgDir) 6119 idxFile := filepath.Join(mdir, "1.idx") 6120 fssFile := filepath.Join(mdir, "1.fss") 6121 require_NoError(t, os.WriteFile(idxFile, msg, defaultFilePerms)) 6122 require_NoError(t, os.WriteFile(fssFile, msg, defaultFilePerms)) 6123 6124 fs, err = newFileStore( 6125 FileStoreConfig{StoreDir: sd}, 6126 StreamConfig{Name: "zzz", Subjects: []string{">"}, Storage: FileStorage}) 6127 require_NoError(t, err) 6128 defer fs.Stop() 6129 6130 checkFor(t, time.Second, 50*time.Millisecond, func() error { 6131 if _, err := os.Stat(idxFile); err == nil { 6132 return errors.New("idx file still exists") 6133 } 6134 if _, err := os.Stat(fssFile); err == nil { 6135 return errors.New("fss file still exists") 6136 } 6137 return nil 6138 }) 6139 } 6140 6141 func TestFileStoreIndexDBExistsAfterShutdown(t *testing.T) { 6142 sd := t.TempDir() 6143 fs, err := newFileStore( 6144 FileStoreConfig{StoreDir: sd}, 6145 StreamConfig{Name: "zzz", Subjects: []string{">"}, Storage: FileStorage}) 6146 require_NoError(t, err) 6147 defer fs.Stop() 6148 6149 subj := "foo.bar.baz" 6150 for i := 0; i < 1000; i++ { 6151 fs.StoreMsg(subj, nil, nil) 6152 } 6153 6154 idxFile := filepath.Join(sd, msgDir, streamStreamStateFile) 6155 
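// Force the store dirty and delete index.db out from under it; Stop() below is expected to write it back out.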
6156 fs.mu.Lock() 6157 fs.dirty = 1 6158 if err := os.Remove(idxFile); err != nil && !errors.Is(err, os.ErrNotExist) { 6159 t.Fatal(err) 6160 } 6161 fs.mu.Unlock() 6162 6163 fs.Stop() 6164 6165 checkFor(t, time.Second, 50*time.Millisecond, func() error { 6166 if _, err := os.Stat(idxFile); err != nil { 6167 return fmt.Errorf("%q doesn't exist", idxFile) 6168 } 6169 return nil 6170 }) 6171 } 6172 6173 // https://github.com/nats-io/nats-server/issues/4842 6174 func TestFileStoreSubjectCorruption(t *testing.T) { 6175 sd, blkSize := t.TempDir(), uint64(2*1024*1024) 6176 fs, err := newFileStore( 6177 FileStoreConfig{StoreDir: sd, BlockSize: blkSize}, 6178 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6179 require_NoError(t, err) 6180 defer fs.Stop() 6181 6182 numSubjects := 100 6183 msgs := [][]byte{bytes.Repeat([]byte("ABC"), 333), bytes.Repeat([]byte("ABC"), 888), bytes.Repeat([]byte("ABC"), 555)} 6184 for i := 0; i < 10_000; i++ { 6185 subj := fmt.Sprintf("foo.%d", rand.Intn(numSubjects)+1) 6186 msg := msgs[rand.Intn(len(msgs))] 6187 fs.StoreMsg(subj, nil, msg) 6188 } 6189 fs.Stop() 6190 6191 require_NoError(t, os.Remove(filepath.Join(sd, msgDir, streamStreamStateFile))) 6192 6193 fs, err = newFileStore( 6194 FileStoreConfig{StoreDir: sd, BlockSize: blkSize}, 6195 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6196 require_NoError(t, err) 6197 defer fs.Stop() 6198 6199 for subj := range fs.SubjectsTotals(">") { 6200 var n int 6201 _, err := fmt.Sscanf(subj, "foo.%d", &n) 6202 require_NoError(t, err) 6203 } 6204 } 6205 6206 // Since 2.10 we no longer have fss, and the approach for calculating NumPending would branch 6207 // based on the old fss metadata being present. This meant that calculating NumPending in >= 2.10.x 6208 // would load all blocks to complete. This test makes sure we do not do that anymore. 6209 func TestFileStoreNumPendingLastBySubject(t *testing.T) { 6210 sd, blkSize := t.TempDir(), uint64(1024) 6211 fs, err := newFileStore( 6212 FileStoreConfig{StoreDir: sd, BlockSize: blkSize}, 6213 StreamConfig{Name: "zzz", Subjects: []string{"foo.*.*"}, Storage: FileStorage}) 6214 require_NoError(t, err) 6215 defer fs.Stop() 6216 6217 numSubjects := 20 6218 msg := bytes.Repeat([]byte("ABC"), 25) 6219 for i := 1; i <= 1000; i++ { 6220 subj := fmt.Sprintf("foo.%d.%d", rand.Intn(numSubjects)+1, i) 6221 fs.StoreMsg(subj, nil, msg) 6222 } 6223 // Each block has ~8 msgs. 6224 require_True(t, fs.numMsgBlocks() > 100) 6225 6226 calcCacheLoads := func() (cloads uint64) { 6227 fs.mu.RLock() 6228 defer fs.mu.RUnlock() 6229 for _, mb := range fs.blks { 6230 mb.mu.RLock() 6231 cloads += mb.cloads 6232 mb.mu.RUnlock() 6233 } 6234 return cloads 6235 } 6236 6237 total, _ := fs.NumPending(0, "foo.*.*", true) 6238 require_Equal(t, total, 1000) 6239 // Make sure no blocks were loaded to calculate this as a new consumer. 6240 require_Equal(t, calcCacheLoads(), 0) 6241 6242 checkResult := func(sseq, np uint64, filter string) { 6243 t.Helper() 6244 var checkTotal uint64 6245 var smv StoreMsg 6246 for seq := sseq; seq <= 1000; seq++ { 6247 sm, err := fs.LoadMsg(seq, &smv) 6248 require_NoError(t, err) 6249 if subjectIsSubsetMatch(sm.subj, filter) { 6250 checkTotal++ 6251 } 6252 } 6253 require_Equal(t, np, checkTotal) 6254 } 6255 6256 // Make sure partials work properly. 
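// For each filter, pick a random starting sequence and compare NumPending against the
// brute-force LoadMsg scan in checkResult.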
6257 for _, filter := range []string{"foo.10.*", "*.22.*", "*.*.222", "foo.5.999", "*.2.*"} { 6258 sseq := uint64(rand.Intn(250) + 200) // Between 200-450 6259 total, _ = fs.NumPending(sseq, filter, true) 6260 checkResult(sseq, total, filter) 6261 } 6262 } 6263 6264 // We had a bug that could cause internal memory corruption of the psim keys in memory 6265 // which could have been written to disk via index.db. 6266 func TestFileStoreCorruptPSIMOnDisk(t *testing.T) { 6267 sd := t.TempDir() 6268 fs, err := newFileStore( 6269 FileStoreConfig{StoreDir: sd}, 6270 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6271 require_NoError(t, err) 6272 defer fs.Stop() 6273 6274 fs.StoreMsg("foo.bar", nil, []byte("ABC")) 6275 fs.StoreMsg("foo.baz", nil, []byte("XYZ")) 6276 6277 // Force bad subject. 6278 fs.mu.Lock() 6279 psi, _ := fs.psim.Find(stringToBytes("foo.bar")) 6280 bad := make([]byte, 7) 6281 crand.Read(bad) 6282 fs.psim.Insert(bad, *psi) 6283 fs.psim.Delete(stringToBytes("foo.bar")) 6284 fs.dirty++ 6285 fs.mu.Unlock() 6286 6287 // Restart 6288 fs.Stop() 6289 fs, err = newFileStore( 6290 FileStoreConfig{StoreDir: sd}, 6291 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6292 require_NoError(t, err) 6293 defer fs.Stop() 6294 6295 sm, err := fs.LoadLastMsg("foo.bar", nil) 6296 require_NoError(t, err) 6297 require_True(t, bytes.Equal(sm.msg, []byte("ABC"))) 6298 6299 sm, err = fs.LoadLastMsg("foo.baz", nil) 6300 require_NoError(t, err) 6301 require_True(t, bytes.Equal(sm.msg, []byte("XYZ"))) 6302 } 6303 6304 func TestFileStorePurgeExBufPool(t *testing.T) { 6305 sd := t.TempDir() 6306 fs, err := newFileStore( 6307 FileStoreConfig{StoreDir: sd, BlockSize: 1024}, 6308 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6309 require_NoError(t, err) 6310 defer fs.Stop() 6311 6312 msg := bytes.Repeat([]byte("ABC"), 33) // ~100bytes 6313 for i := 0; i < 1000; i++ { 6314 fs.StoreMsg("foo.foo", nil, msg) 6315 fs.StoreMsg("foo.bar", nil, msg) 6316 } 6317 6318 p, err := fs.PurgeEx("foo.bar", 1, 0) 6319 require_NoError(t, err) 6320 require_Equal(t, p, 1000) 6321 6322 // Now make sure we do not have all of the msg blocks cache's loaded. 6323 var loaded int 6324 fs.mu.RLock() 6325 for _, mb := range fs.blks { 6326 mb.mu.RLock() 6327 if mb.cacheAlreadyLoaded() { 6328 loaded++ 6329 } 6330 mb.mu.RUnlock() 6331 } 6332 fs.mu.RUnlock() 6333 require_Equal(t, loaded, 1) 6334 } 6335 6336 func TestFileStoreFSSMeta(t *testing.T) { 6337 sd := t.TempDir() 6338 fs, err := newFileStore( 6339 FileStoreConfig{StoreDir: sd, BlockSize: 100, CacheExpire: 200 * time.Millisecond, SyncInterval: time.Second}, 6340 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 6341 require_NoError(t, err) 6342 defer fs.Stop() 6343 6344 // This yields an internal record length of 50 bytes. So 2 msgs per blk with subject len of 1, e.g. "A" or "Z". 6345 msg := bytes.Repeat([]byte("Z"), 19) 6346 6347 // Should leave us with |A-Z| |Z-Z| |Z-Z| |Z-A| 6348 fs.StoreMsg("A", nil, msg) 6349 for i := 0; i < 6; i++ { 6350 fs.StoreMsg("Z", nil, msg) 6351 } 6352 fs.StoreMsg("A", nil, msg) 6353 6354 // Let cache's expire before PurgeEx which will load them back in. 6355 time.Sleep(250 * time.Millisecond) 6356 6357 p, err := fs.PurgeEx("A", 1, 0) 6358 require_NoError(t, err) 6359 require_Equal(t, p, 2) 6360 6361 // Make sure cache is not loaded but fss state still is. 
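// Every block should still have its fss state loaded, while none should have re-acquired a
// message cache after the PurgeEx.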
6362 var stillHasCache, noFSS bool 6363 fs.mu.RLock() 6364 for _, mb := range fs.blks { 6365 mb.mu.RLock() 6366 stillHasCache = stillHasCache || mb.cacheAlreadyLoaded() 6367 noFSS = noFSS || mb.fssNotLoaded() 6368 mb.mu.RUnlock() 6369 } 6370 fs.mu.RUnlock() 6371 6372 require_False(t, stillHasCache) 6373 require_False(t, noFSS) 6374 6375 // Let fss expire via syncInterval. 6376 time.Sleep(time.Second) 6377 6378 fs.mu.RLock() 6379 for _, mb := range fs.blks { 6380 mb.mu.RLock() 6381 noFSS = noFSS || mb.fssNotLoaded() 6382 mb.mu.RUnlock() 6383 } 6384 fs.mu.RUnlock() 6385 6386 require_True(t, noFSS) 6387 } 6388 6389 func TestFileStoreExpireCacheOnLinearWalk(t *testing.T) { 6390 sd := t.TempDir() 6391 expire := 250 * time.Millisecond 6392 fs, err := newFileStore( 6393 FileStoreConfig{StoreDir: sd, CacheExpire: expire}, 6394 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 6395 require_NoError(t, err) 6396 defer fs.Stop() 6397 6398 // This yields an internal record length of 50 bytes. 6399 subj, msg := "Z", bytes.Repeat([]byte("Z"), 19) 6400 6401 // Store 10 messages, so 5 blocks. 6402 for i := 0; i < 10; i++ { 6403 fs.StoreMsg(subj, nil, msg) 6404 } 6405 // Let them all expire. This way we load as we walk and can test that we expire all blocks without 6406 // needing to worry about last write times blocking forced expiration. 6407 time.Sleep(expire) 6408 6409 checkNoCache := func() { 6410 t.Helper() 6411 fs.mu.RLock() 6412 var stillHasCache bool 6413 for _, mb := range fs.blks { 6414 mb.mu.RLock() 6415 stillHasCache = stillHasCache || mb.cacheAlreadyLoaded() 6416 mb.mu.RUnlock() 6417 } 6418 fs.mu.RUnlock() 6419 require_False(t, stillHasCache) 6420 } 6421 6422 // Walk forward. 6423 var smv StoreMsg 6424 for seq := uint64(1); seq <= 10; seq++ { 6425 _, err := fs.LoadMsg(seq, &smv) 6426 require_NoError(t, err) 6427 } 6428 checkNoCache() 6429 6430 // No test walking backwards. We have this scenario when we search for starting points for sourced streams. 6431 // Noticed some memory bloat when we have to search many blocks looking for a source that may be closer to the 6432 // beginning of the stream (infrequently updated sourced stream). 6433 for seq := uint64(10); seq >= 1; seq-- { 6434 _, err := fs.LoadMsg(seq, &smv) 6435 require_NoError(t, err) 6436 } 6437 checkNoCache() 6438 6439 // Now make sure still expires properly on linear scans with deleted msgs. 6440 // We want to make sure we track linear updates even if message deleted. 6441 _, err = fs.RemoveMsg(2) 6442 require_NoError(t, err) 6443 _, err = fs.RemoveMsg(9) 6444 require_NoError(t, err) 6445 6446 // Walk forward. 6447 for seq := uint64(1); seq <= 10; seq++ { 6448 _, err := fs.LoadMsg(seq, &smv) 6449 if seq == 2 || seq == 9 { 6450 require_Error(t, err, errDeletedMsg) 6451 } else { 6452 require_NoError(t, err) 6453 } 6454 } 6455 checkNoCache() 6456 } 6457 6458 func TestFileStoreSkipMsgs(t *testing.T) { 6459 fs, err := newFileStore( 6460 FileStoreConfig{StoreDir: t.TempDir(), BlockSize: 1024}, 6461 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 6462 require_NoError(t, err) 6463 defer fs.Stop() 6464 6465 // Test on empty FS first. 6466 // Make sure wrong starting sequence fails. 
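// On an empty store the skip must begin at seq 1; starting at 10 should return ErrSequenceMismatch.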
6467 err = fs.SkipMsgs(10, 100) 6468 require_Error(t, err, ErrSequenceMismatch) 6469 6470 err = fs.SkipMsgs(1, 100) 6471 require_NoError(t, err) 6472 6473 state := fs.State() 6474 require_Equal(t, state.FirstSeq, 101) 6475 require_Equal(t, state.LastSeq, 100) 6476 require_Equal(t, fs.numMsgBlocks(), 1) 6477 6478 // Now add alot. 6479 err = fs.SkipMsgs(101, 100_000) 6480 require_NoError(t, err) 6481 state = fs.State() 6482 require_Equal(t, state.FirstSeq, 100_101) 6483 require_Equal(t, state.LastSeq, 100_100) 6484 require_Equal(t, fs.numMsgBlocks(), 1) 6485 6486 // Now add in a message, and then skip to check dmap. 6487 fs, err = newFileStore( 6488 FileStoreConfig{StoreDir: t.TempDir(), BlockSize: 1024}, 6489 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 6490 require_NoError(t, err) 6491 defer fs.Stop() 6492 6493 fs.StoreMsg("foo", nil, nil) 6494 err = fs.SkipMsgs(2, 10) 6495 require_NoError(t, err) 6496 state = fs.State() 6497 require_Equal(t, state.FirstSeq, 1) 6498 require_Equal(t, state.LastSeq, 11) 6499 require_Equal(t, state.Msgs, 1) 6500 require_Equal(t, state.NumDeleted, 10) 6501 require_Equal(t, len(state.Deleted), 10) 6502 6503 // Check Fast State too. 6504 state.Deleted = nil 6505 fs.FastState(&state) 6506 require_Equal(t, state.FirstSeq, 1) 6507 require_Equal(t, state.LastSeq, 11) 6508 require_Equal(t, state.Msgs, 1) 6509 require_Equal(t, state.NumDeleted, 10) 6510 } 6511 6512 func TestFileStoreOptimizeFirstLoadNextMsgWithSequenceZero(t *testing.T) { 6513 sd := t.TempDir() 6514 fs, err := newFileStore( 6515 FileStoreConfig{StoreDir: sd, BlockSize: 4096}, 6516 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6517 require_NoError(t, err) 6518 defer fs.Stop() 6519 6520 msg := bytes.Repeat([]byte("ZZZ"), 33) // ~100bytes 6521 6522 for i := 0; i < 5000; i++ { 6523 fs.StoreMsg("foo.A", nil, msg) 6524 } 6525 // This will create alot of blocks, ~167. 6526 // Just used to check that we do not load these in when searching. 6527 // Now add in 10 for foo.bar at the end. 6528 for i := 0; i < 10; i++ { 6529 fs.StoreMsg("foo.B", nil, msg) 6530 } 6531 // The bug would not be visible on running server per se since we would have had fss loaded 6532 // and that sticks around a bit longer, we would use that to skip over the early blocks. So stop 6533 // and restart the filestore. 6534 fs.Stop() 6535 fs, err = newFileStore( 6536 FileStoreConfig{StoreDir: sd, BlockSize: 4096}, 6537 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6538 require_NoError(t, err) 6539 defer fs.Stop() 6540 6541 // Now fetch the next message for foo.B but set starting sequence to 0. 6542 _, nseq, err := fs.LoadNextMsg("foo.B", false, 0, nil) 6543 require_NoError(t, err) 6544 require_Equal(t, nseq, 5001) 6545 // Now check how many blks are loaded, should be only 1. 
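// Even with a start sequence of 0, LoadNextMsg should skip straight to the block containing the
// first foo.B rather than loading the ~167 foo.A blocks.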
6546 require_Equal(t, fs.cacheLoads(), 1) 6547 } 6548 6549 func TestFileStoreWriteFullStateHighSubjectCardinality(t *testing.T) { 6550 t.Skip() 6551 6552 sd := t.TempDir() 6553 fs, err := newFileStore( 6554 FileStoreConfig{StoreDir: sd, BlockSize: 4096}, 6555 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6556 require_NoError(t, err) 6557 defer fs.Stop() 6558 6559 msg := []byte{1, 2, 3} 6560 6561 for i := 0; i < 1_000_000; i++ { 6562 subj := fmt.Sprintf("subj_%d", i) 6563 _, _, err := fs.StoreMsg(subj, nil, msg) 6564 require_NoError(t, err) 6565 } 6566 6567 start := time.Now() 6568 require_NoError(t, fs.writeFullState()) 6569 t.Logf("Took %s to writeFullState", time.Since(start)) 6570 } 6571 6572 func TestFileStoreEraseMsgWithDbitSlots(t *testing.T) { 6573 fs, err := newFileStore( 6574 FileStoreConfig{StoreDir: t.TempDir()}, 6575 StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage}) 6576 require_NoError(t, err) 6577 defer fs.Stop() 6578 6579 fs.StoreMsg("foo", nil, []byte("abd")) 6580 for i := 0; i < 10; i++ { 6581 fs.SkipMsg() 6582 } 6583 fs.StoreMsg("foo", nil, []byte("abd")) 6584 // Now grab that first block and compact away the skips which will 6585 // introduce dbits into our idx. 6586 fs.mu.RLock() 6587 mb := fs.blks[0] 6588 fs.mu.RUnlock() 6589 // Compact. 6590 mb.mu.Lock() 6591 mb.compact() 6592 mb.mu.Unlock() 6593 6594 removed, err := fs.EraseMsg(1) 6595 require_NoError(t, err) 6596 require_True(t, removed) 6597 } 6598 6599 func TestFileStoreEraseMsgWithAllTrailingDbitSlots(t *testing.T) { 6600 fs, err := newFileStore( 6601 FileStoreConfig{StoreDir: t.TempDir()}, 6602 StreamConfig{Name: "zzz", Subjects: []string{"foo"}, Storage: FileStorage}) 6603 require_NoError(t, err) 6604 defer fs.Stop() 6605 6606 fs.StoreMsg("foo", nil, []byte("abc")) 6607 fs.StoreMsg("foo", nil, []byte("abcdefg")) 6608 6609 for i := 0; i < 10; i++ { 6610 fs.SkipMsg() 6611 } 6612 // Now grab that first block and compact away the skips which will 6613 // introduce dbits into our idx. 6614 fs.mu.RLock() 6615 mb := fs.blks[0] 6616 fs.mu.RUnlock() 6617 // Compact. 6618 mb.mu.Lock() 6619 mb.compact() 6620 mb.mu.Unlock() 6621 6622 removed, err := fs.EraseMsg(2) 6623 require_NoError(t, err) 6624 require_True(t, removed) 6625 } 6626 6627 func TestFileStoreMultiLastSeqs(t *testing.T) { 6628 fs, err := newFileStore( 6629 FileStoreConfig{StoreDir: t.TempDir(), BlockSize: 256}, // Make block size small to test multiblock selections with maxSeq 6630 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6631 require_NoError(t, err) 6632 defer fs.Stop() 6633 6634 msg := []byte("abc") 6635 for i := 0; i < 33; i++ { 6636 fs.StoreMsg("foo.foo", nil, msg) 6637 fs.StoreMsg("foo.bar", nil, msg) 6638 fs.StoreMsg("foo.baz", nil, msg) 6639 } 6640 for i := 0; i < 33; i++ { 6641 fs.StoreMsg("bar.foo", nil, msg) 6642 fs.StoreMsg("bar.bar", nil, msg) 6643 fs.StoreMsg("bar.baz", nil, msg) 6644 } 6645 6646 checkResults := func(seqs, expected []uint64) { 6647 t.Helper() 6648 if len(seqs) != len(expected) { 6649 t.Fatalf("Expected %+v got %+v", expected, seqs) 6650 } 6651 for i := range seqs { 6652 if seqs[i] != expected[i] { 6653 t.Fatalf("Expected %+v got %+v", expected, seqs) 6654 } 6655 } 6656 } 6657 6658 // UpTo sequence 3. Tests block split. 6659 seqs, err := fs.MultiLastSeqs([]string{"foo.*"}, 3, -1) 6660 require_NoError(t, err) 6661 checkResults(seqs, []uint64{1, 2, 3}) 6662 // Up to last sequence of the stream. 
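// A maxSeq of 0 places no upper bound, so the last foo.* msgs are the final interleaved writes at seqs 97-99.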
6663 seqs, err = fs.MultiLastSeqs([]string{"foo.*"}, 0, -1) 6664 require_NoError(t, err) 6665 checkResults(seqs, []uint64{97, 98, 99}) 6666 // Check for bar.* at the end. 6667 seqs, err = fs.MultiLastSeqs([]string{"bar.*"}, 0, -1) 6668 require_NoError(t, err) 6669 checkResults(seqs, []uint64{196, 197, 198}) 6670 // This should find nothing. 6671 seqs, err = fs.MultiLastSeqs([]string{"bar.*"}, 99, -1) 6672 require_NoError(t, err) 6673 checkResults(seqs, nil) 6674 6675 // Do multiple subjects explicitly. 6676 seqs, err = fs.MultiLastSeqs([]string{"foo.foo", "foo.bar", "foo.baz"}, 3, -1) 6677 require_NoError(t, err) 6678 checkResults(seqs, []uint64{1, 2, 3}) 6679 seqs, err = fs.MultiLastSeqs([]string{"foo.foo", "foo.bar", "foo.baz"}, 0, -1) 6680 require_NoError(t, err) 6681 checkResults(seqs, []uint64{97, 98, 99}) 6682 seqs, err = fs.MultiLastSeqs([]string{"bar.foo", "bar.bar", "bar.baz"}, 0, -1) 6683 require_NoError(t, err) 6684 checkResults(seqs, []uint64{196, 197, 198}) 6685 seqs, err = fs.MultiLastSeqs([]string{"bar.foo", "bar.bar", "bar.baz"}, 99, -1) 6686 require_NoError(t, err) 6687 checkResults(seqs, nil) 6688 6689 // Check single works 6690 seqs, err = fs.MultiLastSeqs([]string{"foo.foo"}, 3, -1) 6691 require_NoError(t, err) 6692 checkResults(seqs, []uint64{1}) 6693 6694 // Now test that we properly de-duplicate between filters. 6695 seqs, err = fs.MultiLastSeqs([]string{"foo.*", "foo.bar"}, 3, -1) 6696 require_NoError(t, err) 6697 checkResults(seqs, []uint64{1, 2, 3}) 6698 seqs, err = fs.MultiLastSeqs([]string{"bar.>", "bar.bar", "bar.baz"}, 0, -1) 6699 require_NoError(t, err) 6700 checkResults(seqs, []uint64{196, 197, 198}) 6701 6702 // All 6703 seqs, err = fs.MultiLastSeqs([]string{">"}, 0, -1) 6704 require_NoError(t, err) 6705 checkResults(seqs, []uint64{97, 98, 99, 196, 197, 198}) 6706 seqs, err = fs.MultiLastSeqs([]string{">"}, 99, -1) 6707 require_NoError(t, err) 6708 checkResults(seqs, []uint64{97, 98, 99}) 6709 } 6710 6711 func TestFileStoreMultiLastSeqsMaxAllowed(t *testing.T) { 6712 fs, err := newFileStore( 6713 FileStoreConfig{StoreDir: t.TempDir()}, 6714 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6715 require_NoError(t, err) 6716 defer fs.Stop() 6717 6718 msg := []byte("abc") 6719 for i := 1; i <= 100; i++ { 6720 fs.StoreMsg(fmt.Sprintf("foo.%d", i), nil, msg) 6721 } 6722 // Test that if we specify maxAllowed that we get the correct error. 6723 seqs, err := fs.MultiLastSeqs([]string{"foo.*"}, 0, 10) 6724 require_True(t, seqs == nil) 6725 require_Error(t, err, ErrTooManyResults) 6726 } 6727 6728 // https://github.com/nats-io/nats-server/issues/5236 6729 // Unclear how the sequences get off here, this is just forcing the situation reported. 6730 func TestFileStoreMsgBlockFirstAndLastSeqCorrupt(t *testing.T) { 6731 fs, err := newFileStore( 6732 FileStoreConfig{StoreDir: t.TempDir()}, 6733 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6734 require_NoError(t, err) 6735 defer fs.Stop() 6736 6737 msg := []byte("abc") 6738 for i := 1; i <= 10; i++ { 6739 fs.StoreMsg(fmt.Sprintf("foo.%d", i), nil, msg) 6740 } 6741 fs.Purge() 6742 6743 fs.mu.RLock() 6744 mb := fs.blks[0] 6745 fs.mu.RUnlock() 6746 6747 mb.mu.Lock() 6748 mb.tryForceExpireCacheLocked() 6749 atomic.StoreUint64(&mb.last.seq, 9) 6750 mb.mu.Unlock() 6751 6752 // We should rebuild here and return no error. 
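// loadMsgs should detect the skew we forced (last.seq dropped to 9) and rebuild to the purged
// state: first=11, last=10.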
6753 require_NoError(t, mb.loadMsgs()) 6754 mb.mu.RLock() 6755 fseq, lseq := atomic.LoadUint64(&mb.first.seq), atomic.LoadUint64(&mb.last.seq) 6756 mb.mu.RUnlock() 6757 require_Equal(t, fseq, 11) 6758 require_Equal(t, lseq, 10) 6759 } 6760 6761 func TestFileStoreWriteFullStateAfterPurgeEx(t *testing.T) { 6762 fs, err := newFileStore( 6763 FileStoreConfig{StoreDir: t.TempDir()}, 6764 StreamConfig{Name: "zzz", Subjects: []string{"foo.*"}, Storage: FileStorage}) 6765 require_NoError(t, err) 6766 defer fs.Stop() 6767 6768 msg := []byte("abc") 6769 for i := 1; i <= 10; i++ { 6770 fs.StoreMsg(fmt.Sprintf("foo.%d", i), nil, msg) 6771 } 6772 fs.RemoveMsg(8) 6773 fs.RemoveMsg(9) 6774 fs.RemoveMsg(10) 6775 6776 n, err := fs.PurgeEx(">", 8, 0) 6777 require_NoError(t, err) 6778 require_Equal(t, n, 7) 6779 6780 var ss StreamState 6781 fs.FastState(&ss) 6782 require_Equal(t, ss.FirstSeq, 11) 6783 require_Equal(t, ss.LastSeq, 10) 6784 6785 // Make sure this does not reset our state due to skew with msg blocks. 6786 fs.writeFullState() 6787 fs.FastState(&ss) 6788 require_Equal(t, ss.FirstSeq, 11) 6789 require_Equal(t, ss.LastSeq, 10) 6790 } 6791 6792 /////////////////////////////////////////////////////////////////////////// 6793 // Benchmarks 6794 /////////////////////////////////////////////////////////////////////////// 6795 6796 func Benchmark_FileStoreSelectMsgBlock(b *testing.B) { 6797 // We use small block size to create lots of blocks for this test. 6798 fs, err := newFileStore( 6799 FileStoreConfig{StoreDir: b.TempDir(), BlockSize: 128}, 6800 StreamConfig{Name: "zzz", Subjects: []string{"*"}, Storage: FileStorage}) 6801 if err != nil { 6802 b.Fatalf("Unexpected error: %v", err) 6803 } 6804 defer fs.Stop() 6805 6806 subj, msg := "A", bytes.Repeat([]byte("ABC"), 33) // ~100bytes 6807 6808 // Add in a bunch of blocks. 6809 for i := 0; i < 1000; i++ { 6810 fs.StoreMsg(subj, nil, msg) 6811 } 6812 if fs.numMsgBlocks() < 1000 { 6813 b.Fatalf("Expected at least 1000 blocks, got %d", fs.numMsgBlocks()) 6814 } 6815 6816 fs.mu.RLock() 6817 defer fs.mu.RUnlock() 6818 6819 b.ResetTimer() 6820 for i := 0; i < b.N; i++ { 6821 _, mb := fs.selectMsgBlockWithIndex(1) 6822 if mb == nil { 6823 b.Fatalf("Expected a non-nil mb") 6824 } 6825 } 6826 b.StopTimer() 6827 }
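
// The sketch below is illustrative only and not part of the original suite: a minimal benchmark,
// in the spirit of Benchmark_FileStoreSelectMsgBlock above, that measures NumPending with
// last-per-subject semantics across many small blocks (the scenario exercised by
// TestFileStoreNumPendingLastBySubject). It assumes only APIs already used in this file
// (newFileStore, StoreMsg, NumPending); the benchmark name itself is hypothetical.
func Benchmark_FileStoreNumPendingLastPerSubject(b *testing.B) {
	// Small block size so the 1000 msgs below spread across many blocks.
	fs, err := newFileStore(
		FileStoreConfig{StoreDir: b.TempDir(), BlockSize: 1024},
		StreamConfig{Name: "zzz", Subjects: []string{"foo.*.*"}, Storage: FileStorage})
	if err != nil {
		b.Fatalf("Unexpected error: %v", err)
	}
	defer fs.Stop()

	msg := bytes.Repeat([]byte("ABC"), 25)
	for i := 1; i <= 1000; i++ {
		// Random first token, unique last token, mirroring the test scenario above.
		subj := fmt.Sprintf("foo.%d.%d", rand.Intn(20)+1, i)
		fs.StoreMsg(subj, nil, msg)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// lastPerSubject=true; every stored subject is unique, so we expect all 1000 pending.
		if total, _ := fs.NumPending(0, "foo.*.*", true); total != 1000 {
			b.Fatalf("Expected 1000 pending, got %d", total)
		}
	}
	b.StopTimer()
}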