get.pme.sh/pnats@v0.0.0-20240304004023-26bb5a137ed0/server/jetstream_test.go 1 // Copyright 2019-2024 The NATS Authors 2 // Licensed under the Apache License, Version 2.0 (the "License"); 3 // you may not use this file except in compliance with the License. 4 // You may obtain a copy of the License at 5 // 6 // http://www.apache.org/licenses/LICENSE-2.0 7 // 8 // Unless required by applicable law or agreed to in writing, software 9 // distributed under the License is distributed on an "AS IS" BASIS, 10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 // See the License for the specific language governing permissions and 12 // limitations under the License. 13 14 //go:build !skip_js_tests 15 // +build !skip_js_tests 16 17 package server 18 19 import ( 20 "bytes" 21 "context" 22 crand "crypto/rand" 23 "encoding/base64" 24 "encoding/json" 25 "errors" 26 "fmt" 27 "io" 28 "math" 29 "math/rand" 30 "net/http" 31 "net/url" 32 "os" 33 "path/filepath" 34 "reflect" 35 "runtime" 36 "runtime/debug" 37 "sort" 38 "strconv" 39 "strings" 40 "sync" 41 "sync/atomic" 42 "testing" 43 "time" 44 45 "get.pme.sh/pnats/server/sysmem" 46 "github.com/nats-io/jwt/v2" 47 "github.com/nats-io/nats.go" 48 "github.com/nats-io/nkeys" 49 "github.com/nats-io/nuid" 50 ) 51 52 func TestJetStreamBasicNilConfig(t *testing.T) { 53 s := RunRandClientPortServer(t) 54 defer s.Shutdown() 55 56 if err := s.EnableJetStream(nil); err != nil { 57 t.Fatalf("Expected no error, got %v", err) 58 } 59 if !s.JetStreamEnabled() { 60 t.Fatalf("Expected JetStream to be enabled") 61 } 62 if s.SystemAccount() == nil { 63 t.Fatalf("Expected system account to be created automatically") 64 } 65 // Grab our config since it was dynamically generated. 66 config := s.JetStreamConfig() 67 if config == nil { 68 t.Fatalf("Expected non-nil config") 69 } 70 // Check dynamic max memory. 71 hwMem := sysmem.Memory() 72 if hwMem != 0 { 73 // Make sure it's about 75% 74 est := hwMem / 4 * 3 75 if config.MaxMemory != est { 76 t.Fatalf("Expected memory to be 75 percent of system memory, got %v vs %v", config.MaxMemory, est) 77 } 78 } 79 // Make sure it was created. 80 stat, err := os.Stat(config.StoreDir) 81 if err != nil { 82 t.Fatalf("Expected the store directory to be present, %v", err) 83 } 84 if stat == nil || !stat.IsDir() { 85 t.Fatalf("Expected a directory") 86 } 87 } 88 89 func RunBasicJetStreamServer(t testing.TB) *Server { 90 opts := DefaultTestOptions 91 opts.Port = -1 92 opts.JetStream = true 93 opts.StoreDir = t.TempDir() 94 return RunServer(&opts) 95 } 96 97 func RunJetStreamServerOnPort(port int, sd string) *Server { 98 opts := DefaultTestOptions 99 opts.Port = port 100 opts.JetStream = true 101 opts.StoreDir = filepath.Dir(sd) 102 return RunServer(&opts) 103 } 104 105 func clientConnectToServer(t *testing.T, s *Server) *nats.Conn { 106 t.Helper() 107 nc, err := nats.Connect(s.ClientURL(), 108 nats.Name("JS-TEST"), 109 nats.ReconnectWait(5*time.Millisecond), 110 nats.MaxReconnects(-1)) 111 if err != nil { 112 t.Fatalf("Failed to create client: %v", err) 113 } 114 return nc 115 } 116 117 func clientConnectWithOldRequest(t *testing.T, s *Server) *nats.Conn { 118 nc, err := nats.Connect(s.ClientURL(), nats.UseOldRequestStyle()) 119 if err != nil { 120 t.Fatalf("Failed to create client: %v", err) 121 } 122 return nc 123 } 124 125 func TestJetStreamEnableAndDisableAccount(t *testing.T) { 126 s := RunBasicJetStreamServer(t) 127 defer s.Shutdown() 128 129 // Global in simple setup should be enabled already.
130 if !s.GlobalAccount().JetStreamEnabled() { 131 t.Fatalf("Expected to have jetstream enabled on global account") 132 } 133 if na := s.JetStreamNumAccounts(); na != 1 { 134 t.Fatalf("Expected 1 account, got %d", na) 135 } 136 137 if err := s.GlobalAccount().DisableJetStream(); err != nil { 138 t.Fatalf("Did not expect error on disabling account: %v", err) 139 } 140 if na := s.JetStreamNumAccounts(); na != 0 { 141 t.Fatalf("Expected no accounts, got %d", na) 142 } 143 // Make sure we unreserved resources. 144 if rm, rd, err := s.JetStreamReservedResources(); err != nil { 145 t.Fatalf("Unexpected error requesting jetstream reserved resources: %v", err) 146 } else if rm != 0 || rd != 0 { 147 t.Fatalf("Expected reserved memory and store to be 0, got %v and %v", friendlyBytes(rm), friendlyBytes(rd)) 148 } 149 150 acc, _ := s.LookupOrRegisterAccount("$FOO") 151 if err := acc.EnableJetStream(nil); err != nil { 152 t.Fatalf("Did not expect error on enabling account: %v", err) 153 } 154 if na := s.JetStreamNumAccounts(); na != 1 { 155 t.Fatalf("Expected 1 account, got %d", na) 156 } 157 if err := acc.DisableJetStream(); err != nil { 158 t.Fatalf("Did not expect error on disabling account: %v", err) 159 } 160 if na := s.JetStreamNumAccounts(); na != 0 { 161 t.Fatalf("Expected no accounts, got %d", na) 162 } 163 // We should get error if disabling something not enabled. 164 acc, _ = s.LookupOrRegisterAccount("$BAR") 165 if err := acc.DisableJetStream(); err == nil { 166 t.Fatalf("Expected error on disabling account that was not enabled") 167 } 168 // Should get an error for trying to enable a non-registered account. 169 acc = NewAccount("$BAZ") 170 if err := acc.EnableJetStream(nil); err == nil { 171 t.Fatalf("Expected error on enabling account that was not registered") 172 } 173 } 174 175 func TestJetStreamAddStream(t *testing.T) { 176 cases := []struct { 177 name string 178 mconfig *StreamConfig 179 }{ 180 {name: "MemoryStore", 181 mconfig: &StreamConfig{ 182 Name: "foo", 183 Retention: LimitsPolicy, 184 MaxAge: time.Hour, 185 Storage: MemoryStorage, 186 Replicas: 1, 187 }}, 188 {name: "FileStore", 189 mconfig: &StreamConfig{ 190 Name: "foo", 191 Retention: LimitsPolicy, 192 MaxAge: time.Hour, 193 Storage: FileStorage, 194 Replicas: 1, 195 }}, 196 } 197 for _, c := range cases { 198 t.Run(c.name, func(t *testing.T) { 199 s := RunBasicJetStreamServer(t) 200 defer s.Shutdown() 201 202 mset, err := s.GlobalAccount().addStream(c.mconfig) 203 if err != nil { 204 t.Fatalf("Unexpected error adding stream: %v", err) 205 } 206 defer mset.delete() 207 208 nc, js := jsClientConnect(t, s) 209 defer nc.Close() 210 211 js.Publish("foo", []byte("Hello World!")) 212 state := mset.state() 213 if state.Msgs != 1 { 214 t.Fatalf("Expected 1 message, got %d", state.Msgs) 215 } 216 if state.Bytes == 0 { 217 t.Fatalf("Expected non-zero bytes") 218 } 219 220 js.Publish("foo", []byte("Hello World Again!")) 221 state = mset.state() 222 if state.Msgs != 2 { 223 t.Fatalf("Expected 2 messages, got %d", state.Msgs) 224 } 225 226 if err := mset.delete(); err != nil { 227 t.Fatalf("Got an error deleting the stream: %v", err) 228 } 229 }) 230 } 231 } 232 233 func TestJetStreamAddStreamDiscardNew(t *testing.T) { 234 cases := []struct { 235 name string 236 mconfig *StreamConfig 237 }{ 238 {name: "MemoryStore", 239 mconfig: &StreamConfig{ 240 Name: "foo", 241 MaxMsgs: 10, 242 MaxBytes: 4096, 243 Discard: DiscardNew, 244 Storage: MemoryStorage, 245 Replicas: 1, 246 }}, 247 {name: "FileStore", 248 mconfig: &StreamConfig{ 249 Name: 
"foo", 250 MaxMsgs: 10, 251 MaxBytes: 4096, 252 Discard: DiscardNew, 253 Storage: FileStorage, 254 Replicas: 1, 255 }}, 256 } 257 for _, c := range cases { 258 t.Run(c.name, func(t *testing.T) { 259 s := RunBasicJetStreamServer(t) 260 defer s.Shutdown() 261 262 mset, err := s.GlobalAccount().addStream(c.mconfig) 263 if err != nil { 264 t.Fatalf("Unexpected error adding stream: %v", err) 265 } 266 defer mset.delete() 267 268 nc := clientConnectToServer(t, s) 269 defer nc.Close() 270 271 subj := "foo" 272 toSend := 10 273 for i := 0; i < toSend; i++ { 274 sendStreamMsg(t, nc, subj, fmt.Sprintf("MSG: %d", i+1)) 275 } 276 // We expect this one to fail due to discard policy. 277 resp, _ := nc.Request(subj, []byte("discard me"), 100*time.Millisecond) 278 if resp == nil { 279 t.Fatalf("No response, possible timeout?") 280 } 281 if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error.Description != "maximum messages exceeded" || pa.Stream != "foo" { 282 t.Fatalf("Expected to get an error about maximum messages, got %q", resp.Data) 283 } 284 285 // Now do bytes. 286 mset.purge(nil) 287 288 big := make([]byte, 8192) 289 resp, _ = nc.Request(subj, big, 100*time.Millisecond) 290 if resp == nil { 291 t.Fatalf("No response, possible timeout?") 292 } 293 if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error.Description != "maximum bytes exceeded" || pa.Stream != "foo" { 294 t.Fatalf("Expected to get an error about maximum bytes, got %q", resp.Data) 295 } 296 }) 297 } 298 } 299 300 func TestJetStreamAutoTuneFSConfig(t *testing.T) { 301 s := RunRandClientPortServer(t) 302 defer s.Shutdown() 303 304 jsconfig := &JetStreamConfig{MaxMemory: -1, MaxStore: 128 * 1024 * 1024, StoreDir: t.TempDir()} 305 if err := s.EnableJetStream(jsconfig); err != nil { 306 t.Fatalf("Expected no error, got %v", err) 307 } 308 309 maxMsgSize := int32(512) 310 streamConfig := func(name string, maxMsgs, maxBytes int64) *StreamConfig { 311 t.Helper() 312 cfg := &StreamConfig{Name: name, MaxMsgSize: maxMsgSize, Storage: FileStorage} 313 if maxMsgs > 0 { 314 cfg.MaxMsgs = maxMsgs 315 } 316 if maxBytes > 0 { 317 cfg.MaxBytes = maxBytes 318 } 319 return cfg 320 } 321 322 acc := s.GlobalAccount() 323 324 testBlkSize := func(subject string, maxMsgs, maxBytes int64, expectedBlkSize uint64) { 325 t.Helper() 326 mset, err := acc.addStream(streamConfig(subject, maxMsgs, maxBytes)) 327 if err != nil { 328 t.Fatalf("Unexpected error adding stream: %v", err) 329 } 330 defer mset.delete() 331 fsCfg, err := mset.fileStoreConfig() 332 if err != nil { 333 t.Fatalf("Unexpected error retrieving file store: %v", err) 334 } 335 if fsCfg.BlockSize != expectedBlkSize { 336 t.Fatalf("Expected auto tuned block size to be %d, got %d", expectedBlkSize, fsCfg.BlockSize) 337 } 338 } 339 340 testBlkSize("foo", 1, 0, FileStoreMinBlkSize) 341 testBlkSize("foo", 1, 512, FileStoreMinBlkSize) 342 testBlkSize("foo", 1, 1024*1024, defaultMediumBlockSize) 343 testBlkSize("foo", 1, 8*1024*1024, defaultMediumBlockSize) 344 testBlkSize("foo_bar_baz", -1, 32*1024*1024, FileStoreMaxBlkSize) 345 } 346 347 func TestJetStreamConsumerAndStreamDescriptions(t *testing.T) { 348 s := RunBasicJetStreamServer(t) 349 defer s.Shutdown() 350 351 descr := "foo asset" 352 acc := s.GlobalAccount() 353 354 // Check stream's first. 
355 mset, err := acc.addStream(&StreamConfig{Name: "foo", Description: descr}) 356 if err != nil { 357 t.Fatalf("Unexpected error adding stream: %v", err) 358 } 359 if cfg := mset.config(); cfg.Description != descr { 360 t.Fatalf("Expected a description of %q, got %q", descr, cfg.Description) 361 } 362 363 // Now consumer 364 edescr := "analytics" 365 o, err := mset.addConsumer(&ConsumerConfig{ 366 Description: edescr, 367 DeliverSubject: "to", 368 AckPolicy: AckNone}) 369 if err != nil { 370 t.Fatalf("Unexpected error adding consumer: %v", err) 371 } 372 if cfg := o.config(); cfg.Description != edescr { 373 t.Fatalf("Expected a description of %q, got %q", edescr, cfg.Description) 374 } 375 376 // Test max. 377 data := make([]byte, JSMaxDescriptionLen+1) 378 crand.Read(data) 379 bigDescr := base64.StdEncoding.EncodeToString(data) 380 381 _, err = acc.addStream(&StreamConfig{Name: "bar", Description: bigDescr}) 382 if err == nil || !strings.Contains(err.Error(), "description is too long") { 383 t.Fatalf("Expected an error but got none") 384 } 385 386 _, err = mset.addConsumer(&ConsumerConfig{ 387 Description: bigDescr, 388 DeliverSubject: "to", 389 AckPolicy: AckNone}) 390 if err == nil || !strings.Contains(err.Error(), "description is too long") { 391 t.Fatalf("Expected an error but got none") 392 } 393 } 394 func TestJetStreamConsumerWithNameAndDurable(t *testing.T) { 395 s := RunBasicJetStreamServer(t) 396 defer s.Shutdown() 397 398 descr := "foo asset" 399 name := "name" 400 durable := "durable" 401 acc := s.GlobalAccount() 402 403 // Check stream's first. 404 mset, err := acc.addStream(&StreamConfig{Name: "foo", Description: descr}) 405 if err != nil { 406 t.Fatalf("Unexpected error adding stream: %v", err) 407 } 408 if cfg := mset.config(); cfg.Description != descr { 409 t.Fatalf("Expected a description of %q, got %q", descr, cfg.Description) 410 } 411 412 // it's ok to specify both durable and name, but they have to be the same. 
413 _, err = mset.addConsumer(&ConsumerConfig{ 414 DeliverSubject: "to", 415 Durable: "consumer", 416 Name: "consumer", 417 AckPolicy: AckNone}) 418 if err != nil { 419 t.Fatalf("Unexpected error adding consumer: %v", err) 420 } 421 422 // if they're not the same, expect error 423 _, err = mset.addConsumer(&ConsumerConfig{ 424 DeliverSubject: "to", 425 Durable: durable, 426 Name: name, 427 AckPolicy: AckNone}) 428 429 if !strings.Contains(err.Error(), "Consumer Durable and Name have to be equal") { 430 t.Fatalf("Wrong error while adding consumer with not matching Name and Durable: %v", err) 431 } 432 433 } 434 435 func TestJetStreamPubAck(t *testing.T) { 436 s := RunBasicJetStreamServer(t) 437 defer s.Shutdown() 438 439 sname := "PUBACK" 440 acc := s.GlobalAccount() 441 mconfig := &StreamConfig{Name: sname, Subjects: []string{"foo"}, Storage: MemoryStorage} 442 mset, err := acc.addStream(mconfig) 443 if err != nil { 444 t.Fatalf("Unexpected error adding stream: %v", err) 445 } 446 defer mset.delete() 447 448 nc := clientConnectToServer(t, s) 449 defer nc.Close() 450 451 checkRespDetails := func(resp *nats.Msg, err error, seq uint64) { 452 if err != nil { 453 t.Fatalf("Unexpected error from send stream msg: %v", err) 454 } 455 if resp == nil { 456 t.Fatalf("No response from send stream msg") 457 } 458 pa := getPubAckResponse(resp.Data) 459 if pa == nil || pa.Error != nil { 460 t.Fatalf("Expected a valid JetStreamPubAck, got %q", resp.Data) 461 } 462 if pa.Stream != sname { 463 t.Fatalf("Expected %q for stream name, got %q", sname, pa.Stream) 464 } 465 if pa.Sequence != seq { 466 t.Fatalf("Expected %d for sequence, got %d", seq, pa.Sequence) 467 } 468 } 469 470 // Send messages and make sure pubAck details are correct. 471 for i := uint64(1); i <= 1000; i++ { 472 resp, err := nc.Request("foo", []byte("HELLO"), 100*time.Millisecond) 473 checkRespDetails(resp, err, i) 474 } 475 } 476 477 func TestJetStreamConsumerWithStartTime(t *testing.T) { 478 subj := "my_stream" 479 cases := []struct { 480 name string 481 mconfig *StreamConfig 482 }{ 483 {"MemoryStore", &StreamConfig{Name: subj, Storage: MemoryStorage}}, 484 {"FileStore", &StreamConfig{Name: subj, Storage: FileStorage}}, 485 } 486 for _, c := range cases { 487 t.Run(c.name, func(t *testing.T) { 488 s := RunBasicJetStreamServer(t) 489 defer s.Shutdown() 490 491 fsCfg := &FileStoreConfig{BlockSize: 100} 492 mset, err := s.GlobalAccount().addStreamWithStore(c.mconfig, fsCfg) 493 if err != nil { 494 t.Fatalf("Unexpected error adding stream: %v", err) 495 } 496 defer mset.delete() 497 498 nc := clientConnectToServer(t, s) 499 defer nc.Close() 500 501 toSend := 250 502 for i := 0; i < toSend; i++ { 503 sendStreamMsg(t, nc, subj, fmt.Sprintf("MSG: %d", i+1)) 504 } 505 506 time.Sleep(10 * time.Millisecond) 507 startTime := time.Now().UTC() 508 509 for i := 0; i < toSend; i++ { 510 sendStreamMsg(t, nc, subj, fmt.Sprintf("MSG: %d", i+1)) 511 } 512 513 if msgs := mset.state().Msgs; msgs != uint64(toSend*2) { 514 t.Fatalf("Expected %d messages, got %d", toSend*2, msgs) 515 } 516 517 o, err := mset.addConsumer(&ConsumerConfig{ 518 Durable: "d", 519 DeliverPolicy: DeliverByStartTime, 520 OptStartTime: &startTime, 521 AckPolicy: AckExplicit, 522 }) 523 require_NoError(t, err) 524 defer o.delete() 525 526 msg, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 527 require_NoError(t, err) 528 sseq, dseq, _, _, _ := replyInfo(msg.Reply) 529 if dseq != 1 { 530 t.Fatalf("Expected delivered seq of 1, got %d", dseq) 531 } 532 if sseq != 
uint64(toSend+1) { 533 t.Fatalf("Expected to get store seq of %d, got %d", toSend+1, sseq) 534 } 535 }) 536 } 537 } 538 539 // Test for https://github.com/nats-io/jetstream/issues/143 540 func TestJetStreamConsumerWithMultipleStartOptions(t *testing.T) { 541 subj := "my_stream" 542 cases := []struct { 543 name string 544 mconfig *StreamConfig 545 }{ 546 {"MemoryStore", &StreamConfig{Name: subj, Subjects: []string{"foo.>"}, Storage: MemoryStorage}}, 547 {"FileStore", &StreamConfig{Name: subj, Subjects: []string{"foo.>"}, Storage: FileStorage}}, 548 } 549 for _, c := range cases { 550 t.Run(c.name, func(t *testing.T) { 551 s := RunBasicJetStreamServer(t) 552 defer s.Shutdown() 553 554 mset, err := s.GlobalAccount().addStream(c.mconfig) 555 if err != nil { 556 t.Fatalf("Unexpected error adding stream: %v", err) 557 } 558 defer mset.delete() 559 560 nc := clientConnectToServer(t, s) 561 defer nc.Close() 562 563 obsReq := CreateConsumerRequest{ 564 Stream: subj, 565 Config: ConsumerConfig{ 566 Durable: "d", 567 DeliverPolicy: DeliverLast, 568 FilterSubject: "foo.22", 569 AckPolicy: AckExplicit, 570 }, 571 } 572 req, err := json.Marshal(obsReq) 573 require_NoError(t, err) 574 _, err = nc.Request(fmt.Sprintf(JSApiConsumerCreateT, subj), req, time.Second) 575 require_NoError(t, err) 576 nc.Close() 577 s.Shutdown() 578 }) 579 } 580 } 581 582 func TestJetStreamConsumerMaxDeliveries(t *testing.T) { 583 cases := []struct { 584 name string 585 mconfig *StreamConfig 586 }{ 587 {"MemoryStore", &StreamConfig{Name: "MY_WQ", Storage: MemoryStorage}}, 588 {"FileStore", &StreamConfig{Name: "MY_WQ", Storage: FileStorage}}, 589 } 590 for _, c := range cases { 591 t.Run(c.name, func(t *testing.T) { 592 s := RunBasicJetStreamServer(t) 593 defer s.Shutdown() 594 595 mset, err := s.GlobalAccount().addStream(c.mconfig) 596 if err != nil { 597 t.Fatalf("Unexpected error adding stream: %v", err) 598 } 599 defer mset.delete() 600 601 nc := clientConnectToServer(t, s) 602 defer nc.Close() 603 604 // Queue up our work item. 605 sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!") 606 607 sub, _ := nc.SubscribeSync(nats.NewInbox()) 608 defer sub.Unsubscribe() 609 nc.Flush() 610 611 maxDeliver := 5 612 ackWait := 10 * time.Millisecond 613 614 o, err := mset.addConsumer(&ConsumerConfig{ 615 DeliverSubject: sub.Subject, 616 AckPolicy: AckExplicit, 617 AckWait: ackWait, 618 MaxDeliver: maxDeliver, 619 }) 620 require_NoError(t, err) 621 defer o.delete() 622 623 // Wait for redeliveries to pile up. 624 checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error { 625 if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != maxDeliver { 626 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, maxDeliver) 627 } 628 return nil 629 }) 630 631 // Now wait a bit longer and make sure we do not have more than maxDeliveries. 
632 time.Sleep(2 * ackWait) 633 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != maxDeliver { 634 t.Fatalf("Did not receive correct number of messages: %d vs %d", nmsgs, maxDeliver) 635 } 636 }) 637 } 638 } 639 640 func TestJetStreamNextReqFromMsg(t *testing.T) { 641 bef := time.Now() 642 expires, _, _, _, _, _, err := nextReqFromMsg([]byte(`{"expires":5000000000}`)) // nanoseconds 643 require_NoError(t, err) 644 now := time.Now() 645 if expires.Before(bef.Add(5*time.Second)) || expires.After(now.Add(5*time.Second)) { 646 t.Fatal("Expires out of expected range") 647 } 648 } 649 650 func TestJetStreamPullConsumerDelayedFirstPullWithReplayOriginal(t *testing.T) { 651 cases := []struct { 652 name string 653 mconfig *StreamConfig 654 }{ 655 {"MemoryStore", &StreamConfig{Name: "MY_WQ", Storage: MemoryStorage}}, 656 {"FileStore", &StreamConfig{Name: "MY_WQ", Storage: FileStorage}}, 657 } 658 for _, c := range cases { 659 t.Run(c.name, func(t *testing.T) { 660 s := RunBasicJetStreamServer(t) 661 defer s.Shutdown() 662 663 mset, err := s.GlobalAccount().addStream(c.mconfig) 664 if err != nil { 665 t.Fatalf("Unexpected error adding stream: %v", err) 666 } 667 defer mset.delete() 668 669 nc := clientConnectToServer(t, s) 670 defer nc.Close() 671 672 // Queue up our work item. 673 sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!") 674 675 o, err := mset.addConsumer(&ConsumerConfig{ 676 Durable: "d", 677 AckPolicy: AckExplicit, 678 ReplayPolicy: ReplayOriginal, 679 }) 680 require_NoError(t, err) 681 defer o.delete() 682 683 // Force delay here which triggers the bug. 684 time.Sleep(250 * time.Millisecond) 685 686 if _, err = nc.Request(o.requestNextMsgSubject(), nil, time.Second); err != nil { 687 t.Fatalf("Unexpected error: %v", err) 688 } 689 }) 690 } 691 } 692 693 func TestJetStreamConsumerAckFloorFill(t *testing.T) { 694 cases := []struct { 695 name string 696 mconfig *StreamConfig 697 }{ 698 {"MemoryStore", &StreamConfig{Name: "MQ", Storage: MemoryStorage}}, 699 {"FileStore", &StreamConfig{Name: "MQ", Storage: FileStorage}}, 700 } 701 for _, c := range cases { 702 t.Run(c.name, func(t *testing.T) { 703 s := RunBasicJetStreamServer(t) 704 defer s.Shutdown() 705 706 mset, err := s.GlobalAccount().addStream(c.mconfig) 707 if err != nil { 708 t.Fatalf("Unexpected error adding stream: %v", err) 709 } 710 defer mset.delete() 711 712 nc := clientConnectToServer(t, s) 713 defer nc.Close() 714 715 for i := 1; i <= 4; i++ { 716 sendStreamMsg(t, nc, c.mconfig.Name, fmt.Sprintf("msg-%d", i)) 717 } 718 719 sub, _ := nc.SubscribeSync(nats.NewInbox()) 720 defer sub.Unsubscribe() 721 nc.Flush() 722 723 o, err := mset.addConsumer(&ConsumerConfig{ 724 Durable: "d", 725 DeliverSubject: sub.Subject, 726 AckPolicy: AckExplicit, 727 }) 728 require_NoError(t, err) 729 defer o.delete() 730 731 var first *nats.Msg 732 733 for i := 1; i <= 3; i++ { 734 m, err := sub.NextMsg(time.Second) 735 if err != nil { 736 t.Fatalf("Error receiving message %d: %v", i, err) 737 } 738 // Don't ack 1 or 4. 739 if i == 1 { 740 first = m 741 } else if i == 2 || i == 3 { 742 m.Respond(nil) 743 } 744 } 745 nc.Flush() 746 if info := o.info(); info.AckFloor.Consumer != 0 { 747 t.Fatalf("Expected the ack floor to be 0, got %d", info.AckFloor.Consumer) 748 } 749 // Now ack first, should move ack floor to 3. 
750 first.Respond(nil) 751 nc.Flush() 752 753 checkFor(t, time.Second, 50*time.Millisecond, func() error { 754 if info := o.info(); info.AckFloor.Consumer != 3 { 755 return fmt.Errorf("Expected the ack floor to be 3, got %d", info.AckFloor.Consumer) 756 } 757 return nil 758 }) 759 }) 760 } 761 } 762 763 func TestJetStreamNoPanicOnRaceBetweenShutdownAndConsumerDelete(t *testing.T) { 764 cases := []struct { 765 name string 766 mconfig *StreamConfig 767 }{ 768 {"MemoryStore", &StreamConfig{Name: "MY_STREAM", Storage: MemoryStorage}}, 769 {"FileStore", &StreamConfig{Name: "MY_STREAM", Storage: FileStorage}}, 770 } 771 for _, c := range cases { 772 t.Run(c.name, func(t *testing.T) { 773 s := RunBasicJetStreamServer(t) 774 defer s.Shutdown() 775 776 mset, err := s.GlobalAccount().addStream(c.mconfig) 777 if err != nil { 778 t.Fatalf("Unexpected error adding stream: %v", err) 779 } 780 defer mset.delete() 781 782 var cons []*consumer 783 for i := 0; i < 100; i++ { 784 o, err := mset.addConsumer(&ConsumerConfig{ 785 Durable: fmt.Sprintf("d%d", i), 786 AckPolicy: AckExplicit, 787 }) 788 require_NoError(t, err) 789 defer o.delete() 790 cons = append(cons, o) 791 } 792 793 wg := sync.WaitGroup{} 794 wg.Add(1) 795 go func() { 796 defer wg.Done() 797 for _, c := range cons { 798 c.delete() 799 } 800 }() 801 time.Sleep(10 * time.Millisecond) 802 s.Shutdown() 803 }) 804 } 805 } 806 807 func TestJetStreamAddStreamMaxMsgSize(t *testing.T) { 808 cases := []struct { 809 name string 810 mconfig *StreamConfig 811 }{ 812 {name: "MemoryStore", 813 mconfig: &StreamConfig{ 814 Name: "foo", 815 Retention: LimitsPolicy, 816 MaxAge: time.Hour, 817 Storage: MemoryStorage, 818 MaxMsgSize: 22, 819 Replicas: 1, 820 }}, 821 {name: "FileStore", 822 mconfig: &StreamConfig{ 823 Name: "foo", 824 Retention: LimitsPolicy, 825 MaxAge: time.Hour, 826 Storage: FileStorage, 827 MaxMsgSize: 22, 828 Replicas: 1, 829 }}, 830 } 831 for _, c := range cases { 832 t.Run(c.name, func(t *testing.T) { 833 s := RunBasicJetStreamServer(t) 834 defer s.Shutdown() 835 836 mset, err := s.GlobalAccount().addStream(c.mconfig) 837 if err != nil { 838 t.Fatalf("Unexpected error adding stream: %v", err) 839 } 840 defer mset.delete() 841 842 nc := clientConnectToServer(t, s) 843 defer nc.Close() 844 845 if _, err := nc.Request("foo", []byte("Hello World!"), time.Second); err != nil { 846 t.Fatalf("Unexpected error: %v", err) 847 } 848 849 tooBig := []byte("1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ") 850 resp, err := nc.Request("foo", tooBig, time.Second) 851 require_NoError(t, err) 852 if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error.Description != "message size exceeds maximum allowed" { 853 t.Fatalf("Expected to get an error for maximum message size, got %q", pa.Error) 854 } 855 }) 856 } 857 } 858 859 func TestJetStreamAddStreamCanonicalNames(t *testing.T) { 860 s := RunBasicJetStreamServer(t) 861 defer s.Shutdown() 862 863 acc := s.GlobalAccount() 864 865 expectErr := func(_ *stream, err error) { 866 t.Helper() 867 if !IsNatsErr(err, JSStreamInvalidConfigF) { 868 t.Fatalf("Expected error but got none") 869 } 870 } 871 872 expectErr(acc.addStream(&StreamConfig{Name: "foo.bar"})) 873 expectErr(acc.addStream(&StreamConfig{Name: "foo.bar."})) 874 expectErr(acc.addStream(&StreamConfig{Name: "foo.*"})) 875 expectErr(acc.addStream(&StreamConfig{Name: "foo.>"})) 876 expectErr(acc.addStream(&StreamConfig{Name: "*"})) 877 expectErr(acc.addStream(&StreamConfig{Name: ">"})) 878 expectErr(acc.addStream(&StreamConfig{Name: "*>"})) 879 } 880 881 func 
TestJetStreamAddStreamBadSubjects(t *testing.T) { 882 s := RunBasicJetStreamServer(t) 883 defer s.Shutdown() 884 885 // Client for API requests. 886 nc := clientConnectToServer(t, s) 887 defer nc.Close() 888 889 expectAPIErr := func(cfg StreamConfig) { 890 t.Helper() 891 req, err := json.Marshal(cfg) 892 require_NoError(t, err) 893 resp, _ := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) 894 var scResp JSApiStreamCreateResponse 895 if err := json.Unmarshal(resp.Data, &scResp); err != nil { 896 t.Fatalf("Unexpected error: %v", err) 897 } 898 899 require_Error(t, scResp.ToError(), NewJSStreamInvalidConfigError(fmt.Errorf("invalid subject"))) 900 } 901 902 expectAPIErr(StreamConfig{Name: "MyStream", Storage: MemoryStorage, Subjects: []string{"foo.bar."}}) 903 expectAPIErr(StreamConfig{Name: "MyStream", Storage: MemoryStorage, Subjects: []string{".."}}) 904 expectAPIErr(StreamConfig{Name: "MyStream", Storage: MemoryStorage, Subjects: []string{".*"}}) 905 expectAPIErr(StreamConfig{Name: "MyStream", Storage: MemoryStorage, Subjects: []string{".>"}}) 906 expectAPIErr(StreamConfig{Name: "MyStream", Storage: MemoryStorage, Subjects: []string{" x"}}) 907 expectAPIErr(StreamConfig{Name: "MyStream", Storage: MemoryStorage, Subjects: []string{"y "}}) 908 } 909 910 func TestJetStreamMaxConsumers(t *testing.T) { 911 s := RunBasicJetStreamServer(t) 912 defer s.Shutdown() 913 914 nc, js := jsClientConnect(t, s) 915 defer nc.Close() 916 917 cfg := &nats.StreamConfig{ 918 Name: "MAXC", 919 Storage: nats.MemoryStorage, 920 Subjects: []string{"in.maxc.>"}, 921 MaxConsumers: 1, 922 } 923 if _, err := js.AddStream(cfg); err != nil { 924 t.Fatalf("Unexpected error: %v", err) 925 } 926 si, err := js.StreamInfo("MAXC") 927 require_NoError(t, err) 928 if si.Config.MaxConsumers != 1 { 929 t.Fatalf("Expected max of 1, got %d", si.Config.MaxConsumers) 930 } 931 // Make sure we get the right error. 932 // This should succeed. 933 if _, err := js.SubscribeSync("in.maxc.foo"); err != nil { 934 t.Fatalf("Unexpected error: %v", err) 935 } 936 if _, err := js.SubscribeSync("in.maxc.bar"); err == nil { 937 t.Fatalf("Expected error but got none") 938 } 939 } 940 941 func TestJetStreamAddStreamOverlappingSubjects(t *testing.T) { 942 mconfig := &StreamConfig{ 943 Name: "ok", 944 Storage: MemoryStorage, 945 Subjects: []string{"foo", "bar", "baz.*", "foo.bar.baz.>"}, 946 } 947 948 s := RunBasicJetStreamServer(t) 949 defer s.Shutdown() 950 951 acc := s.GlobalAccount() 952 mset, err := acc.addStream(mconfig) 953 if err != nil { 954 t.Fatalf("Unexpected error adding stream: %v", err) 955 } 956 defer mset.delete() 957 958 expectErr := func(_ *stream, err error) { 959 t.Helper() 960 if err == nil || !strings.Contains(err.Error(), "subjects overlap") { 961 t.Fatalf("Expected error but got none") 962 } 963 } 964 965 // Test that any overlapping subjects will fail.
966 expectErr(acc.addStream(&StreamConfig{Name: "foo"})) 967 expectErr(acc.addStream(&StreamConfig{Name: "a", Subjects: []string{"baz", "bar"}})) 968 expectErr(acc.addStream(&StreamConfig{Name: "b", Subjects: []string{">"}})) 969 expectErr(acc.addStream(&StreamConfig{Name: "c", Subjects: []string{"baz.33"}})) 970 expectErr(acc.addStream(&StreamConfig{Name: "d", Subjects: []string{"*.33"}})) 971 expectErr(acc.addStream(&StreamConfig{Name: "e", Subjects: []string{"*.>"}})) 972 expectErr(acc.addStream(&StreamConfig{Name: "f", Subjects: []string{"foo.bar", "*.bar.>"}})) 973 } 974 975 func TestJetStreamAddStreamOverlapWithJSAPISubjects(t *testing.T) { 976 s := RunBasicJetStreamServer(t) 977 defer s.Shutdown() 978 979 acc := s.GlobalAccount() 980 981 expectErr := func(_ *stream, err error) { 982 t.Helper() 983 if err == nil || !strings.Contains(err.Error(), "subjects overlap") { 984 t.Fatalf("Expected error but got none") 985 } 986 } 987 988 // Test that any overlapping subjects with our JSAPI should fail. 989 expectErr(acc.addStream(&StreamConfig{Name: "a", Subjects: []string{"$JS.API.foo", "$JS.API.bar"}})) 990 expectErr(acc.addStream(&StreamConfig{Name: "b", Subjects: []string{"$JS.API.>"}})) 991 expectErr(acc.addStream(&StreamConfig{Name: "c", Subjects: []string{"$JS.API.*"}})) 992 993 // Events and Advisories etc should be ok. 994 if _, err := acc.addStream(&StreamConfig{Name: "a", Subjects: []string{"$JS.EVENT.>"}}); err != nil { 995 t.Fatalf("Expected this to work: %v", err) 996 } 997 } 998 999 func TestJetStreamAddStreamSameConfigOK(t *testing.T) { 1000 mconfig := &StreamConfig{ 1001 Name: "ok", 1002 Subjects: []string{"foo", "bar", "baz.*", "foo.bar.baz.>"}, 1003 Storage: MemoryStorage, 1004 } 1005 1006 s := RunBasicJetStreamServer(t) 1007 defer s.Shutdown() 1008 1009 acc := s.GlobalAccount() 1010 mset, err := acc.addStream(mconfig) 1011 if err != nil { 1012 t.Fatalf("Unexpected error adding stream: %v", err) 1013 } 1014 defer mset.delete() 1015 1016 // Adding again with same config should be idempotent. 
1017 if _, err = acc.addStream(mconfig); err != nil { 1018 t.Fatalf("Unexpected error adding stream: %v", err) 1019 } 1020 } 1021 1022 func sendStreamMsg(t *testing.T, nc *nats.Conn, subject, msg string) *PubAck { 1023 t.Helper() 1024 resp, _ := nc.Request(subject, []byte(msg), 500*time.Millisecond) 1025 if resp == nil { 1026 t.Fatalf("No response for %q, possible timeout?", msg) 1027 } 1028 pa := getPubAckResponse(resp.Data) 1029 if pa == nil || pa.Error != nil { 1030 t.Fatalf("Expected a valid JetStreamPubAck, got %q", resp.Data) 1031 } 1032 return pa.PubAck 1033 } 1034 1035 func TestJetStreamBasicAckPublish(t *testing.T) { 1036 cases := []struct { 1037 name string 1038 mconfig *StreamConfig 1039 }{ 1040 {"MemoryStore", &StreamConfig{Name: "foo", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 1041 {"FileStore", &StreamConfig{Name: "foo", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 1042 } 1043 for _, c := range cases { 1044 t.Run(c.name, func(t *testing.T) { 1045 s := RunBasicJetStreamServer(t) 1046 defer s.Shutdown() 1047 1048 mset, err := s.GlobalAccount().addStream(c.mconfig) 1049 if err != nil { 1050 t.Fatalf("Unexpected error adding stream: %v", err) 1051 } 1052 defer mset.delete() 1053 1054 nc := clientConnectToServer(t, s) 1055 defer nc.Close() 1056 1057 for i := 0; i < 50; i++ { 1058 sendStreamMsg(t, nc, "foo.bar", "Hello World!") 1059 } 1060 state := mset.state() 1061 if state.Msgs != 50 { 1062 t.Fatalf("Expected 50 messages, got %d", state.Msgs) 1063 } 1064 }) 1065 } 1066 } 1067 1068 func TestJetStreamStateTimestamps(t *testing.T) { 1069 cases := []struct { 1070 name string 1071 mconfig *StreamConfig 1072 }{ 1073 {"MemoryStore", &StreamConfig{Name: "foo", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 1074 {"FileStore", &StreamConfig{Name: "foo", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 1075 } 1076 for _, c := range cases { 1077 t.Run(c.name, func(t *testing.T) { 1078 s := RunBasicJetStreamServer(t) 1079 defer s.Shutdown() 1080 1081 mset, err := s.GlobalAccount().addStream(c.mconfig) 1082 if err != nil { 1083 t.Fatalf("Unexpected error adding stream: %v", err) 1084 } 1085 defer mset.delete() 1086 1087 nc := clientConnectToServer(t, s) 1088 defer nc.Close() 1089 1090 start := time.Now() 1091 delay := 250 * time.Millisecond 1092 sendStreamMsg(t, nc, "foo.bar", "Hello World!") 1093 time.Sleep(delay) 1094 sendStreamMsg(t, nc, "foo.bar", "Hello World Again!") 1095 1096 state := mset.state() 1097 if state.FirstTime.Before(start) { 1098 t.Fatalf("Unexpected first message timestamp: %v", state.FirstTime) 1099 } 1100 if state.LastTime.Before(start.Add(delay)) { 1101 t.Fatalf("Unexpected last message timestamp: %v", state.LastTime) 1102 } 1103 }) 1104 } 1105 } 1106 1107 func TestJetStreamNoAckStream(t *testing.T) { 1108 cases := []struct { 1109 name string 1110 mconfig *StreamConfig 1111 }{ 1112 {"MemoryStore", &StreamConfig{Name: "foo", Storage: MemoryStorage, NoAck: true}}, 1113 {"FileStore", &StreamConfig{Name: "foo", Storage: FileStorage, NoAck: true}}, 1114 } 1115 for _, c := range cases { 1116 t.Run(c.name, func(t *testing.T) { 1117 s := RunBasicJetStreamServer(t) 1118 defer s.Shutdown() 1119 1120 // We can use NoAck to suppress acks even when reply subjects are present. 
1121 mset, err := s.GlobalAccount().addStream(c.mconfig) 1122 if err != nil { 1123 t.Fatalf("Unexpected error adding stream: %v", err) 1124 } 1125 defer mset.delete() 1126 1127 nc := clientConnectToServer(t, s) 1128 defer nc.Close() 1129 1130 if _, err := nc.Request("foo", []byte("Hello World!"), 25*time.Millisecond); err != nats.ErrTimeout { 1131 t.Fatalf("Expected a timeout error and no response with acks suppressed") 1132 } 1133 1134 state := mset.state() 1135 if state.Msgs != 1 { 1136 t.Fatalf("Expected 1 message, got %d", state.Msgs) 1137 } 1138 }) 1139 } 1140 } 1141 1142 func TestJetStreamCreateConsumer(t *testing.T) { 1143 cases := []struct { 1144 name string 1145 mconfig *StreamConfig 1146 }{ 1147 {"MemoryStore", &StreamConfig{Name: "foo", Storage: MemoryStorage, Subjects: []string{"foo", "bar"}, Retention: WorkQueuePolicy}}, 1148 {"FileStore", &StreamConfig{Name: "foo", Storage: FileStorage, Subjects: []string{"foo", "bar"}, Retention: WorkQueuePolicy}}, 1149 } 1150 for _, c := range cases { 1151 t.Run(c.name, func(t *testing.T) { 1152 s := RunBasicJetStreamServer(t) 1153 defer s.Shutdown() 1154 1155 mset, err := s.GlobalAccount().addStream(c.mconfig) 1156 if err != nil { 1157 t.Fatalf("Unexpected error adding stream: %v", err) 1158 } 1159 defer mset.delete() 1160 1161 // Check for basic errors. 1162 if _, err := mset.addConsumer(nil); err == nil { 1163 t.Fatalf("Expected an error for no config") 1164 } 1165 1166 // No deliver subject, meaning its in pull mode, work queue mode means it is required to 1167 // do explicit ack. 1168 if _, err := mset.addConsumer(&ConsumerConfig{}); err == nil { 1169 t.Fatalf("Expected an error on work queue / pull mode without explicit ack mode") 1170 } 1171 1172 // Check for delivery subject errors. 1173 1174 // Literal delivery subject required. 1175 if _, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: "foo.*"}); err == nil { 1176 t.Fatalf("Expected an error on bad delivery subject") 1177 } 1178 // Check for cycles 1179 if _, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: "foo"}); err == nil { 1180 t.Fatalf("Expected an error on delivery subject that forms a cycle") 1181 } 1182 if _, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: "bar"}); err == nil { 1183 t.Fatalf("Expected an error on delivery subject that forms a cycle") 1184 } 1185 if _, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: "*"}); err == nil { 1186 t.Fatalf("Expected an error on delivery subject that forms a cycle") 1187 } 1188 1189 // StartPosition conflicts 1190 now := time.Now().UTC() 1191 if _, err := mset.addConsumer(&ConsumerConfig{ 1192 DeliverSubject: "A", 1193 OptStartSeq: 1, 1194 OptStartTime: &now, 1195 }); err == nil { 1196 t.Fatalf("Expected an error on start position conflicts") 1197 } 1198 if _, err := mset.addConsumer(&ConsumerConfig{ 1199 DeliverSubject: "A", 1200 OptStartTime: &now, 1201 }); err == nil { 1202 t.Fatalf("Expected an error on start position conflicts") 1203 } 1204 1205 // Non-Durables need to have subscription to delivery subject. 
1206 delivery := nats.NewInbox() 1207 nc := clientConnectToServer(t, s) 1208 defer nc.Close() 1209 sub, _ := nc.SubscribeSync(delivery) 1210 defer sub.Unsubscribe() 1211 nc.Flush() 1212 1213 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: delivery, AckPolicy: AckExplicit}) 1214 if err != nil { 1215 t.Fatalf("Expected no error with registered interest, got %v", err) 1216 } 1217 1218 if err := mset.deleteConsumer(o); err != nil { 1219 t.Fatalf("Expected no error on delete, got %v", err) 1220 } 1221 1222 // Now let's check that durables can be created and a duplicate call to add will be ok. 1223 dcfg := &ConsumerConfig{ 1224 Durable: "ddd", 1225 DeliverSubject: delivery, 1226 AckPolicy: AckExplicit, 1227 } 1228 if _, err = mset.addConsumer(dcfg); err != nil { 1229 t.Fatalf("Unexpected error creating consumer: %v", err) 1230 } 1231 if _, err = mset.addConsumer(dcfg); err != nil { 1232 t.Fatalf("Unexpected error creating second identical consumer: %v", err) 1233 } 1234 // Now test that we can change the delivery subject if that is the only thing that has 1235 // changed and we are not active. 1236 sub.Unsubscribe() 1237 sub, _ = nc.SubscribeSync("d.d.d") 1238 nc.Flush() 1239 defer sub.Unsubscribe() 1240 dcfg.DeliverSubject = "d.d.d" 1241 if _, err = mset.addConsumer(dcfg); err != nil { 1242 t.Fatalf("Unexpected error creating third consumer with just deliver subject changed: %v", err) 1243 } 1244 }) 1245 } 1246 } 1247 1248 func TestJetStreamBasicDeliverSubject(t *testing.T) { 1249 cases := []struct { 1250 name string 1251 mconfig *StreamConfig 1252 }{ 1253 {"MemoryStore", &StreamConfig{Name: "MSET", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 1254 {"FileStore", &StreamConfig{Name: "MSET", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 1255 } 1256 for _, c := range cases { 1257 t.Run(c.name, func(t *testing.T) { 1258 s := RunBasicJetStreamServer(t) 1259 defer s.Shutdown() 1260 1261 mset, err := s.GlobalAccount().addStream(c.mconfig) 1262 if err != nil { 1263 t.Fatalf("Unexpected error adding stream: %v", err) 1264 } 1265 defer mset.delete() 1266 1267 nc := clientConnectToServer(t, s) 1268 defer nc.Close() 1269 1270 toSend := 100 1271 sendSubj := "foo.bar" 1272 for i := 1; i <= toSend; i++ { 1273 sendStreamMsg(t, nc, sendSubj, strconv.Itoa(i)) 1274 } 1275 state := mset.state() 1276 if state.Msgs != uint64(toSend) { 1277 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 1278 } 1279 1280 // Now create a consumer. Use different connection. 1281 nc2 := clientConnectToServer(t, s) 1282 defer nc2.Close() 1283 1284 sub, _ := nc2.SubscribeSync(nats.NewInbox()) 1285 defer sub.Unsubscribe() 1286 nc2.Flush() 1287 1288 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject}) 1289 if err != nil { 1290 t.Fatalf("Expected no error with registered interest, got %v", err) 1291 } 1292 defer o.delete() 1293 1294 // Check for our messages. 1295 checkMsgs := func(seqOff int) { 1296 t.Helper() 1297 1298 checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error { 1299 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend { 1300 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend) 1301 } 1302 return nil 1303 }) 1304 1305 // Now let's check the messages 1306 for i := 0; i < toSend; i++ { 1307 m, _ := sub.NextMsg(time.Second) 1308 // JetStream will have the subject match the stream subject, not delivery subject.
1309 if m.Subject != sendSubj { 1310 t.Fatalf("Expected original subject of %q, but got %q", sendSubj, m.Subject) 1311 } 1312 // Now check that reply subject exists and has a sequence as the last token. 1313 if seq := o.seqFromReply(m.Reply); seq != uint64(i+seqOff) { 1314 t.Fatalf("Expected sequence of %d , got %d", i+seqOff, seq) 1315 } 1316 // Ack the message here. 1317 m.Respond(nil) 1318 } 1319 } 1320 1321 checkMsgs(1) 1322 1323 // Now send more and make sure delivery picks back up. 1324 for i := toSend + 1; i <= toSend*2; i++ { 1325 sendStreamMsg(t, nc, sendSubj, strconv.Itoa(i)) 1326 } 1327 state = mset.state() 1328 if state.Msgs != uint64(toSend*2) { 1329 t.Fatalf("Expected %d messages, got %d", toSend*2, state.Msgs) 1330 } 1331 1332 checkMsgs(101) 1333 1334 checkSubEmpty := func() { 1335 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 0 { 1336 t.Fatalf("Expected sub to have no pending") 1337 } 1338 } 1339 checkSubEmpty() 1340 o.delete() 1341 1342 // Now check for deliver last, deliver new and deliver by seq. 1343 o, err = mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject, DeliverPolicy: DeliverLast}) 1344 if err != nil { 1345 t.Fatalf("Expected no error with registered interest, got %v", err) 1346 } 1347 defer o.delete() 1348 1349 m, err := sub.NextMsg(time.Second) 1350 if err != nil { 1351 t.Fatalf("Did not get expected message, got %v", err) 1352 } 1353 // All Consumers start with sequence #1. 1354 if seq := o.seqFromReply(m.Reply); seq != 1 { 1355 t.Fatalf("Expected sequence to be 1, but got %d", seq) 1356 } 1357 // Check that it is the last msg we sent though. 1358 if mseq, _ := strconv.Atoi(string(m.Data)); mseq != 200 { 1359 t.Fatalf("Expected message sequence to be 200, but got %d", mseq) 1360 } 1361 1362 checkSubEmpty() 1363 o.delete() 1364 1365 // Make sure we only got one message. 1366 if m, err := sub.NextMsg(5 * time.Millisecond); err == nil { 1367 t.Fatalf("Expected no msg, got %+v", m) 1368 } 1369 1370 checkSubEmpty() 1371 o.delete() 1372 1373 // Now try by sequence number. 1374 o, err = mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject, DeliverPolicy: DeliverByStartSequence, OptStartSeq: 101}) 1375 if err != nil { 1376 t.Fatalf("Expected no error with registered interest, got %v", err) 1377 } 1378 defer o.delete() 1379 1380 checkMsgs(1) 1381 1382 // Now do push based queue-subscribers 1383 sub, _ = nc2.QueueSubscribeSync("_qg_", "dev") 1384 defer sub.Unsubscribe() 1385 nc2.Flush() 1386 1387 o, err = mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject, DeliverGroup: "dev"}) 1388 if err != nil { 1389 t.Fatalf("Expected no error with registered interest, got %v", err) 1390 } 1391 defer o.delete() 1392 1393 // Since we sent another batch, the check needs to look for 2x.
1394 toSend *= 2 1395 checkMsgs(1) 1396 }) 1397 } 1398 } 1399 1400 func workerModeConfig(name string) *ConsumerConfig { 1401 return &ConsumerConfig{Durable: name, AckPolicy: AckExplicit} 1402 } 1403 1404 func TestJetStreamBasicWorkQueue(t *testing.T) { 1405 cases := []struct { 1406 name string 1407 mconfig *StreamConfig 1408 }{ 1409 {"MemoryStore", &StreamConfig{Name: "MY_MSG_SET", Storage: MemoryStorage, Subjects: []string{"foo", "bar"}}}, 1410 {"FileStore", &StreamConfig{Name: "MY_MSG_SET", Storage: FileStorage, Subjects: []string{"foo", "bar"}}}, 1411 } 1412 for _, c := range cases { 1413 t.Run(c.name, func(t *testing.T) { 1414 s := RunBasicJetStreamServer(t) 1415 defer s.Shutdown() 1416 1417 mset, err := s.GlobalAccount().addStream(c.mconfig) 1418 if err != nil { 1419 t.Fatalf("Unexpected error adding stream: %v", err) 1420 } 1421 defer mset.delete() 1422 1423 // Create basic work queue mode consumer. 1424 oname := "WQ" 1425 o, err := mset.addConsumer(workerModeConfig(oname)) 1426 if err != nil { 1427 t.Fatalf("Expected no error with registered interest, got %v", err) 1428 } 1429 defer o.delete() 1430 1431 if o.nextSeq() != 1 { 1432 t.Fatalf("Expected to be starting at sequence 1") 1433 } 1434 1435 nc := clientConnectWithOldRequest(t, s) 1436 defer nc.Close() 1437 1438 // Now load up some messages. 1439 toSend := 100 1440 sendSubj := "bar" 1441 for i := 0; i < toSend; i++ { 1442 sendStreamMsg(t, nc, sendSubj, "Hello World!") 1443 } 1444 state := mset.state() 1445 if state.Msgs != uint64(toSend) { 1446 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 1447 } 1448 1449 getNext := func(seqno int) { 1450 t.Helper() 1451 nextMsg, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 1452 if err != nil { 1453 t.Fatalf("Unexpected error for seq %d: %v", seqno, err) 1454 } 1455 if nextMsg.Subject != "bar" { 1456 t.Fatalf("Expected subject of %q, got %q", "bar", nextMsg.Subject) 1457 } 1458 if seq := o.seqFromReply(nextMsg.Reply); seq != uint64(seqno) { 1459 t.Fatalf("Expected sequence of %d , got %d", seqno, seq) 1460 } 1461 } 1462 1463 // Make sure we can get the messages already there. 1464 for i := 1; i <= toSend; i++ { 1465 getNext(i) 1466 } 1467 1468 // Now we want to make sure we can get a message that is published to the message 1469 // set as we are waiting for it. 1470 nextDelay := 50 * time.Millisecond 1471 1472 go func() { 1473 time.Sleep(nextDelay) 1474 sendStreamMsg(t, nc, sendSubj, "Hello World!") 1475 }() 1476 1477 start := time.Now() 1478 getNext(toSend + 1) 1479 if time.Since(start) < nextDelay { 1480 t.Fatalf("Received message too quickly") 1481 } 1482 1483 // Now do same thing but combine waiting for new ones with sending. 
1484 go func() { 1485 time.Sleep(nextDelay) 1486 for i := 0; i < toSend; i++ { 1487 nc.Request(sendSubj, []byte("Hello World!"), 50*time.Millisecond) 1488 } 1489 }() 1490 1491 for i := toSend + 2; i < toSend*2+2; i++ { 1492 getNext(i) 1493 } 1494 }) 1495 } 1496 } 1497 1498 func TestJetStreamWorkQueueMaxWaiting(t *testing.T) { 1499 cases := []struct { 1500 name string 1501 mconfig *StreamConfig 1502 }{ 1503 {"MemoryStore", &StreamConfig{Name: "MY_MSG_SET", Storage: MemoryStorage, Subjects: []string{"foo", "bar"}}}, 1504 {"FileStore", &StreamConfig{Name: "MY_MSG_SET", Storage: FileStorage, Subjects: []string{"foo", "bar"}}}, 1505 } 1506 for _, c := range cases { 1507 t.Run(c.name, func(t *testing.T) { 1508 s := RunBasicJetStreamServer(t) 1509 defer s.Shutdown() 1510 1511 mset, err := s.GlobalAccount().addStream(c.mconfig) 1512 if err != nil { 1513 t.Fatalf("Unexpected error adding stream: %v", err) 1514 } 1515 defer mset.delete() 1516 1517 // Make sure these cases fail 1518 cfg := &ConsumerConfig{Durable: "foo", AckPolicy: AckExplicit, MaxWaiting: 10, DeliverSubject: "_INBOX.22"} 1519 if _, err := mset.addConsumer(cfg); err == nil { 1520 t.Fatalf("Expected an error with MaxWaiting set on non-pull based consumer") 1521 } 1522 cfg = &ConsumerConfig{Durable: "foo", AckPolicy: AckExplicit, MaxWaiting: -1} 1523 if _, err := mset.addConsumer(cfg); err == nil { 1524 t.Fatalf("Expected an error with MaxWaiting being negative") 1525 } 1526 1527 // Create basic work queue mode consumer. 1528 wcfg := workerModeConfig("MAXWQ") 1529 o, err := mset.addConsumer(wcfg) 1530 if err != nil { 1531 t.Fatalf("Expected no error with registered interest, got %v", err) 1532 } 1533 defer o.delete() 1534 1535 // Make sure we set default correctly. 1536 if cfg := o.config(); cfg.MaxWaiting != JSWaitQueueDefaultMax { 1537 t.Fatalf("Expected default max waiting to have been set to %d, got %d", JSWaitQueueDefaultMax, cfg.MaxWaiting) 1538 } 1539 1540 expectWaiting := func(expected int) { 1541 t.Helper() 1542 checkFor(t, time.Second, 25*time.Millisecond, func() error { 1543 if oi := o.info(); oi.NumWaiting != expected { 1544 return fmt.Errorf("Expected %d waiting, got %d", expected, oi.NumWaiting) 1545 } 1546 return nil 1547 }) 1548 } 1549 1550 nc := clientConnectWithOldRequest(t, s) 1551 defer nc.Close() 1552 1553 // Like muxed new INBOX. 1554 sub, _ := nc.SubscribeSync("req.*") 1555 defer sub.Unsubscribe() 1556 nc.Flush() 1557 1558 checkSubPending := func(numExpected int) { 1559 t.Helper() 1560 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 1561 if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != numExpected { 1562 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected) 1563 } 1564 return nil 1565 }) 1566 } 1567 1568 getSubj := o.requestNextMsgSubject() 1569 // Queue up JSWaitQueueDefaultMax requests. 1570 for i := 0; i < JSWaitQueueDefaultMax; i++ { 1571 nc.PublishRequest(getSubj, fmt.Sprintf("req.%d", i), nil) 1572 } 1573 expectWaiting(JSWaitQueueDefaultMax) 1574 1575 // We are at the max, so we should get a 409 saying that we have 1576 // exceeded the number of pull requests. 
1577 m, err := nc.Request(getSubj, nil, 100*time.Millisecond) 1578 require_NoError(t, err) 1579 // Make sure this is the 409 1580 if v := m.Header.Get("Status"); v != "409" { 1581 t.Fatalf("Expected a 409 status code, got %q", v) 1582 } 1583 // The sub for the other requests should not have received anything 1584 checkSubPending(0) 1585 // Now send some messages that should make some of the requests complete 1586 sendStreamMsg(t, nc, "foo", "Hello World!") 1587 sendStreamMsg(t, nc, "bar", "Hello World!") 1588 expectWaiting(JSWaitQueueDefaultMax - 2) 1589 }) 1590 } 1591 } 1592 1593 func TestJetStreamWorkQueueWrapWaiting(t *testing.T) { 1594 cases := []struct { 1595 name string 1596 mconfig *StreamConfig 1597 }{ 1598 {"MemoryStore", &StreamConfig{Name: "MY_MSG_SET", Storage: MemoryStorage, Subjects: []string{"foo", "bar"}}}, 1599 {"FileStore", &StreamConfig{Name: "MY_MSG_SET", Storage: FileStorage, Subjects: []string{"foo", "bar"}}}, 1600 } 1601 for _, c := range cases { 1602 t.Run(c.name, func(t *testing.T) { 1603 s := RunBasicJetStreamServer(t) 1604 defer s.Shutdown() 1605 1606 mset, err := s.GlobalAccount().addStream(c.mconfig) 1607 if err != nil { 1608 t.Fatalf("Unexpected error adding stream: %v", err) 1609 } 1610 defer mset.delete() 1611 1612 maxWaiting := 8 1613 wcfg := workerModeConfig("WRAP") 1614 wcfg.MaxWaiting = maxWaiting 1615 1616 o, err := mset.addConsumer(wcfg) 1617 if err != nil { 1618 t.Fatalf("Expected no error with registered interest, got %v", err) 1619 } 1620 defer o.delete() 1621 1622 getSubj := o.requestNextMsgSubject() 1623 1624 expectWaiting := func(expected int) { 1625 t.Helper() 1626 checkFor(t, time.Second, 25*time.Millisecond, func() error { 1627 if oi := o.info(); oi.NumWaiting != expected { 1628 return fmt.Errorf("Expected %d waiting, got %d", expected, oi.NumWaiting) 1629 } 1630 return nil 1631 }) 1632 } 1633 1634 nc := clientConnectToServer(t, s) 1635 defer nc.Close() 1636 1637 sub, _ := nc.SubscribeSync("req.*") 1638 defer sub.Unsubscribe() 1639 nc.Flush() 1640 1641 // Fill up waiting. 1642 for i := 0; i < maxWaiting; i++ { 1643 nc.PublishRequest(getSubj, fmt.Sprintf("req.%d", i), nil) 1644 } 1645 expectWaiting(maxWaiting) 1646 1647 // Now use 1/2 of the waiting. 1648 for i := 0; i < maxWaiting/2; i++ { 1649 sendStreamMsg(t, nc, "foo", "Hello World!") 1650 } 1651 expectWaiting(maxWaiting / 2) 1652 1653 // Now add in two (2) more pull requests. 1654 for i := maxWaiting; i < maxWaiting+2; i++ { 1655 nc.PublishRequest(getSubj, fmt.Sprintf("req.%d", i), nil) 1656 } 1657 expectWaiting(maxWaiting/2 + 2) 1658 1659 // Now use second 1/2 of the waiting and the 2 extra. 
1660 for i := 0; i < maxWaiting/2+2; i++ { 1661 sendStreamMsg(t, nc, "bar", "Hello World!") 1662 } 1663 expectWaiting(0) 1664 1665 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 1666 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != maxWaiting+2 { 1667 return fmt.Errorf("Expected sub to have %d pending, got %d", maxWaiting+2, nmsgs) 1668 } 1669 return nil 1670 }) 1671 }) 1672 } 1673 } 1674 1675 func TestJetStreamWorkQueueRequest(t *testing.T) { 1676 cases := []struct { 1677 name string 1678 mconfig *StreamConfig 1679 }{ 1680 {"MemoryStore", &StreamConfig{Name: "MY_MSG_SET", Storage: MemoryStorage, Subjects: []string{"foo", "bar"}}}, 1681 {"FileStore", &StreamConfig{Name: "MY_MSG_SET", Storage: FileStorage, Subjects: []string{"foo", "bar"}}}, 1682 } 1683 for _, c := range cases { 1684 t.Run(c.name, func(t *testing.T) { 1685 s := RunBasicJetStreamServer(t) 1686 defer s.Shutdown() 1687 1688 mset, err := s.GlobalAccount().addStream(c.mconfig) 1689 if err != nil { 1690 t.Fatalf("Unexpected error adding stream: %v", err) 1691 } 1692 defer mset.delete() 1693 1694 o, err := mset.addConsumer(workerModeConfig("WRAP")) 1695 if err != nil { 1696 t.Fatalf("Expected no error with registered interest, got %v", err) 1697 } 1698 defer o.delete() 1699 1700 nc := clientConnectToServer(t, s) 1701 defer nc.Close() 1702 1703 toSend := 25 1704 for i := 0; i < toSend; i++ { 1705 sendStreamMsg(t, nc, "bar", "Hello World!") 1706 } 1707 1708 reply := "_.consumer._" 1709 sub, _ := nc.SubscribeSync(reply) 1710 defer sub.Unsubscribe() 1711 1712 getSubj := o.requestNextMsgSubject() 1713 1714 checkSubPending := func(numExpected int) { 1715 t.Helper() 1716 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 1717 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != numExpected { 1718 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected) 1719 } 1720 return nil 1721 }) 1722 } 1723 1724 // Create a formal request object. 1725 req := &JSApiConsumerGetNextRequest{Batch: toSend} 1726 jreq, _ := json.Marshal(req) 1727 nc.PublishRequest(getSubj, reply, jreq) 1728 1729 checkSubPending(toSend) 1730 1731 // Now check that we can ask for NoWait 1732 req.Batch = 1 1733 req.NoWait = true 1734 jreq, _ = json.Marshal(req) 1735 1736 resp, err := nc.Request(getSubj, jreq, 100*time.Millisecond) 1737 require_NoError(t, err) 1738 if status := resp.Header.Get("Status"); !strings.HasPrefix(status, "404") { 1739 t.Fatalf("Expected status code of 404") 1740 } 1741 // Load up more messages. 1742 for i := 0; i < toSend; i++ { 1743 sendStreamMsg(t, nc, "foo", "Hello World!") 1744 } 1745 // Now we will ask for a batch larger than what is queued up. 1746 req.Batch = toSend + 10 1747 req.NoWait = true 1748 jreq, _ = json.Marshal(req) 1749 1750 nc.PublishRequest(getSubj, reply, jreq) 1751 // We should now have 2 * toSend + the 404 message. 1752 checkSubPending(2*toSend + 1) 1753 for i := 0; i < 2*toSend+1; i++ { 1754 sub.NextMsg(time.Millisecond) 1755 } 1756 checkSubPending(0) 1757 mset.purge(nil) 1758 1759 // Now do expiration 1760 req.Batch = 1 1761 req.NoWait = false 1762 req.Expires = 100 * time.Millisecond 1763 jreq, _ = json.Marshal(req) 1764 1765 nc.PublishRequest(getSubj, reply, jreq) 1766 // Let it expire 1767 time.Sleep(200 * time.Millisecond) 1768 1769 // Send a few more messages. These should not be delivered to the sub.
1770 sendStreamMsg(t, nc, "foo", "Hello World!") 1771 sendStreamMsg(t, nc, "bar", "Hello World!") 1772 time.Sleep(100 * time.Millisecond) 1773 1774 // Expect the request timed out message. 1775 checkSubPending(1) 1776 if resp, _ = sub.NextMsg(time.Millisecond); resp == nil { 1777 t.Fatalf("Expected an expired status message") 1778 } 1779 if status := resp.Header.Get("Status"); !strings.HasPrefix(status, "408") { 1780 t.Fatalf("Expected status code of 408") 1781 } 1782 1783 // Send a new request, we should not get the 408 because our previous request 1784 // should have expired. 1785 nc.PublishRequest(getSubj, reply, jreq) 1786 checkSubPending(1) 1787 sub.NextMsg(time.Second) 1788 checkSubPending(0) 1789 }) 1790 } 1791 } 1792 1793 func TestJetStreamSubjectFiltering(t *testing.T) { 1794 cases := []struct { 1795 name string 1796 mconfig *StreamConfig 1797 }{ 1798 {"MemoryStore", &StreamConfig{Name: "MSET", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 1799 {"FileStore", &StreamConfig{Name: "MSET", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 1800 } 1801 for _, c := range cases { 1802 t.Run(c.name, func(t *testing.T) { 1803 s := RunBasicJetStreamServer(t) 1804 defer s.Shutdown() 1805 1806 mset, err := s.GlobalAccount().addStream(c.mconfig) 1807 if err != nil { 1808 t.Fatalf("Unexpected error adding stream: %v", err) 1809 } 1810 defer mset.delete() 1811 1812 nc := clientConnectToServer(t, s) 1813 defer nc.Close() 1814 1815 toSend := 50 1816 subjA := "foo.A" 1817 subjB := "foo.B" 1818 1819 for i := 0; i < toSend; i++ { 1820 sendStreamMsg(t, nc, subjA, "Hello World!") 1821 sendStreamMsg(t, nc, subjB, "Hello World!") 1822 } 1823 state := mset.state() 1824 if state.Msgs != uint64(toSend*2) { 1825 t.Fatalf("Expected %d messages, got %d", toSend*2, state.Msgs) 1826 } 1827 1828 delivery := nats.NewInbox() 1829 sub, _ := nc.SubscribeSync(delivery) 1830 defer sub.Unsubscribe() 1831 nc.Flush() 1832 1833 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: delivery, FilterSubject: subjB}) 1834 if err != nil { 1835 t.Fatalf("Expected no error with registered interest, got %v", err) 1836 } 1837 defer o.delete() 1838 1839 // Now let's check the messages 1840 for i := 1; i <= toSend; i++ { 1841 m, err := sub.NextMsg(time.Second) 1842 require_NoError(t, err) 1843 // JetStream will have the subject match the stream subject, not delivery subject. 1844 // We want these to only be subjB. 1845 if m.Subject != subjB { 1846 t.Fatalf("Expected original subject of %q, but got %q", subjB, m.Subject) 1847 } 1848 // Now check that reply subject exists and has a sequence as the last token. 1849 if seq := o.seqFromReply(m.Reply); seq != uint64(i) { 1850 t.Fatalf("Expected sequence of %d , got %d", i, seq) 1851 } 1852 // Ack the message here. 
1853 m.Respond(nil) 1854 } 1855 1856 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 0 { 1857 t.Fatalf("Expected sub to have no pending") 1858 } 1859 }) 1860 } 1861 } 1862 1863 func TestJetStreamWorkQueueSubjectFiltering(t *testing.T) { 1864 cases := []struct { 1865 name string 1866 mconfig *StreamConfig 1867 }{ 1868 {"MemoryStore", &StreamConfig{Name: "MY_MSG_SET", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 1869 {"FileStore", &StreamConfig{Name: "MY_MSG_SET", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 1870 } 1871 for _, c := range cases { 1872 t.Run(c.name, func(t *testing.T) { 1873 s := RunBasicJetStreamServer(t) 1874 defer s.Shutdown() 1875 1876 mset, err := s.GlobalAccount().addStream(c.mconfig) 1877 if err != nil { 1878 t.Fatalf("Unexpected error adding stream: %v", err) 1879 } 1880 defer mset.delete() 1881 1882 nc := clientConnectToServer(t, s) 1883 defer nc.Close() 1884 1885 toSend := 50 1886 subjA := "foo.A" 1887 subjB := "foo.B" 1888 1889 for i := 0; i < toSend; i++ { 1890 sendStreamMsg(t, nc, subjA, "Hello World!") 1891 sendStreamMsg(t, nc, subjB, "Hello World!") 1892 } 1893 state := mset.state() 1894 if state.Msgs != uint64(toSend*2) { 1895 t.Fatalf("Expected %d messages, got %d", toSend*2, state.Msgs) 1896 } 1897 1898 oname := "WQ" 1899 o, err := mset.addConsumer(&ConsumerConfig{Durable: oname, FilterSubject: subjA, AckPolicy: AckExplicit}) 1900 if err != nil { 1901 t.Fatalf("Expected no error with registered interest, got %v", err) 1902 } 1903 defer o.delete() 1904 1905 if o.nextSeq() != 1 { 1906 t.Fatalf("Expected to be starting at sequence 1") 1907 } 1908 1909 getNext := func(seqno int) { 1910 t.Helper() 1911 nextMsg, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 1912 require_NoError(t, err) 1913 if nextMsg.Subject != subjA { 1914 t.Fatalf("Expected subject of %q, got %q", subjA, nextMsg.Subject) 1915 } 1916 if seq := o.seqFromReply(nextMsg.Reply); seq != uint64(seqno) { 1917 t.Fatalf("Expected sequence of %d , got %d", seqno, seq) 1918 } 1919 nextMsg.Respond(nil) 1920 } 1921 1922 // Make sure we can get the messages already there. 1923 for i := 1; i <= toSend; i++ { 1924 getNext(i) 1925 } 1926 }) 1927 } 1928 } 1929 1930 func TestJetStreamWildcardSubjectFiltering(t *testing.T) { 1931 cases := []struct { 1932 name string 1933 mconfig *StreamConfig 1934 }{ 1935 {"MemoryStore", &StreamConfig{Name: "ORDERS", Storage: MemoryStorage, Subjects: []string{"orders.*.*"}}}, 1936 {"FileStore", &StreamConfig{Name: "ORDERS", Storage: FileStorage, Subjects: []string{"orders.*.*"}}}, 1937 } 1938 for _, c := range cases { 1939 t.Run(c.name, func(t *testing.T) { 1940 s := RunBasicJetStreamServer(t) 1941 defer s.Shutdown() 1942 1943 mset, err := s.GlobalAccount().addStream(c.mconfig) 1944 if err != nil { 1945 t.Fatalf("Unexpected error adding stream: %v", err) 1946 } 1947 defer mset.delete() 1948 1949 nc := clientConnectToServer(t, s) 1950 defer nc.Close() 1951 1952 toSend := 100 1953 for i := 1; i <= toSend; i++ { 1954 subj := fmt.Sprintf("orders.%d.%s", i, "NEW") 1955 sendStreamMsg(t, nc, subj, "new order") 1956 } 1957 // Randomly move 25 to shipped. 
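// Pick distinct order ids at random, retrying on collisions so exactly toShip orders get a SHIPPED message.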
1958 toShip := 25 1959 shipped := make(map[int]bool) 1960 for i := 0; i < toShip; { 1961 orderId := rand.Intn(toSend-1) + 1 1962 if shipped[orderId] { 1963 continue 1964 } 1965 subj := fmt.Sprintf("orders.%d.%s", orderId, "SHIPPED") 1966 sendStreamMsg(t, nc, subj, "shipped order") 1967 shipped[orderId] = true 1968 i++ 1969 } 1970 state := mset.state() 1971 if state.Msgs != uint64(toSend+toShip) { 1972 t.Fatalf("Expected %d messages, got %d", toSend+toShip, state.Msgs) 1973 } 1974 1975 delivery := nats.NewInbox() 1976 sub, _ := nc.SubscribeSync(delivery) 1977 defer sub.Unsubscribe() 1978 nc.Flush() 1979 1980 // Get all shipped. 1981 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: delivery, FilterSubject: "orders.*.SHIPPED"}) 1982 if err != nil { 1983 t.Fatalf("Expected no error with registered interest, got %v", err) 1984 } 1985 defer o.delete() 1986 1987 checkFor(t, time.Second, 25*time.Millisecond, func() error { 1988 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toShip { 1989 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toShip) 1990 } 1991 return nil 1992 }) 1993 for nmsgs, _, _ := sub.Pending(); nmsgs > 0; nmsgs, _, _ = sub.Pending() { 1994 sub.NextMsg(time.Second) 1995 } 1996 if nmsgs, _, _ := sub.Pending(); nmsgs != 0 { 1997 t.Fatalf("Expected no pending, got %d", nmsgs) 1998 } 1999 2000 // Get all new 2001 o, err = mset.addConsumer(&ConsumerConfig{DeliverSubject: delivery, FilterSubject: "orders.*.NEW"}) 2002 if err != nil { 2003 t.Fatalf("Expected no error with registered interest, got %v", err) 2004 } 2005 defer o.delete() 2006 2007 checkFor(t, time.Second, 25*time.Millisecond, func() error { 2008 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend { 2009 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend) 2010 } 2011 return nil 2012 }) 2013 for nmsgs, _, _ := sub.Pending(); nmsgs > 0; nmsgs, _, _ = sub.Pending() { 2014 sub.NextMsg(time.Second) 2015 } 2016 if nmsgs, _, _ := sub.Pending(); nmsgs != 0 { 2017 t.Fatalf("Expected no pending, got %d", nmsgs) 2018 } 2019 2020 // Now grab a single orderId that has shipped, so we should have two messages. 2021 var orderId int 2022 for orderId = range shipped { 2023 break 2024 } 2025 subj := fmt.Sprintf("orders.%d.*", orderId) 2026 o, err = mset.addConsumer(&ConsumerConfig{DeliverSubject: delivery, FilterSubject: subj}) 2027 if err != nil { 2028 t.Fatalf("Expected no error with registered interest, got %v", err) 2029 } 2030 defer o.delete() 2031 2032 checkFor(t, time.Second, 25*time.Millisecond, func() error { 2033 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 2 { 2034 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, 2) 2035 } 2036 return nil 2037 }) 2038 }) 2039 } 2040 } 2041 2042 func TestJetStreamWorkQueueAckAndNext(t *testing.T) { 2043 cases := []struct { 2044 name string 2045 mconfig *StreamConfig 2046 }{ 2047 {"MemoryStore", &StreamConfig{Name: "MY_MSG_SET", Storage: MemoryStorage, Subjects: []string{"foo", "bar"}}}, 2048 {"FileStore", &StreamConfig{Name: "MY_MSG_SET", Storage: FileStorage, Subjects: []string{"foo", "bar"}}}, 2049 } 2050 for _, c := range cases { 2051 t.Run(c.name, func(t *testing.T) { 2052 s := RunBasicJetStreamServer(t) 2053 defer s.Shutdown() 2054 2055 mset, err := s.GlobalAccount().addStream(c.mconfig) 2056 if err != nil { 2057 t.Fatalf("Unexpected error adding stream: %v", err) 2058 } 2059 defer mset.delete() 2060 2061 // Create basic work queue mode consumer. 
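// workerModeConfig builds a durable, pull-based consumer config (no DeliverSubject) with AckExplicit, as used throughout these work queue tests.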
2062 oname := "WQ" 2063 o, err := mset.addConsumer(workerModeConfig(oname)) 2064 if err != nil { 2065 t.Fatalf("Expected no error with registered interest, got %v", err) 2066 } 2067 defer o.delete() 2068 2069 if o.nextSeq() != 1 { 2070 t.Fatalf("Expected to be starting at sequence 1") 2071 } 2072 2073 nc := clientConnectToServer(t, s) 2074 defer nc.Close() 2075 2076 // Now load up some messages. 2077 toSend := 100 2078 sendSubj := "bar" 2079 for i := 0; i < toSend; i++ { 2080 sendStreamMsg(t, nc, sendSubj, "Hello World!") 2081 } 2082 state := mset.state() 2083 if state.Msgs != uint64(toSend) { 2084 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 2085 } 2086 2087 sub, _ := nc.SubscribeSync(nats.NewInbox()) 2088 defer sub.Unsubscribe() 2089 2090 // Kick things off. 2091 // For normal work queue semantics, you send requests to the subject with stream and consumer name. 2092 // We will do this to start it off then use ack+next to get other messages. 2093 nc.PublishRequest(o.requestNextMsgSubject(), sub.Subject, nil) 2094 2095 for i := 0; i < toSend; i++ { 2096 m, err := sub.NextMsg(time.Second) 2097 if err != nil { 2098 t.Fatalf("Unexpected error waiting for messages: %v", err) 2099 } 2100 2101 if !bytes.Equal(m.Data, []byte("Hello World!")) { 2102 t.Fatalf("Got an invalid message from the stream: %q", m.Data) 2103 } 2104 2105 nc.PublishRequest(m.Reply, sub.Subject, AckNext) 2106 } 2107 }) 2108 } 2109 } 2110 2111 func TestJetStreamWorkQueueRequestBatch(t *testing.T) { 2112 cases := []struct { 2113 name string 2114 mconfig *StreamConfig 2115 }{ 2116 {"MemoryStore", &StreamConfig{Name: "MY_MSG_SET", Storage: MemoryStorage, Subjects: []string{"foo", "bar"}}}, 2117 {"FileStore", &StreamConfig{Name: "MY_MSG_SET", Storage: FileStorage, Subjects: []string{"foo", "bar"}}}, 2118 } 2119 for _, c := range cases { 2120 t.Run(c.name, func(t *testing.T) { 2121 s := RunBasicJetStreamServer(t) 2122 defer s.Shutdown() 2123 2124 mset, err := s.GlobalAccount().addStream(c.mconfig) 2125 if err != nil { 2126 t.Fatalf("Unexpected error adding stream: %v", err) 2127 } 2128 defer mset.delete() 2129 2130 // Create basic work queue mode consumer. 2131 oname := "WQ" 2132 o, err := mset.addConsumer(workerModeConfig(oname)) 2133 if err != nil { 2134 t.Fatalf("Expected no error with registered interest, got %v", err) 2135 } 2136 defer o.delete() 2137 2138 if o.nextSeq() != 1 { 2139 t.Fatalf("Expected to be starting at sequence 1") 2140 } 2141 2142 nc := clientConnectToServer(t, s) 2143 defer nc.Close() 2144 2145 // Now load up some messages. 2146 toSend := 100 2147 sendSubj := "bar" 2148 for i := 0; i < toSend; i++ { 2149 sendStreamMsg(t, nc, sendSubj, "Hello World!") 2150 } 2151 state := mset.state() 2152 if state.Msgs != uint64(toSend) { 2153 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 2154 } 2155 2156 sub, _ := nc.SubscribeSync(nats.NewInbox()) 2157 defer sub.Unsubscribe() 2158 2159 // For normal work queue semantics, you send requests to the subject with stream and consumer name. 2160 // We will do this to start it off then use ack+next to get other messages. 2161 // Kick things off with batch size of 50. 2162 batchSize := 50 2163 nc.PublishRequest(o.requestNextMsgSubject(), sub.Subject, []byte(strconv.Itoa(batchSize))) 2164 2165 // We should receive batchSize with no acks or additional requests. 
2166 checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error { 2167 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != batchSize { 2168 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, batchSize) 2169 } 2170 return nil 2171 }) 2172 2173 // Now queue up the request without messages and add them after. 2174 sub, _ = nc.SubscribeSync(nats.NewInbox()) 2175 defer sub.Unsubscribe() 2176 mset.purge(nil) 2177 2178 nc.PublishRequest(o.requestNextMsgSubject(), sub.Subject, []byte(strconv.Itoa(batchSize))) 2179 nc.Flush() // Make sure it's registered. 2180 2181 for i := 0; i < toSend; i++ { 2182 sendStreamMsg(t, nc, sendSubj, "Hello World!") 2183 } 2184 2185 // We should receive batchSize with no acks or additional requests. 2186 checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error { 2187 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != batchSize { 2188 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, batchSize) 2189 } 2190 return nil 2191 }) 2192 }) 2193 } 2194 } 2195 2196 func TestJetStreamWorkQueueRetentionStream(t *testing.T) { 2197 cases := []struct { 2198 name string 2199 mconfig *StreamConfig 2200 }{ 2201 {name: "MemoryStore", mconfig: &StreamConfig{ 2202 Name: "MWQ", 2203 Storage: MemoryStorage, 2204 Subjects: []string{"MY_WORK_QUEUE.>"}, 2205 Retention: WorkQueuePolicy}, 2206 }, 2207 {name: "FileStore", mconfig: &StreamConfig{ 2208 Name: "MWQ", 2209 Storage: FileStorage, 2210 Subjects: []string{"MY_WORK_QUEUE.>"}, 2211 Retention: WorkQueuePolicy}, 2212 }, 2213 } 2214 for _, c := range cases { 2215 t.Run(c.name, func(t *testing.T) { 2216 s := RunBasicJetStreamServer(t) 2217 defer s.Shutdown() 2218 2219 mset, err := s.GlobalAccount().addStream(c.mconfig) 2220 if err != nil { 2221 t.Fatalf("Unexpected error adding stream: %v", err) 2222 } 2223 defer mset.delete() 2224 2225 // This type of stream has restrictions which we will test here. 2226 // DeliverAll is the only start mode allowed. 2227 if _, err := mset.addConsumer(&ConsumerConfig{DeliverPolicy: DeliverLast}); err == nil { 2228 t.Fatalf("Expected an error with anything but DeliverAll") 2229 } 2230 2231 // We will create a non-partitioned consumer. This should succeed. 2232 o, err := mset.addConsumer(&ConsumerConfig{Durable: "PBO", AckPolicy: AckExplicit}) 2233 require_NoError(t, err) 2234 defer o.delete() 2235 2236 // Now if we create another this should fail; we can only have one non-partitioned consumer. 2237 if _, err := mset.addConsumer(&ConsumerConfig{}); err == nil { 2238 t.Fatalf("Expected an error on attempt for second consumer for a workqueue") 2239 } 2240 o.delete() 2241 2242 if numo := mset.numConsumers(); numo != 0 { 2243 t.Fatalf("Expected to have zero consumers, got %d", numo) 2244 } 2245 2246 // Now add in a consumer that has a partition. 2247 pindex := 1 2248 pConfig := func(pname string) *ConsumerConfig { 2249 dname := fmt.Sprintf("PPBO-%d", pindex) 2250 pindex += 1 2251 return &ConsumerConfig{Durable: dname, FilterSubject: pname, AckPolicy: AckExplicit} 2252 } 2253 o, err = mset.addConsumer(pConfig("MY_WORK_QUEUE.A")) 2254 require_NoError(t, err) 2255 defer o.delete() 2256 2257 // Now creating another with a separate partition should work. 2258 o2, err := mset.addConsumer(pConfig("MY_WORK_QUEUE.B")) 2259 require_NoError(t, err) 2260 defer o2.delete() 2261 2262 // Anything that would overlap should fail though.
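// Consumers on a work queue stream must use non-overlapping filter subjects, since each message may only be consumed once.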
2263 if _, err := mset.addConsumer(pConfig("MY_WORK_QUEUE.A")); err == nil { 2264 t.Fatalf("Expected an error on attempt for partitioned consumer for a workqueue") 2265 } 2266 if _, err := mset.addConsumer(pConfig("MY_WORK_QUEUE.B")); err == nil { 2267 t.Fatalf("Expected an error on attempt for partitioned consumer for a workqueue") 2268 } 2269 2270 o3, err := mset.addConsumer(pConfig("MY_WORK_QUEUE.C")) 2271 require_NoError(t, err) 2272 2273 o.delete() 2274 o2.delete() 2275 o3.delete() 2276 2277 // Test with wildcards, first from wider to narrower 2278 o, err = mset.addConsumer(pConfig("MY_WORK_QUEUE.>")) 2279 require_NoError(t, err) 2280 if _, err := mset.addConsumer(pConfig("MY_WORK_QUEUE.*.BAR")); err == nil { 2281 t.Fatalf("Expected an error on attempt for partitioned consumer for a workqueue") 2282 } 2283 o.delete() 2284 2285 // Now from narrower to wider 2286 o, err = mset.addConsumer(pConfig("MY_WORK_QUEUE.*.BAR")) 2287 require_NoError(t, err) 2288 if _, err := mset.addConsumer(pConfig("MY_WORK_QUEUE.>")); err == nil { 2289 t.Fatalf("Expected an error on attempt for partitioned consumer for a workqueue") 2290 } 2291 o.delete() 2292 2293 // Push based will be allowed now, including ephemerals. 2294 // They can not overlap etc meaning same rules as above apply. 2295 o4, err := mset.addConsumer(&ConsumerConfig{ 2296 Durable: "DURABLE", 2297 DeliverSubject: "SOME.SUBJ", 2298 AckPolicy: AckExplicit, 2299 }) 2300 if err != nil { 2301 t.Fatalf("Unexpected Error: %v", err) 2302 } 2303 defer o4.delete() 2304 2305 // Now try to create an ephemeral 2306 nc := clientConnectToServer(t, s) 2307 defer nc.Close() 2308 2309 sub, _ := nc.SubscribeSync(nats.NewInbox()) 2310 defer sub.Unsubscribe() 2311 nc.Flush() 2312 2313 // This should fail at first due to conflict above. 2314 ephCfg := &ConsumerConfig{DeliverSubject: sub.Subject, AckPolicy: AckExplicit} 2315 if _, err := mset.addConsumer(ephCfg); err == nil { 2316 t.Fatalf("Expected an error ") 2317 } 2318 // Delete of o4 should clear. 2319 o4.delete() 2320 o5, err := mset.addConsumer(ephCfg) 2321 if err != nil { 2322 t.Fatalf("Unexpected Error: %v", err) 2323 } 2324 defer o5.delete() 2325 }) 2326 } 2327 } 2328 2329 func TestJetStreamAckAllRedelivery(t *testing.T) { 2330 cases := []struct { 2331 name string 2332 mconfig *StreamConfig 2333 }{ 2334 {"MemoryStore", &StreamConfig{Name: "MY_S22", Storage: MemoryStorage}}, 2335 {"FileStore", &StreamConfig{Name: "MY_S22", Storage: FileStorage}}, 2336 } 2337 for _, c := range cases { 2338 t.Run(c.name, func(t *testing.T) { 2339 s := RunBasicJetStreamServer(t) 2340 defer s.Shutdown() 2341 2342 mset, err := s.GlobalAccount().addStream(c.mconfig) 2343 if err != nil { 2344 t.Fatalf("Unexpected error adding stream: %v", err) 2345 } 2346 defer mset.delete() 2347 2348 nc := clientConnectToServer(t, s) 2349 defer nc.Close() 2350 2351 // Now load up some messages. 2352 toSend := 100 2353 for i := 0; i < toSend; i++ { 2354 sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!") 2355 } 2356 state := mset.state() 2357 if state.Msgs != uint64(toSend) { 2358 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 2359 } 2360 2361 sub, _ := nc.SubscribeSync(nats.NewInbox()) 2362 defer sub.Unsubscribe() 2363 nc.Flush() 2364 2365 o, err := mset.addConsumer(&ConsumerConfig{ 2366 DeliverSubject: sub.Subject, 2367 AckWait: 50 * time.Millisecond, 2368 AckPolicy: AckAll, 2369 }) 2370 if err != nil { 2371 t.Fatalf("Unexpected error adding consumer: %v", err) 2372 } 2373 defer o.delete() 2374 2375 // Wait for messages. 
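// With AckPolicy AckAll, a 50ms AckWait and no acks being sent, the whole batch is redelivered on every ack-wait interval.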
2376 // We will do 5 redeliveries. 2377 for i := 1; i <= 5; i++ { 2378 checkFor(t, 500*time.Millisecond, 10*time.Millisecond, func() error { 2379 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend*i { 2380 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend*i) 2381 } 2382 return nil 2383 }) 2384 } 2385 // Stop redeliveries. 2386 o.delete() 2387 2388 // Now make sure that they are all redelivered in order for each redelivered batch. 2389 for l := 1; l <= 5; l++ { 2390 for i := 1; i <= toSend; i++ { 2391 m, _ := sub.NextMsg(time.Second) 2392 if seq := o.streamSeqFromReply(m.Reply); seq != uint64(i) { 2393 t.Fatalf("Expected stream sequence of %d, got %d", i, seq) 2394 } 2395 } 2396 } 2397 }) 2398 } 2399 } 2400 2401 func TestJetStreamAckReplyStreamPending(t *testing.T) { 2402 msc := StreamConfig{ 2403 Name: "MY_WQ", 2404 Subjects: []string{"foo.*"}, 2405 Storage: MemoryStorage, 2406 MaxAge: 1 * time.Second, 2407 Retention: WorkQueuePolicy, 2408 } 2409 fsc := msc 2410 fsc.Storage = FileStorage 2411 2412 cases := []struct { 2413 name string 2414 mconfig *StreamConfig 2415 }{ 2416 {"MemoryStore", &msc}, 2417 {"FileStore", &fsc}, 2418 } 2419 for _, c := range cases { 2420 t.Run(c.name, func(t *testing.T) { 2421 s := RunBasicJetStreamServer(t) 2422 defer s.Shutdown() 2423 2424 mset, err := s.GlobalAccount().addStream(c.mconfig) 2425 if err != nil { 2426 t.Fatalf("Unexpected error adding stream: %v", err) 2427 } 2428 defer mset.delete() 2429 2430 nc := clientConnectToServer(t, s) 2431 defer nc.Close() 2432 2433 // Now load up some messages. 2434 toSend := 100 2435 for i := 0; i < toSend; i++ { 2436 sendStreamMsg(t, nc, "foo.1", "Hello World!") 2437 } 2438 nc.Flush() 2439 2440 state := mset.state() 2441 if state.Msgs != uint64(toSend) { 2442 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 2443 } 2444 2445 o, err := mset.addConsumer(&ConsumerConfig{Durable: "PBO", AckPolicy: AckExplicit}) 2446 require_NoError(t, err) 2447 defer o.delete() 2448 2449 expectPending := func(ep int) { 2450 t.Helper() 2451 // Now check consumer info. 2452 checkFor(t, time.Second, 10*time.Millisecond, func() error { 2453 if info, pep := o.info(), ep+1; int(info.NumPending) != pep { 2454 return fmt.Errorf("Expected consumer info pending of %d, got %d", pep, info.NumPending) 2455 } 2456 return nil 2457 }) 2458 m, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 2459 if err != nil { 2460 t.Fatalf("Unexpected error: %v", err) 2461 } 2462 _, _, _, _, pending := replyInfo(m.Reply) 2463 if pending != uint64(ep) { 2464 t.Fatalf("Expected ack reply pending of %d, got %d - reply: %q", ep, pending, m.Reply) 2465 } 2466 } 2467 2468 expectPending(toSend - 1) 2469 // Send some more while we are connected. 2470 for i := 0; i < toSend; i++ { 2471 sendStreamMsg(t, nc, "foo.1", "Hello World!") 2472 } 2473 nc.Flush() 2474 2475 expectPending(toSend*2 - 2) 2476 // Purge and send a new one. 2477 mset.purge(nil) 2478 nc.Flush() 2479 2480 sendStreamMsg(t, nc, "foo.1", "Hello World!") 2481 expectPending(0) 2482 for i := 0; i < toSend; i++ { 2483 sendStreamMsg(t, nc, "foo.22", "Hello World!") 2484 } 2485 expectPending(toSend - 1) // 201 2486 // Test that delete will not register for consumed messages. 2487 mset.removeMsg(mset.state().FirstSeq) 2488 expectPending(toSend - 2) // 202 2489 // Now remove one that has not been delivered. 2490 mset.removeMsg(250) 2491 expectPending(toSend - 4) // 203 2492 2493 // Test Expiration. 
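// The stream was configured with MaxAge of one second, so these messages should age out on their own.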
2494 mset.purge(nil) 2495 for i := 0; i < toSend; i++ { 2496 sendStreamMsg(t, nc, "foo.1", "Hello World!") 2497 } 2498 nc.Flush() 2499 2500 // Wait for expiration to kick in. 2501 checkFor(t, 5*time.Second, time.Second, func() error { 2502 if state := mset.state(); state.Msgs != 0 { 2503 return fmt.Errorf("Stream still has messages") 2504 } 2505 return nil 2506 }) 2507 sendStreamMsg(t, nc, "foo.33", "Hello World!") 2508 expectPending(0) 2509 2510 // Now do filtered consumers. 2511 o.delete() 2512 o, err = mset.addConsumer(&ConsumerConfig{Durable: "PBO-FILTERED", AckPolicy: AckExplicit, FilterSubject: "foo.22"}) 2513 require_NoError(t, err) 2514 defer o.delete() 2515 2516 for i := 0; i < toSend; i++ { 2517 sendStreamMsg(t, nc, "foo.33", "Hello World!") 2518 } 2519 nc.Flush() 2520 2521 if info := o.info(); info.NumPending != 0 { 2522 t.Fatalf("Expected no pending, got %d", info.NumPending) 2523 } 2524 // Now send one message that will match us. 2525 sendStreamMsg(t, nc, "foo.22", "Hello World!") 2526 expectPending(0) 2527 sendStreamMsg(t, nc, "foo.22", "Hello World!") // 504 2528 sendStreamMsg(t, nc, "foo.22", "Hello World!") // 505 2529 sendStreamMsg(t, nc, "foo.22", "Hello World!") // 506 2530 sendStreamMsg(t, nc, "foo.22", "Hello World!") // 507 2531 expectPending(3) 2532 mset.removeMsg(506) 2533 expectPending(1) 2534 for i := 0; i < toSend; i++ { 2535 sendStreamMsg(t, nc, "foo.22", "Hello World!") 2536 } 2537 nc.Flush() 2538 expectPending(100) 2539 mset.purge(nil) 2540 sendStreamMsg(t, nc, "foo.22", "Hello World!") 2541 expectPending(0) 2542 }) 2543 } 2544 } 2545 2546 func TestJetStreamAckReplyStreamPendingWithAcks(t *testing.T) { 2547 msc := StreamConfig{ 2548 Name: "MY_STREAM", 2549 Subjects: []string{"foo", "bar", "baz"}, 2550 Storage: MemoryStorage, 2551 } 2552 fsc := msc 2553 fsc.Storage = FileStorage 2554 2555 cases := []struct { 2556 name string 2557 mconfig *StreamConfig 2558 }{ 2559 {"MemoryStore", &msc}, 2560 {"FileStore", &fsc}, 2561 } 2562 for _, c := range cases { 2563 t.Run(c.name, func(t *testing.T) { 2564 s := RunBasicJetStreamServer(t) 2565 defer s.Shutdown() 2566 2567 mset, err := s.GlobalAccount().addStream(c.mconfig) 2568 if err != nil { 2569 t.Fatalf("Unexpected error adding stream: %v", err) 2570 } 2571 defer mset.delete() 2572 2573 nc := clientConnectToServer(t, s) 2574 defer nc.Close() 2575 2576 // Now load up some messages. 2577 toSend := 500 2578 for i := 0; i < toSend; i++ { 2579 sendStreamMsg(t, nc, "foo", "Hello Foo!") 2580 sendStreamMsg(t, nc, "bar", "Hello Bar!") 2581 sendStreamMsg(t, nc, "baz", "Hello Baz!") 2582 } 2583 state := mset.state() 2584 if state.Msgs != uint64(toSend*3) { 2585 t.Fatalf("Expected %d messages, got %d", toSend*3, state.Msgs) 2586 } 2587 dsubj := "_d_" 2588 o, err := mset.addConsumer(&ConsumerConfig{ 2589 Durable: "D-1", 2590 AckPolicy: AckExplicit, 2591 FilterSubject: "foo", 2592 DeliverSubject: dsubj, 2593 }) 2594 require_NoError(t, err) 2595 defer o.delete() 2596 2597 if info := o.info(); int(info.NumPending) != toSend { 2598 t.Fatalf("Expected consumer info pending of %d, got %d", toSend, info.NumPending) 2599 } 2600 2601 sub, _ := nc.SubscribeSync(dsubj) 2602 defer sub.Unsubscribe() 2603 2604 checkFor(t, 500*time.Millisecond, 10*time.Millisecond, func() error { 2605 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend { 2606 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend) 2607 } 2608 return nil 2609 }) 2610 2611 // Should be zero. 
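// NumPending counts messages not yet delivered to the consumer; NumAckPending counts delivered messages still awaiting an ack.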
2612 if info := o.info(); int(info.NumPending) != 0 { 2613 t.Fatalf("Expected consumer info pending of %d, got %d", 0, info.NumPending) 2614 } else if info.NumAckPending != toSend { 2615 t.Fatalf("Expected %d to be pending acks, got %d", toSend, info.NumAckPending) 2616 } 2617 }) 2618 } 2619 } 2620 2621 func TestJetStreamWorkQueueAckWaitRedelivery(t *testing.T) { 2622 cases := []struct { 2623 name string 2624 mconfig *StreamConfig 2625 }{ 2626 {"MemoryStore", &StreamConfig{Name: "MY_WQ", Storage: MemoryStorage, Retention: WorkQueuePolicy}}, 2627 {"FileStore", &StreamConfig{Name: "MY_WQ", Storage: FileStorage, Retention: WorkQueuePolicy}}, 2628 } 2629 for _, c := range cases { 2630 t.Run(c.name, func(t *testing.T) { 2631 s := RunBasicJetStreamServer(t) 2632 defer s.Shutdown() 2633 2634 mset, err := s.GlobalAccount().addStream(c.mconfig) 2635 if err != nil { 2636 t.Fatalf("Unexpected error adding stream: %v", err) 2637 } 2638 defer mset.delete() 2639 2640 nc := clientConnectToServer(t, s) 2641 defer nc.Close() 2642 2643 // Now load up some messages. 2644 toSend := 100 2645 for i := 0; i < toSend; i++ { 2646 sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!") 2647 } 2648 state := mset.state() 2649 if state.Msgs != uint64(toSend) { 2650 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 2651 } 2652 2653 ackWait := 100 * time.Millisecond 2654 2655 o, err := mset.addConsumer(&ConsumerConfig{Durable: "PBO", AckPolicy: AckExplicit, AckWait: ackWait}) 2656 require_NoError(t, err) 2657 defer o.delete() 2658 2659 sub, _ := nc.SubscribeSync(nats.NewInbox()) 2660 defer sub.Unsubscribe() 2661 2662 reqNextMsgSubj := o.requestNextMsgSubject() 2663 2664 // Consume all the messages. But do not ack. 2665 for i := 0; i < toSend; i++ { 2666 nc.PublishRequest(reqNextMsgSubj, sub.Subject, nil) 2667 if _, err := sub.NextMsg(time.Second); err != nil { 2668 t.Fatalf("Unexpected error waiting for messages: %v", err) 2669 } 2670 } 2671 2672 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 0 { 2673 t.Fatalf("Did not consume all messages, still have %d", nmsgs) 2674 } 2675 2676 // All messages should still be there. 2677 state = mset.state() 2678 if int(state.Msgs) != toSend { 2679 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 2680 } 2681 2682 // Now consume and ack. 2683 for i := 1; i <= toSend; i++ { 2684 nc.PublishRequest(reqNextMsgSubj, sub.Subject, nil) 2685 m, err := sub.NextMsg(time.Second) 2686 if err != nil { 2687 t.Fatalf("Unexpected error waiting for message[%d]: %v", i, err) 2688 } 2689 sseq, dseq, dcount, _, _ := replyInfo(m.Reply) 2690 if sseq != uint64(i) { 2691 t.Fatalf("Expected set sequence of %d , got %d", i, sseq) 2692 } 2693 // Delivery sequences should always increase. 2694 if dseq != uint64(toSend+i) { 2695 t.Fatalf("Expected delivery sequence of %d , got %d", toSend+i, dseq) 2696 } 2697 if dcount == 1 { 2698 t.Fatalf("Expected these to be marked as redelivered") 2699 } 2700 // Ack the message here. 2701 m.AckSync() 2702 } 2703 2704 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 0 { 2705 t.Fatalf("Did not consume all messages, still have %d", nmsgs) 2706 } 2707 2708 // Flush acks 2709 nc.Flush() 2710 2711 // Now check the mset as well, since we have a WorkQueue retention policy this should be empty. 
2712 if state := mset.state(); state.Msgs != 0 { 2713 t.Fatalf("Expected no messages, got %d", state.Msgs) 2714 } 2715 }) 2716 } 2717 } 2718 2719 func TestJetStreamWorkQueueNakRedelivery(t *testing.T) { 2720 cases := []struct { 2721 name string 2722 mconfig *StreamConfig 2723 }{ 2724 {"MemoryStore", &StreamConfig{Name: "MY_WQ", Storage: MemoryStorage, Retention: WorkQueuePolicy}}, 2725 {"FileStore", &StreamConfig{Name: "MY_WQ", Storage: FileStorage, Retention: WorkQueuePolicy}}, 2726 } 2727 for _, c := range cases { 2728 t.Run(c.name, func(t *testing.T) { 2729 s := RunBasicJetStreamServer(t) 2730 defer s.Shutdown() 2731 2732 mset, err := s.GlobalAccount().addStream(c.mconfig) 2733 if err != nil { 2734 t.Fatalf("Unexpected error adding stream: %v", err) 2735 } 2736 defer mset.delete() 2737 2738 nc := clientConnectToServer(t, s) 2739 defer nc.Close() 2740 2741 // Now load up some messages. 2742 toSend := 10 2743 for i := 0; i < toSend; i++ { 2744 sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!") 2745 } 2746 state := mset.state() 2747 if state.Msgs != uint64(toSend) { 2748 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 2749 } 2750 2751 o, err := mset.addConsumer(&ConsumerConfig{Durable: "PBO", AckPolicy: AckExplicit}) 2752 require_NoError(t, err) 2753 defer o.delete() 2754 2755 getMsg := func(sseq, dseq int) *nats.Msg { 2756 t.Helper() 2757 m, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 2758 if err != nil { 2759 t.Fatalf("Unexpected error: %v", err) 2760 } 2761 rsseq, rdseq, _, _, _ := replyInfo(m.Reply) 2762 if rdseq != uint64(dseq) { 2763 t.Fatalf("Expected delivered sequence of %d , got %d", dseq, rdseq) 2764 } 2765 if rsseq != uint64(sseq) { 2766 t.Fatalf("Expected store sequence of %d , got %d", sseq, rsseq) 2767 } 2768 return m 2769 } 2770 2771 for i := 1; i <= 5; i++ { 2772 m := getMsg(i, i) 2773 // Ack the message here. 2774 m.Respond(nil) 2775 } 2776 2777 // Grab #6 2778 m := getMsg(6, 6) 2779 // NAK this one and make sure its processed. 2780 m.Respond(AckNak) 2781 nc.Flush() 2782 2783 // When we request again should be store sequence 6 again. 2784 getMsg(6, 7) 2785 // Then we should get 7, 8, etc. 2786 getMsg(7, 8) 2787 getMsg(8, 9) 2788 }) 2789 } 2790 } 2791 2792 func TestJetStreamWorkQueueWorkingIndicator(t *testing.T) { 2793 cases := []struct { 2794 name string 2795 mconfig *StreamConfig 2796 }{ 2797 {"MemoryStore", &StreamConfig{Name: "MY_WQ", Storage: MemoryStorage, Retention: WorkQueuePolicy}}, 2798 {"FileStore", &StreamConfig{Name: "MY_WQ", Storage: FileStorage, Retention: WorkQueuePolicy}}, 2799 } 2800 for _, c := range cases { 2801 t.Run(c.name, func(t *testing.T) { 2802 s := RunBasicJetStreamServer(t) 2803 defer s.Shutdown() 2804 2805 mset, err := s.GlobalAccount().addStream(c.mconfig) 2806 if err != nil { 2807 t.Fatalf("Unexpected error adding stream: %v", err) 2808 } 2809 defer mset.delete() 2810 2811 nc := clientConnectToServer(t, s) 2812 defer nc.Close() 2813 2814 // Now load up some messages. 
2815 toSend := 2 2816 for i := 0; i < toSend; i++ { 2817 sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!") 2818 } 2819 state := mset.state() 2820 if state.Msgs != uint64(toSend) { 2821 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 2822 } 2823 2824 ackWait := 100 * time.Millisecond 2825 2826 o, err := mset.addConsumer(&ConsumerConfig{Durable: "PBO", AckPolicy: AckExplicit, AckWait: ackWait}) 2827 require_NoError(t, err) 2828 defer o.delete() 2829 2830 getMsg := func(sseq, dseq int) *nats.Msg { 2831 t.Helper() 2832 m, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 2833 if err != nil { 2834 t.Fatalf("Unexpected error: %v", err) 2835 } 2836 rsseq, rdseq, _, _, _ := replyInfo(m.Reply) 2837 if rdseq != uint64(dseq) { 2838 t.Fatalf("Expected delivered sequence of %d , got %d", dseq, rdseq) 2839 } 2840 if rsseq != uint64(sseq) { 2841 t.Fatalf("Expected store sequence of %d , got %d", sseq, rsseq) 2842 } 2843 return m 2844 } 2845 2846 getMsg(1, 1) 2847 // Now wait past ackWait 2848 time.Sleep(ackWait * 2) 2849 2850 // We should get 1 back. 2851 m := getMsg(1, 2) 2852 2853 // Now let's take longer than ackWait to process but signal we are working on the message. 2854 timeout := time.Now().Add(3 * ackWait) 2855 for time.Now().Before(timeout) { 2856 m.Respond(AckProgress) 2857 nc.Flush() 2858 time.Sleep(ackWait / 5) 2859 } 2860 // We should get 2 here, not 1 since we have indicated we are working on it. 2861 m2 := getMsg(2, 3) 2862 time.Sleep(ackWait / 2) 2863 m2.Respond(AckProgress) 2864 2865 // Now should get 1 back then 2. 2866 m = getMsg(1, 4) 2867 m.Respond(nil) 2868 getMsg(2, 5) 2869 }) 2870 } 2871 } 2872 2873 func TestJetStreamWorkQueueTerminateDelivery(t *testing.T) { 2874 cases := []struct { 2875 name string 2876 mconfig *StreamConfig 2877 }{ 2878 {"MemoryStore", &StreamConfig{Name: "MY_WQ", Storage: MemoryStorage, Retention: WorkQueuePolicy}}, 2879 {"FileStore", &StreamConfig{Name: "MY_WQ", Storage: FileStorage, Retention: WorkQueuePolicy}}, 2880 } 2881 for _, c := range cases { 2882 t.Run(c.name, func(t *testing.T) { 2883 s := RunBasicJetStreamServer(t) 2884 defer s.Shutdown() 2885 2886 mset, err := s.GlobalAccount().addStream(c.mconfig) 2887 if err != nil { 2888 t.Fatalf("Unexpected error adding stream: %v", err) 2889 } 2890 defer mset.delete() 2891 2892 nc := clientConnectToServer(t, s) 2893 defer nc.Close() 2894 2895 // Now load up some messages. 
2896 toSend := 22 2897 for i := 0; i < toSend; i++ { 2898 sendStreamMsg(t, nc, c.mconfig.Name, "Hello World!") 2899 } 2900 state := mset.state() 2901 if state.Msgs != uint64(toSend) { 2902 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 2903 } 2904 2905 ackWait := 25 * time.Millisecond 2906 2907 o, err := mset.addConsumer(&ConsumerConfig{Durable: "PBO", AckPolicy: AckExplicit, AckWait: ackWait}) 2908 require_NoError(t, err) 2909 defer o.delete() 2910 2911 getMsg := func(sseq, dseq int) *nats.Msg { 2912 t.Helper() 2913 m, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 2914 if err != nil { 2915 t.Fatalf("Unexpected error: %v", err) 2916 } 2917 rsseq, rdseq, _, _, _ := replyInfo(m.Reply) 2918 if rdseq != uint64(dseq) { 2919 t.Fatalf("Expected delivered sequence of %d , got %d", dseq, rdseq) 2920 } 2921 if rsseq != uint64(sseq) { 2922 t.Fatalf("Expected store sequence of %d , got %d", sseq, rsseq) 2923 } 2924 return m 2925 } 2926 2927 // Make sure we get the correct advisory 2928 sub, _ := nc.SubscribeSync(JSAdvisoryConsumerMsgTerminatedPre + ".>") 2929 defer sub.Unsubscribe() 2930 2931 getMsg(1, 1) 2932 // Now wait past ackWait 2933 time.Sleep(ackWait * 2) 2934 2935 // We should get 1 back. 2936 m := getMsg(1, 2) 2937 // Now terminate 2938 m.Respond([]byte(fmt.Sprintf("%s with reason", string(AckTerm)))) 2939 time.Sleep(ackWait * 2) 2940 2941 // We should get 2 here, not 1 since we have indicated we wanted to terminate. 2942 getMsg(2, 3) 2943 2944 // Check advisory was delivered. 2945 am, err := sub.NextMsg(time.Second) 2946 if err != nil { 2947 t.Fatalf("Unexpected error: %v", err) 2948 } 2949 var adv JSConsumerDeliveryTerminatedAdvisory 2950 json.Unmarshal(am.Data, &adv) 2951 if adv.Stream != "MY_WQ" { 2952 t.Fatalf("Expected stream of %s, got %s", "MY_WQ", adv.Stream) 2953 } 2954 if adv.Consumer != "PBO" { 2955 t.Fatalf("Expected consumer of %s, got %s", "PBO", adv.Consumer) 2956 } 2957 if adv.StreamSeq != 1 { 2958 t.Fatalf("Expected stream sequence of %d, got %d", 1, adv.StreamSeq) 2959 } 2960 if adv.ConsumerSeq != 2 { 2961 t.Fatalf("Expected consumer sequence of %d, got %d", 2, adv.ConsumerSeq) 2962 } 2963 if adv.Deliveries != 2 { 2964 t.Fatalf("Expected delivery count of %d, got %d", 2, adv.Deliveries) 2965 } 2966 if adv.Reason != "with reason" { 2967 t.Fatalf("Advisory did not have a reason") 2968 } 2969 }) 2970 } 2971 } 2972 2973 func TestJetStreamConsumerAckAck(t *testing.T) { 2974 s := RunBasicJetStreamServer(t) 2975 defer s.Shutdown() 2976 2977 mname := "ACK-ACK" 2978 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname, Storage: MemoryStorage}) 2979 if err != nil { 2980 t.Fatalf("Unexpected error adding stream: %v", err) 2981 } 2982 defer mset.delete() 2983 2984 o, err := mset.addConsumer(&ConsumerConfig{Durable: "worker", AckPolicy: AckExplicit}) 2985 if err != nil { 2986 t.Fatalf("Expected no error with registered interest, got %v", err) 2987 } 2988 defer o.delete() 2989 rqn := o.requestNextMsgSubject() 2990 2991 nc := clientConnectToServer(t, s) 2992 defer nc.Close() 2993 2994 // 4 for number of ack protocols to test them all. 2995 for i := 0; i < 4; i++ { 2996 sendStreamMsg(t, nc, mname, "Hello World!") 2997 } 2998 2999 testAck := func(ackType []byte) { 3000 m, err := nc.Request(rqn, nil, 10*time.Millisecond) 3001 if err != nil { 3002 t.Fatalf("Unexpected error: %v", err) 3003 } 3004 // Send a request for the ack and make sure the server "ack's" the ack. 
3005 if _, err := nc.Request(m.Reply, ackType, 10*time.Millisecond); err != nil { 3006 t.Fatalf("Unexpected error on ack/ack: %v", err) 3007 } 3008 } 3009 3010 testAck(AckAck) 3011 testAck(AckNak) 3012 testAck(AckProgress) 3013 testAck(AckTerm) 3014 } 3015 3016 func TestJetStreamAckNext(t *testing.T) { 3017 s := RunBasicJetStreamServer(t) 3018 defer s.Shutdown() 3019 3020 mname := "ACKNXT" 3021 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname, Storage: MemoryStorage}) 3022 if err != nil { 3023 t.Fatalf("Unexpected error adding stream: %v", err) 3024 } 3025 defer mset.delete() 3026 3027 o, err := mset.addConsumer(&ConsumerConfig{Durable: "worker", AckPolicy: AckExplicit}) 3028 if err != nil { 3029 t.Fatalf("Expected no error with registered interest, got %v", err) 3030 } 3031 defer o.delete() 3032 3033 nc := clientConnectToServer(t, s) 3034 defer nc.Close() 3035 3036 for i := 0; i < 12; i++ { 3037 sendStreamMsg(t, nc, mname, fmt.Sprintf("msg %d", i)) 3038 } 3039 3040 q := make(chan *nats.Msg, 10) 3041 sub, err := nc.ChanSubscribe(nats.NewInbox(), q) 3042 if err != nil { 3043 t.Fatalf("SubscribeSync failed: %s", err) 3044 } 3045 3046 nc.PublishRequest(o.requestNextMsgSubject(), sub.Subject, []byte("1")) 3047 3048 // normal next should imply 1 3049 msg := <-q 3050 err = msg.RespondMsg(&nats.Msg{Reply: sub.Subject, Subject: msg.Reply, Data: AckNext}) 3051 if err != nil { 3052 t.Fatalf("RespondMsg failed: %s", err) 3053 } 3054 3055 // read 1 message and check ack was done etc 3056 msg = <-q 3057 if len(q) != 0 { 3058 t.Fatalf("Expected empty q got %d", len(q)) 3059 } 3060 if o.info().AckFloor.Stream != 1 { 3061 t.Fatalf("First message was not acknowledged") 3062 } 3063 if !bytes.Equal(msg.Data, []byte("msg 1")) { 3064 t.Fatalf("wrong message received, expected: msg 1 got %q", msg.Data) 3065 } 3066 3067 // now ack and request 5 more using a naked number 3068 err = msg.RespondMsg(&nats.Msg{Reply: sub.Subject, Subject: msg.Reply, Data: append(AckNext, []byte(" 5")...)}) 3069 if err != nil { 3070 t.Fatalf("RespondMsg failed: %s", err) 3071 } 3072 3073 getMsgs := func(start, count int) { 3074 t.Helper() 3075 3076 ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) 3077 defer cancel() 3078 3079 for i := start; i < count+1; i++ { 3080 select { 3081 case msg := <-q: 3082 expect := fmt.Sprintf("msg %d", i+1) 3083 if !bytes.Equal(msg.Data, []byte(expect)) { 3084 t.Fatalf("wrong message received, expected: %s got %#v", expect, msg) 3085 } 3086 case <-ctx.Done(): 3087 t.Fatalf("did not receive all messages") 3088 } 3089 } 3090 3091 } 3092 3093 getMsgs(1, 5) 3094 3095 // now ack and request 5 more using the full request 3096 err = msg.RespondMsg(&nats.Msg{Reply: sub.Subject, Subject: msg.Reply, Data: append(AckNext, []byte(`{"batch": 5}`)...)}) 3097 if err != nil { 3098 t.Fatalf("RespondMsg failed: %s", err) 3099 } 3100 3101 getMsgs(6, 10) 3102 3103 if o.info().AckFloor.Stream != 2 { 3104 t.Fatalf("second message was not acknowledged") 3105 } 3106 } 3107 3108 func TestJetStreamPublishDeDupe(t *testing.T) { 3109 s := RunBasicJetStreamServer(t) 3110 defer s.Shutdown() 3111 3112 mname := "DeDupe" 3113 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname, Storage: FileStorage, MaxAge: time.Hour, Subjects: []string{"foo.*"}}) 3114 if err != nil { 3115 t.Fatalf("Unexpected error adding stream: %v", err) 3116 } 3117 defer mset.delete() 3118 3119 // Check Duplicates setting. 
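// StreamDefaultDuplicatesWindow is the server's default de-duplication window (two minutes at the time of writing).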
3120 duplicates := mset.config().Duplicates 3121 if duplicates != StreamDefaultDuplicatesWindow { 3122 t.Fatalf("Expected a default of %v, got %v", StreamDefaultDuplicatesWindow, duplicates) 3123 } 3124 3125 cfg := mset.config() 3126 // Make sure can't be negative. 3127 cfg.Duplicates = -25 * time.Millisecond 3128 if err := mset.update(&cfg); err == nil { 3129 t.Fatalf("Expected an error but got none") 3130 } 3131 // Make sure can't be longer than age if its set. 3132 cfg.Duplicates = 2 * time.Hour 3133 if err := mset.update(&cfg); err == nil { 3134 t.Fatalf("Expected an error but got none") 3135 } 3136 3137 nc := clientConnectToServer(t, s) 3138 defer nc.Close() 3139 3140 sendMsg := func(seq uint64, id, msg string) *PubAck { 3141 t.Helper() 3142 m := nats.NewMsg(fmt.Sprintf("foo.%d", seq)) 3143 m.Header.Add(JSMsgId, id) 3144 m.Data = []byte(msg) 3145 resp, _ := nc.RequestMsg(m, 100*time.Millisecond) 3146 if resp == nil { 3147 t.Fatalf("No response for %q, possible timeout?", msg) 3148 } 3149 pa := getPubAckResponse(resp.Data) 3150 if pa == nil || pa.Error != nil { 3151 t.Fatalf("Expected a JetStreamPubAck, got %q", resp.Data) 3152 } 3153 if pa.Sequence != seq { 3154 t.Fatalf("Did not get correct sequence in PubAck, expected %d, got %d", seq, pa.Sequence) 3155 } 3156 return pa.PubAck 3157 } 3158 3159 expect := func(n uint64) { 3160 t.Helper() 3161 state := mset.state() 3162 if state.Msgs != n { 3163 t.Fatalf("Expected %d messages, got %d", n, state.Msgs) 3164 } 3165 } 3166 3167 sendMsg(1, "AA", "Hello DeDupe!") 3168 sendMsg(2, "BB", "Hello DeDupe!") 3169 sendMsg(3, "CC", "Hello DeDupe!") 3170 sendMsg(4, "ZZ", "Hello DeDupe!") 3171 expect(4) 3172 3173 sendMsg(1, "AA", "Hello DeDupe!") 3174 sendMsg(2, "BB", "Hello DeDupe!") 3175 sendMsg(4, "ZZ", "Hello DeDupe!") 3176 expect(4) 3177 3178 cfg = mset.config() 3179 cfg.Duplicates = 100 * time.Millisecond 3180 if err := mset.update(&cfg); err != nil { 3181 t.Fatalf("Unexpected error: %v", err) 3182 } 3183 3184 nmids := func(expected int) { 3185 t.Helper() 3186 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 3187 if nids := mset.numMsgIds(); nids != expected { 3188 return fmt.Errorf("Expected %d message ids, got %d", expected, nids) 3189 } 3190 return nil 3191 }) 3192 } 3193 3194 nmids(4) 3195 time.Sleep(cfg.Duplicates * 2) 3196 3197 sendMsg(5, "AAA", "Hello DeDupe!") 3198 sendMsg(6, "BBB", "Hello DeDupe!") 3199 sendMsg(7, "CCC", "Hello DeDupe!") 3200 sendMsg(8, "DDD", "Hello DeDupe!") 3201 sendMsg(9, "ZZZ", "Hello DeDupe!") 3202 nmids(5) 3203 // Eventually will drop to zero. 3204 nmids(0) 3205 3206 // Now test server restart 3207 cfg.Duplicates = 30 * time.Minute 3208 if err := mset.update(&cfg); err != nil { 3209 t.Fatalf("Unexpected error: %v", err) 3210 } 3211 mset.purge(nil) 3212 3213 // Send 5 new messages. 3214 sendMsg(10, "AAAA", "Hello DeDupe!") 3215 sendMsg(11, "BBBB", "Hello DeDupe!") 3216 sendMsg(12, "CCCC", "Hello DeDupe!") 3217 sendMsg(13, "DDDD", "Hello DeDupe!") 3218 sendMsg(14, "EEEE", "Hello DeDupe!") 3219 3220 // Stop current 3221 sd := s.JetStreamConfig().StoreDir 3222 s.Shutdown() 3223 // Restart. 3224 s = RunJetStreamServerOnPort(-1, sd) 3225 defer s.Shutdown() 3226 3227 nc = clientConnectToServer(t, s) 3228 defer nc.Close() 3229 3230 mset, _ = s.GlobalAccount().lookupStream(mname) 3231 if nms := mset.state().Msgs; nms != 5 { 3232 t.Fatalf("Expected 5 restored messages, got %d", nms) 3233 } 3234 nmids(5) 3235 3236 // Send same and make sure duplicate detection still works. 
3237 // Send 5 duplicate messages. 3238 sendMsg(10, "AAAA", "Hello DeDupe!") 3239 sendMsg(11, "BBBB", "Hello DeDupe!") 3240 sendMsg(12, "CCCC", "Hello DeDupe!") 3241 sendMsg(13, "DDDD", "Hello DeDupe!") 3242 sendMsg(14, "EEEE", "Hello DeDupe!") 3243 3244 if nms := mset.state().Msgs; nms != 5 { 3245 t.Fatalf("Expected 5 restored messages, got %d", nms) 3246 } 3247 nmids(5) 3248 3249 // Check we set duplicate properly. 3250 pa := sendMsg(10, "AAAA", "Hello DeDupe!") 3251 if !pa.Duplicate { 3252 t.Fatalf("Expected duplicate to be set") 3253 } 3254 3255 // Purge should NOT wipe the msgIds. They should still persist. 3256 mset.purge(nil) 3257 nmids(5) 3258 } 3259 3260 func getPubAckResponse(msg []byte) *JSPubAckResponse { 3261 var par JSPubAckResponse 3262 if err := json.Unmarshal(msg, &par); err != nil { 3263 return nil 3264 } 3265 return &par 3266 } 3267 3268 func TestJetStreamPublishExpect(t *testing.T) { 3269 s := RunBasicJetStreamServer(t) 3270 defer s.Shutdown() 3271 3272 mname := "EXPECT" 3273 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname, Storage: FileStorage, MaxAge: time.Hour, Subjects: []string{"foo.*"}}) 3274 if err != nil { 3275 t.Fatalf("Unexpected error adding stream: %v", err) 3276 } 3277 defer mset.delete() 3278 3279 nc := clientConnectToServer(t, s) 3280 defer nc.Close() 3281 3282 // Test that we get no error when expected stream is correct. 3283 m := nats.NewMsg("foo.bar") 3284 m.Data = []byte("HELLO") 3285 m.Header.Set(JSExpectedStream, mname) 3286 resp, err := nc.RequestMsg(m, 100*time.Millisecond) 3287 require_NoError(t, err) 3288 if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error != nil { 3289 t.Fatalf("Expected a valid JetStreamPubAck, got %q", resp.Data) 3290 } 3291 3292 // Now test that we get an error back when expecting a different stream. 3293 m.Header.Set(JSExpectedStream, "ORDERS") 3294 resp, err = nc.RequestMsg(m, 100*time.Millisecond) 3295 require_NoError(t, err) 3296 if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error == nil { 3297 t.Fatalf("Expected an error, got %q", resp.Data) 3298 } 3299 3300 // Now test that we get an error back when expecting a different sequence number. 3301 m.Header.Set(JSExpectedStream, mname) 3302 m.Header.Set(JSExpectedLastSeq, "10") 3303 resp, err = nc.RequestMsg(m, 100*time.Millisecond) 3304 require_NoError(t, err) 3305 if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error == nil { 3306 t.Fatalf("Expected an error, got %q", resp.Data) 3307 } 3308 3309 // Or if we expect that there are no messages by setting "0" as the expected last seq 3310 m.Header.Set(JSExpectedLastSeq, "0") 3311 resp, err = nc.RequestMsg(m, 100*time.Millisecond) 3312 require_NoError(t, err) 3313 if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error == nil { 3314 t.Fatalf("Expected an error, got %q", resp.Data) 3315 } 3316 3317 // Now send a message with a message ID and make sure we can match that. 
3318 m = nats.NewMsg("foo.bar") 3319 m.Data = []byte("HELLO") 3320 m.Header.Set(JSMsgId, "AAA") 3321 if _, err = nc.RequestMsg(m, 100*time.Millisecond); err != nil { 3322 t.Fatalf("Unexpected error: %v", err) 3323 } 3324 3325 // Now try again with new message ID but require last one to be 'BBB' 3326 m.Header.Set(JSMsgId, "ZZZ") 3327 m.Header.Set(JSExpectedLastMsgId, "BBB") 3328 resp, err = nc.RequestMsg(m, 100*time.Millisecond) 3329 require_NoError(t, err) 3330 if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error == nil { 3331 t.Fatalf("Expected an error, got %q", resp.Data) 3332 } 3333 3334 // Restart the server and make sure we remember/rebuild last seq and last msgId. 3335 // Stop current 3336 sd := s.JetStreamConfig().StoreDir 3337 s.Shutdown() 3338 // Restart. 3339 s = RunJetStreamServerOnPort(-1, sd) 3340 defer s.Shutdown() 3341 3342 nc = clientConnectToServer(t, s) 3343 defer nc.Close() 3344 3345 // Our last sequence was 2 and last msgId was "AAA" 3346 m = nats.NewMsg("foo.baz") 3347 m.Data = []byte("HELLO AGAIN") 3348 m.Header.Set(JSExpectedLastSeq, "2") 3349 m.Header.Set(JSExpectedLastMsgId, "AAA") 3350 m.Header.Set(JSMsgId, "BBB") 3351 resp, err = nc.RequestMsg(m, 100*time.Millisecond) 3352 require_NoError(t, err) 3353 if pa := getPubAckResponse(resp.Data); pa == nil || pa.Error != nil { 3354 t.Fatalf("Expected a valid JetStreamPubAck, got %q", resp.Data) 3355 } 3356 } 3357 3358 func TestJetStreamPullConsumerRemoveInterest(t *testing.T) { 3359 s := RunBasicJetStreamServer(t) 3360 defer s.Shutdown() 3361 3362 mname := "MYS-PULL" 3363 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname, Storage: MemoryStorage}) 3364 if err != nil { 3365 t.Fatalf("Unexpected error adding stream: %v", err) 3366 } 3367 defer mset.delete() 3368 3369 wcfg := &ConsumerConfig{Durable: "worker", AckPolicy: AckExplicit} 3370 o, err := mset.addConsumer(wcfg) 3371 if err != nil { 3372 t.Fatalf("Expected no error with registered interest, got %v", err) 3373 } 3374 rqn := o.requestNextMsgSubject() 3375 defer o.delete() 3376 3377 nc := clientConnectToServer(t, s) 3378 defer nc.Close() 3379 3380 // Ask for a message even though one is not there. This will queue us up for waiting. 3381 if _, err := nc.Request(rqn, nil, 10*time.Millisecond); err == nil { 3382 t.Fatalf("Expected an error, got none") 3383 } 3384 3385 // This is using new style request mechanism. so drop the connection itself to get rid of interest. 3386 nc.Close() 3387 3388 // Wait for client cleanup 3389 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 3390 if n := s.NumClients(); err != nil || n != 0 { 3391 return fmt.Errorf("Still have %d clients", n) 3392 } 3393 return nil 3394 }) 3395 3396 nc = clientConnectToServer(t, s) 3397 defer nc.Close() 3398 3399 // Send a message 3400 sendStreamMsg(t, nc, mname, "Hello World!") 3401 3402 msg, err := nc.Request(rqn, nil, time.Second) 3403 require_NoError(t, err) 3404 _, dseq, dc, _, _ := replyInfo(msg.Reply) 3405 if dseq != 1 { 3406 t.Fatalf("Expected consumer sequence of 1, got %d", dseq) 3407 } 3408 if dc != 1 { 3409 t.Fatalf("Expected delivery count of 1, got %d", dc) 3410 } 3411 3412 // Now do old school request style and more than one waiting. 3413 nc = clientConnectWithOldRequest(t, s) 3414 defer nc.Close() 3415 3416 // Now queue up 10 waiting via failed requests. 
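// Old-style requests use a dedicated reply inbox per request rather than the single muxed inbox of the new style, so this exercises a different interest-cleanup path on the server.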
3417 for i := 0; i < 10; i++ { 3418 if _, err := nc.Request(rqn, nil, 1*time.Millisecond); err == nil { 3419 t.Fatalf("Expected an error, got none") 3420 } 3421 } 3422 3423 // Send a second message 3424 sendStreamMsg(t, nc, mname, "Hello World!") 3425 3426 msg, err = nc.Request(rqn, nil, time.Second) 3427 require_NoError(t, err) 3428 _, dseq, dc, _, _ = replyInfo(msg.Reply) 3429 if dseq != 2 { 3430 t.Fatalf("Expected consumer sequence of 2, got %d", dseq) 3431 } 3432 if dc != 1 { 3433 t.Fatalf("Expected delivery count of 1, got %d", dc) 3434 } 3435 } 3436 3437 func TestJetStreamConsumerRateLimit(t *testing.T) { 3438 s := RunBasicJetStreamServer(t) 3439 defer s.Shutdown() 3440 3441 mname := "RATELIMIT" 3442 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname, Storage: FileStorage}) 3443 if err != nil { 3444 t.Fatalf("Unexpected error adding stream: %v", err) 3445 } 3446 3447 nc := clientConnectToServer(t, s) 3448 defer nc.Close() 3449 3450 msgSize := 128 * 1024 3451 msg := make([]byte, msgSize) 3452 crand.Read(msg) 3453 3454 // 10MB 3455 totalSize := 10 * 1024 * 1024 3456 toSend := totalSize / msgSize 3457 for i := 0; i < toSend; i++ { 3458 nc.Publish(mname, msg) 3459 } 3460 nc.Flush() 3461 3462 checkFor(t, 5*time.Second, 100*time.Millisecond, func() error { 3463 state := mset.state() 3464 if state.Msgs != uint64(toSend) { 3465 return fmt.Errorf("Expected %d messages, got %d", toSend, state.Msgs) 3466 } 3467 return nil 3468 }) 3469 3470 // 100Mbit 3471 rateLimit := uint64(100 * 1024 * 1024) 3472 // Make sure if you set a rate with a pull based consumer it errors. 3473 _, err = mset.addConsumer(&ConsumerConfig{Durable: "to", AckPolicy: AckExplicit, RateLimit: rateLimit}) 3474 if err == nil { 3475 t.Fatalf("Expected an error, got none") 3476 } 3477 3478 // Now create one and measure the rate delivered. 3479 o, err := mset.addConsumer(&ConsumerConfig{ 3480 Durable: "rate", 3481 DeliverSubject: "to", 3482 RateLimit: rateLimit, 3483 AckPolicy: AckNone}) 3484 require_NoError(t, err) 3485 defer o.delete() 3486 3487 var received int 3488 done := make(chan bool) 3489 3490 start := time.Now() 3491 3492 nc.Subscribe("to", func(m *nats.Msg) { 3493 received++ 3494 if received >= toSend { 3495 done <- true 3496 } 3497 }) 3498 nc.Flush() 3499 3500 select { 3501 case <-done: 3502 case <-time.After(5 * time.Second): 3503 t.Fatalf("Did not receive all the messages in time") 3504 } 3505 3506 tt := time.Since(start) 3507 rate := float64(8*toSend*msgSize) / tt.Seconds() 3508 if rate > float64(rateLimit)*1.25 { 3509 t.Fatalf("Exceeded desired rate of %d mbps, got %0.f mbps", rateLimit/(1024*1024), rate/(1024*1024)) 3510 } 3511 } 3512 3513 func TestJetStreamEphemeralConsumerRecoveryAfterServerRestart(t *testing.T) { 3514 s := RunBasicJetStreamServer(t) 3515 defer s.Shutdown() 3516 3517 mname := "MYS" 3518 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname, Storage: FileStorage}) 3519 if err != nil { 3520 t.Fatalf("Unexpected error adding stream: %v", err) 3521 } 3522 defer mset.delete() 3523 3524 nc := clientConnectToServer(t, s) 3525 defer nc.Close() 3526 3527 sub, _ := nc.SubscribeSync(nats.NewInbox()) 3528 defer sub.Unsubscribe() 3529 nc.Flush() 3530 3531 o, err := mset.addConsumer(&ConsumerConfig{ 3532 DeliverSubject: sub.Subject, 3533 AckPolicy: AckExplicit, 3534 }) 3535 if err != nil { 3536 t.Fatalf("Error creating consumer: %v", err) 3537 } 3538 defer o.delete() 3539 3540 // Snapshot our name. 
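// The consumer is ephemeral, so its name was generated by the server; capture it to look the consumer up again after restarts.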
3541 oname := o.String() 3542 3543 // Send 100 messages 3544 for i := 0; i < 100; i++ { 3545 sendStreamMsg(t, nc, mname, "Hello World!") 3546 } 3547 if state := mset.state(); state.Msgs != 100 { 3548 t.Fatalf("Expected %d messages, got %d", 100, state.Msgs) 3549 } 3550 3551 // Read 7 messages and ack them. 3552 for i := 0; i <= 6; i++ { 3553 if m, err := sub.NextMsg(time.Second); err == nil { 3554 m.Respond(nil) 3555 } else { 3556 t.Fatalf("Unexpected error: %v", err) 3557 } 3558 } 3559 3560 // Capture port since it was dynamic. 3561 u, _ := url.Parse(s.ClientURL()) 3562 port, _ := strconv.Atoi(u.Port()) 3563 3564 restartServer := func() { 3565 t.Helper() 3566 // Stop current 3567 sd := s.JetStreamConfig().StoreDir 3568 s.Shutdown() 3569 // Restart. 3570 s = RunJetStreamServerOnPort(port, sd) 3571 } 3572 3573 // Do twice 3574 for i := 0; i < 2; i++ { 3575 // Restart. 3576 restartServer() 3577 defer s.Shutdown() 3578 3579 mset, err = s.GlobalAccount().lookupStream(mname) 3580 if err != nil { 3581 t.Fatalf("Expected to find a stream for %q", mname) 3582 } 3583 o = mset.lookupConsumer(oname) 3584 if o == nil { 3585 t.Fatalf("Error looking up consumer %q", oname) 3586 } 3587 // Make sure config does not have durable. 3588 if cfg := o.config(); cfg.Durable != _EMPTY_ { 3589 t.Fatalf("Expected no durable to be set") 3590 } 3591 // Wait for it to become active 3592 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 3593 if !o.isActive() { 3594 return fmt.Errorf("Consumer not active") 3595 } 3596 return nil 3597 }) 3598 } 3599 3600 // Now close the connection. Make sure this acts like an ephemeral and goes away. 3601 o.setInActiveDeleteThreshold(10 * time.Millisecond) 3602 nc.Close() 3603 3604 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 3605 if o := mset.lookupConsumer(oname); o != nil { 3606 return fmt.Errorf("Consumer still active") 3607 } 3608 return nil 3609 }) 3610 } 3611 3612 func TestJetStreamConsumerMaxDeliveryAndServerRestart(t *testing.T) { 3613 s := RunBasicJetStreamServer(t) 3614 defer s.Shutdown() 3615 3616 mname := "MYS" 3617 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname, Storage: FileStorage}) 3618 if err != nil { 3619 t.Fatalf("Unexpected error adding stream: %v", err) 3620 } 3621 defer mset.delete() 3622 3623 streamCreated := mset.createdTime() 3624 3625 dsubj := "D.TO" 3626 max := 3 3627 3628 o, err := mset.addConsumer(&ConsumerConfig{ 3629 Durable: "TO", 3630 DeliverSubject: dsubj, 3631 AckPolicy: AckExplicit, 3632 AckWait: 100 * time.Millisecond, 3633 MaxDeliver: max, 3634 }) 3635 defer o.delete() 3636 3637 consumerCreated := o.createdTime() 3638 // For calculation of consumer created times below. 3639 time.Sleep(5 * time.Millisecond) 3640 3641 nc := clientConnectToServer(t, s) 3642 defer nc.Close() 3643 3644 sub, _ := nc.SubscribeSync(dsubj) 3645 nc.Flush() 3646 defer sub.Unsubscribe() 3647 3648 // Send one message.
3649 sendStreamMsg(t, nc, mname, "order-1") 3650 3651 checkSubPending := func(numExpected int) { 3652 t.Helper() 3653 checkFor(t, time.Second, 10*time.Millisecond, func() error { 3654 if nmsgs, _, _ := sub.Pending(); nmsgs != numExpected { 3655 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected) 3656 } 3657 return nil 3658 }) 3659 } 3660 3661 checkNumMsgs := func(numExpected uint64) { 3662 t.Helper() 3663 mset, err = s.GlobalAccount().lookupStream(mname) 3664 if err != nil { 3665 t.Fatalf("Expected to find a stream for %q", mname) 3666 } 3667 state := mset.state() 3668 if state.Msgs != numExpected { 3669 t.Fatalf("Expected %d msgs, got %d", numExpected, state.Msgs) 3670 } 3671 } 3672 3673 // Wait til we know we have max queued up. 3674 checkSubPending(max) 3675 3676 // Once here we have gone over the limit for the 1st message for max deliveries. 3677 // Send second 3678 sendStreamMsg(t, nc, mname, "order-2") 3679 3680 // Just wait for first delivery + one redelivery. 3681 checkSubPending(max + 2) 3682 3683 // Capture port since it was dynamic. 3684 u, _ := url.Parse(s.ClientURL()) 3685 port, _ := strconv.Atoi(u.Port()) 3686 3687 restartServer := func() { 3688 t.Helper() 3689 sd := s.JetStreamConfig().StoreDir 3690 // Stop current 3691 s.Shutdown() 3692 // Restart. 3693 s = RunJetStreamServerOnPort(port, sd) 3694 } 3695 3696 waitForClientReconnect := func() { 3697 checkFor(t, 2500*time.Millisecond, 5*time.Millisecond, func() error { 3698 if !nc.IsConnected() { 3699 return fmt.Errorf("Not connected") 3700 } 3701 return nil 3702 }) 3703 } 3704 3705 // Restart. 3706 restartServer() 3707 defer s.Shutdown() 3708 3709 checkNumMsgs(2) 3710 3711 // Wait for client to be reconnected. 3712 waitForClientReconnect() 3713 3714 // Once we are here send third order. 3715 sendStreamMsg(t, nc, mname, "order-3") 3716 checkNumMsgs(3) 3717 3718 // Restart. 3719 restartServer() 3720 defer s.Shutdown() 3721 3722 checkNumMsgs(3) 3723 3724 // Wait for client to be reconnected. 3725 waitForClientReconnect() 3726 3727 // Now we should have max times three on our sub. 3728 checkSubPending(max * 3) 3729 3730 // Now do some checks on created timestamps. 3731 mset, err = s.GlobalAccount().lookupStream(mname) 3732 if err != nil { 3733 t.Fatalf("Expected to find a stream for %q", mname) 3734 } 3735 if mset.createdTime() != streamCreated { 3736 t.Fatalf("Stream creation time not restored, wanted %v, got %v", streamCreated, mset.createdTime()) 3737 } 3738 o = mset.lookupConsumer("TO") 3739 if o == nil { 3740 t.Fatalf("Error looking up consumer: %v", err) 3741 } 3742 // Consumer created times can have a very small skew. 3743 delta := o.createdTime().Sub(consumerCreated) 3744 if delta > 5*time.Millisecond { 3745 t.Fatalf("Consumer creation time not restored, wanted %v, got %v", consumerCreated, o.createdTime()) 3746 } 3747 } 3748 3749 func TestJetStreamDeleteConsumerAndServerRestart(t *testing.T) { 3750 s := RunBasicJetStreamServer(t) 3751 defer s.Shutdown() 3752 3753 sendSubj := "MYQ" 3754 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: sendSubj, Storage: FileStorage}) 3755 if err != nil { 3756 t.Fatalf("Unexpected error adding stream: %v", err) 3757 } 3758 defer mset.delete() 3759 3760 // Create basic work queue mode consumer. 
3761 oname := "WQ" 3762 o, err := mset.addConsumer(workerModeConfig(oname)) 3763 if err != nil { 3764 t.Fatalf("Expected no error with registered interest, got %v", err) 3765 } 3766 3767 // Now delete and then we will restart the 3768 o.delete() 3769 3770 if numo := mset.numConsumers(); numo != 0 { 3771 t.Fatalf("Expected to have zero consumers, got %d", numo) 3772 } 3773 3774 // Capture port since it was dynamic. 3775 u, _ := url.Parse(s.ClientURL()) 3776 port, _ := strconv.Atoi(u.Port()) 3777 sd := s.JetStreamConfig().StoreDir 3778 3779 // Stop current 3780 s.Shutdown() 3781 3782 // Restart. 3783 s = RunJetStreamServerOnPort(port, sd) 3784 defer s.Shutdown() 3785 3786 mset, err = s.GlobalAccount().lookupStream(sendSubj) 3787 if err != nil { 3788 t.Fatalf("Expected to find a stream for %q", sendSubj) 3789 } 3790 3791 if numo := mset.numConsumers(); numo != 0 { 3792 t.Fatalf("Expected to have zero consumers, got %d", numo) 3793 } 3794 } 3795 3796 func TestJetStreamRedeliveryAfterServerRestart(t *testing.T) { 3797 s := RunBasicJetStreamServer(t) 3798 defer s.Shutdown() 3799 3800 sendSubj := "MYQ" 3801 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: sendSubj, Storage: FileStorage}) 3802 if err != nil { 3803 t.Fatalf("Unexpected error adding stream: %v", err) 3804 } 3805 defer mset.delete() 3806 3807 nc := clientConnectToServer(t, s) 3808 defer nc.Close() 3809 3810 // Now load up some messages. 3811 toSend := 25 3812 for i := 0; i < toSend; i++ { 3813 sendStreamMsg(t, nc, sendSubj, "Hello World!") 3814 } 3815 state := mset.state() 3816 if state.Msgs != uint64(toSend) { 3817 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 3818 } 3819 3820 sub, _ := nc.SubscribeSync(nats.NewInbox()) 3821 defer sub.Unsubscribe() 3822 nc.Flush() 3823 3824 o, err := mset.addConsumer(&ConsumerConfig{ 3825 Durable: "TO", 3826 DeliverSubject: sub.Subject, 3827 AckPolicy: AckExplicit, 3828 AckWait: 100 * time.Millisecond, 3829 }) 3830 require_NoError(t, err) 3831 defer o.delete() 3832 3833 checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error { 3834 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend { 3835 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend) 3836 } 3837 return nil 3838 }) 3839 3840 // Capture port since it was dynamic. 3841 u, _ := url.Parse(s.ClientURL()) 3842 port, _ := strconv.Atoi(u.Port()) 3843 sd := s.JetStreamConfig().StoreDir 3844 3845 // Stop current 3846 s.Shutdown() 3847 3848 // Restart. 3849 s = RunJetStreamServerOnPort(port, sd) 3850 defer s.Shutdown() 3851 3852 // Don't wait for reconnect from old client. 
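// Instead, open a fresh connection and resubscribe on the same delivery
// subject; the durable consumer should redeliver everything that was never
// acked before the restart.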
3853 nc = clientConnectToServer(t, s) 3854 defer nc.Close() 3855 3856 sub, _ = nc.SubscribeSync(sub.Subject) 3857 defer sub.Unsubscribe() 3858 3859 checkFor(t, time.Second, 50*time.Millisecond, func() error { 3860 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend { 3861 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend) 3862 } 3863 return nil 3864 }) 3865 } 3866 3867 func TestJetStreamSnapshots(t *testing.T) { 3868 s := RunBasicJetStreamServer(t) 3869 defer s.Shutdown() 3870 3871 mname := "MY-STREAM" 3872 subjects := []string{"foo", "bar", "baz"} 3873 cfg := StreamConfig{ 3874 Name: mname, 3875 Storage: FileStorage, 3876 Subjects: subjects, 3877 MaxMsgs: 1000, 3878 } 3879 3880 acc := s.GlobalAccount() 3881 mset, err := acc.addStream(&cfg) 3882 if err != nil { 3883 t.Fatalf("Unexpected error adding stream: %v", err) 3884 } 3885 3886 nc := clientConnectToServer(t, s) 3887 defer nc.Close() 3888 3889 // Make sure we send some as floor. 3890 toSend := rand.Intn(200) + 22 3891 for i := 1; i <= toSend; i++ { 3892 msg := fmt.Sprintf("Hello World %d", i) 3893 subj := subjects[rand.Intn(len(subjects))] 3894 sendStreamMsg(t, nc, subj, msg) 3895 } 3896 3897 // Create up to 10 consumers. 3898 numConsumers := rand.Intn(10) + 1 3899 var obs []obsi 3900 for i := 1; i <= numConsumers; i++ { 3901 cname := fmt.Sprintf("WQ-%d", i) 3902 o, err := mset.addConsumer(workerModeConfig(cname)) 3903 if err != nil { 3904 t.Fatalf("Unexpected error: %v", err) 3905 } 3906 // Now grab some messages. 3907 toReceive := rand.Intn(toSend/2) + 1 3908 for r := 0; r < toReceive; r++ { 3909 resp, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 3910 if err != nil { 3911 t.Fatalf("Unexpected error: %v", err) 3912 } 3913 if resp != nil { 3914 resp.Respond(nil) 3915 } 3916 } 3917 obs = append(obs, obsi{o.config(), toReceive}) 3918 } 3919 nc.Flush() 3920 3921 // Snapshot state of the stream and consumers. 3922 info := info{mset.config(), mset.state(), obs} 3923 3924 sr, err := mset.snapshot(5*time.Second, false, true) 3925 if err != nil { 3926 t.Fatalf("Error getting snapshot: %v", err) 3927 } 3928 zr := sr.Reader 3929 snapshot, err := io.ReadAll(zr) 3930 if err != nil { 3931 t.Fatalf("Error reading snapshot") 3932 } 3933 // Try to restore from snapshot with current stream present, should error. 3934 r := bytes.NewReader(snapshot) 3935 if _, err := acc.RestoreStream(&info.cfg, r); err == nil { 3936 t.Fatalf("Expected an error trying to restore existing stream") 3937 } else if !strings.Contains(err.Error(), "name already in use") { 3938 t.Fatalf("Incorrect error received: %v", err) 3939 } 3940 // Now delete so we can restore. 3941 pusage := acc.JetStreamUsage() 3942 mset.delete() 3943 r.Reset(snapshot) 3944 3945 mset, err = acc.RestoreStream(&info.cfg, r) 3946 require_NoError(t, err) 3947 // Now compare to make sure they are equal. 3948 if nusage := acc.JetStreamUsage(); !reflect.DeepEqual(nusage, pusage) { 3949 t.Fatalf("Usage does not match after restore: %+v vs %+v", nusage, pusage) 3950 } 3951 if state := mset.state(); !reflect.DeepEqual(state, info.state) { 3952 t.Fatalf("State does not match: %+v vs %+v", state, info.state) 3953 } 3954 if cfg := mset.config(); !reflect.DeepEqual(cfg, info.cfg) { 3955 t.Fatalf("Configs do not match: %+v vs %+v", cfg, info.cfg) 3956 } 3957 // Consumers. 
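// Every consumer captured in the snapshot should come back, and each should
// resume at the sequence right after the messages it had already processed.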
3958 if mset.numConsumers() != len(info.obs) { 3959 t.Fatalf("Number of consumers do not match: %d vs %d", mset.numConsumers(), len(info.obs)) 3960 } 3961 for _, oi := range info.obs { 3962 if o := mset.lookupConsumer(oi.cfg.Durable); o != nil { 3963 if uint64(oi.ack+1) != o.nextSeq() { 3964 t.Fatalf("[%v] Consumer next seq is not correct: %d vs %d", o.String(), oi.ack+1, o.nextSeq()) 3965 } 3966 } else { 3967 t.Fatalf("Expected to get an consumer") 3968 } 3969 } 3970 3971 // Now try restoring to a different 3972 s2 := RunBasicJetStreamServer(t) 3973 defer s2.Shutdown() 3974 3975 acc = s2.GlobalAccount() 3976 r.Reset(snapshot) 3977 mset, err = acc.RestoreStream(&info.cfg, r) 3978 require_NoError(t, err) 3979 3980 o := mset.lookupConsumer("WQ-1") 3981 if o == nil { 3982 t.Fatalf("Could not lookup consumer") 3983 } 3984 3985 nc2 := clientConnectToServer(t, s2) 3986 defer nc2.Close() 3987 3988 // Make sure we can read messages. 3989 if _, err := nc2.Request(o.requestNextMsgSubject(), nil, 5*time.Second); err != nil { 3990 t.Fatalf("Unexpected error getting next message: %v", err) 3991 } 3992 } 3993 3994 func TestJetStreamSnapshotsAPI(t *testing.T) { 3995 lopts := DefaultTestOptions 3996 lopts.ServerName = "LS" 3997 lopts.Port = -1 3998 lopts.LeafNode.Host = lopts.Host 3999 lopts.LeafNode.Port = -1 4000 4001 ls := RunServer(&lopts) 4002 defer ls.Shutdown() 4003 4004 opts := DefaultTestOptions 4005 opts.ServerName = "S" 4006 opts.Port = -1 4007 tdir := t.TempDir() 4008 opts.JetStream = true 4009 opts.JetStreamDomain = "domain" 4010 opts.StoreDir = tdir 4011 maxStore := int64(1024 * 1024 * 1024) 4012 opts.maxStoreSet = true 4013 opts.JetStreamMaxStore = maxStore 4014 rurl, _ := url.Parse(fmt.Sprintf("nats-leaf://%s:%d", lopts.LeafNode.Host, lopts.LeafNode.Port)) 4015 opts.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{rurl}}} 4016 4017 s := RunServer(&opts) 4018 defer s.Shutdown() 4019 4020 checkLeafNodeConnected(t, s) 4021 4022 mname := "MY-STREAM" 4023 subjects := []string{"foo", "bar", "baz"} 4024 cfg := StreamConfig{ 4025 Name: mname, 4026 Storage: FileStorage, 4027 Subjects: subjects, 4028 MaxMsgs: 1000, 4029 } 4030 4031 acc := s.GlobalAccount() 4032 mset, err := acc.addStreamWithStore(&cfg, &FileStoreConfig{BlockSize: 128}) 4033 if err != nil { 4034 t.Fatalf("Unexpected error adding stream: %v", err) 4035 } 4036 4037 nc := clientConnectToServer(t, s) 4038 defer nc.Close() 4039 4040 toSend := rand.Intn(100) + 1 4041 for i := 1; i <= toSend; i++ { 4042 msg := fmt.Sprintf("Hello World %d", i) 4043 subj := subjects[rand.Intn(len(subjects))] 4044 sendStreamMsg(t, nc, subj, msg) 4045 } 4046 4047 o, err := mset.addConsumer(workerModeConfig("WQ")) 4048 require_NoError(t, err) 4049 // Now grab some messages. 
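// Pull and ack a random number of messages so the snapshot captures
// non-trivial consumer state, not just the stream contents.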
4050 toReceive := rand.Intn(toSend) + 1 4051 for r := 0; r < toReceive; r++ { 4052 resp, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 4053 if err != nil { 4054 t.Fatalf("Unexpected error: %v", err) 4055 } 4056 if resp != nil { 4057 resp.Respond(nil) 4058 } 4059 } 4060 4061 // Make sure we get proper error for non-existent request, streams,etc, 4062 rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, "foo"), nil, time.Second) 4063 if err != nil { 4064 t.Fatalf("Unexpected error on snapshot request: %v", err) 4065 } 4066 var resp JSApiStreamSnapshotResponse 4067 json.Unmarshal(rmsg.Data, &resp) 4068 if resp.Error == nil || resp.Error.Code != 400 || resp.Error.Description != "bad request" { 4069 t.Fatalf("Did not get correct error response: %+v", resp.Error) 4070 } 4071 4072 sreq := &JSApiStreamSnapshotRequest{} 4073 req, _ := json.Marshal(sreq) 4074 rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, "foo"), req, time.Second) 4075 if err != nil { 4076 t.Fatalf("Unexpected error on snapshot request: %v", err) 4077 } 4078 json.Unmarshal(rmsg.Data, &resp) 4079 if resp.Error == nil || resp.Error.Code != 404 || resp.Error.Description != "stream not found" { 4080 t.Fatalf("Did not get correct error response: %+v", resp.Error) 4081 } 4082 4083 rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, mname), req, time.Second) 4084 if err != nil { 4085 t.Fatalf("Unexpected error on snapshot request: %v", err) 4086 } 4087 json.Unmarshal(rmsg.Data, &resp) 4088 if resp.Error == nil || resp.Error.Code != 400 || resp.Error.Description != "deliver subject not valid" { 4089 t.Fatalf("Did not get correct error response: %+v", resp.Error) 4090 } 4091 4092 // Set delivery subject, do not subscribe yet. Want this to be an ok pattern. 4093 sreq.DeliverSubject = nats.NewInbox() 4094 // Just for test, usually left alone. 4095 sreq.ChunkSize = 1024 4096 req, _ = json.Marshal(sreq) 4097 rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, mname), req, time.Second) 4098 if err != nil { 4099 t.Fatalf("Unexpected error on snapshot request: %v", err) 4100 } 4101 resp.Error = nil 4102 json.Unmarshal(rmsg.Data, &resp) 4103 if resp.Error != nil { 4104 t.Fatalf("Did not get correct error response: %+v", resp.Error) 4105 } 4106 // Check that we have the config and the state. 4107 if resp.Config == nil { 4108 t.Fatalf("Expected a stream config in the response, got %+v\n", resp) 4109 } 4110 if resp.State == nil { 4111 t.Fatalf("Expected a stream state in the response, got %+v\n", resp) 4112 } 4113 4114 // Grab state for comparison. 4115 state := *resp.State 4116 config := *resp.Config 4117 4118 // Setup to process snapshot chunks. 4119 var snapshot []byte 4120 done := make(chan bool) 4121 4122 sub, _ := nc.Subscribe(sreq.DeliverSubject, func(m *nats.Msg) { 4123 // EOF 4124 if len(m.Data) == 0 { 4125 done <- true 4126 return 4127 } 4128 // Could be writing to a file here too. 4129 snapshot = append(snapshot, m.Data...) 4130 // Flow ack 4131 m.Respond(nil) 4132 }) 4133 defer sub.Unsubscribe() 4134 4135 // Wait to receive the snapshot. 4136 select { 4137 case <-done: 4138 case <-time.After(5 * time.Second): 4139 t.Fatalf("Did not receive our snapshot in time") 4140 } 4141 4142 // Now make sure this snapshot is legit. 4143 var rresp JSApiStreamRestoreResponse 4144 rreq := &JSApiStreamRestoreRequest{ 4145 Config: config, 4146 State: state, 4147 } 4148 req, _ = json.Marshal(rreq) 4149 4150 // Make sure we get an error since stream still exists. 
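// The restore flow below is two-phased: request the restore with the saved
// config and state, then stream the snapshot back in chunks on the returned
// deliver subject, finishing with an empty message as EOF.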
4151 rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, mname), req, time.Second) 4152 if err != nil { 4153 t.Fatalf("Unexpected error on snapshot request: %v", err) 4154 } 4155 json.Unmarshal(rmsg.Data, &rresp) 4156 if !IsNatsErr(rresp.Error, JSStreamNameExistRestoreFailedErr) { 4157 t.Fatalf("Did not get correct error response: %+v", rresp.Error) 4158 } 4159 4160 // Delete this stream. 4161 mset.delete() 4162 4163 // Sending no request message will error now. 4164 rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, mname), nil, time.Second) 4165 if err != nil { 4166 t.Fatalf("Unexpected error on snapshot request: %v", err) 4167 } 4168 // Make sure to clear. 4169 rresp.Error = nil 4170 json.Unmarshal(rmsg.Data, &rresp) 4171 if rresp.Error == nil || rresp.Error.Code != 400 || rresp.Error.Description != "bad request" { 4172 t.Fatalf("Did not get correct error response: %+v", rresp.Error) 4173 } 4174 4175 // This should work. 4176 rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, mname), req, time.Second) 4177 if err != nil { 4178 t.Fatalf("Unexpected error on snapshot request: %v", err) 4179 } 4180 // Make sure to clear. 4181 rresp.Error = nil 4182 json.Unmarshal(rmsg.Data, &rresp) 4183 if rresp.Error != nil { 4184 t.Fatalf("Got an unexpected error response: %+v", rresp.Error) 4185 } 4186 4187 // Can be any size message. 4188 var chunk [512]byte 4189 for r := bytes.NewReader(snapshot); ; { 4190 n, err := r.Read(chunk[:]) 4191 if err != nil { 4192 break 4193 } 4194 nc.Request(rresp.DeliverSubject, chunk[:n], time.Second) 4195 } 4196 nc.Request(rresp.DeliverSubject, nil, time.Second) 4197 4198 mset, err = acc.lookupStream(mname) 4199 if err != nil { 4200 t.Fatalf("Expected to find a stream for %q", mname) 4201 } 4202 if !reflect.DeepEqual(mset.state(), state) { 4203 t.Fatalf("Did not match states, %+v vs %+v", mset.state(), state) 4204 } 4205 4206 // Now ask that the stream be checked first. 4207 sreq.ChunkSize = 0 4208 sreq.CheckMsgs = true 4209 snapshot = snapshot[:0] 4210 4211 req, _ = json.Marshal(sreq) 4212 if _, err = nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, mname), req, 5*time.Second); err != nil { 4213 t.Fatalf("Unexpected error on snapshot request: %v", err) 4214 } 4215 // Wait to receive the snapshot. 4216 select { 4217 case <-done: 4218 case <-time.After(5 * time.Second): 4219 t.Fatalf("Did not receive our snapshot in time") 4220 } 4221 4222 // Now connect through a cluster server and make sure we can get things to work this way as well. 4223 // This client, connecting to a leaf without shared system account and domain needs to provide the domain explicitly. 4224 nc2 := clientConnectToServer(t, ls) 4225 defer nc2.Close() 4226 // Wait a bit for interest to propagate. 4227 time.Sleep(100 * time.Millisecond) 4228 4229 snapshot = snapshot[:0] 4230 4231 req, _ = json.Marshal(sreq) 4232 rmsg, err = nc2.Request(fmt.Sprintf(strings.ReplaceAll(JSApiStreamSnapshotT, JSApiPrefix, "$JS.domain.API"), mname), req, time.Second) 4233 if err != nil { 4234 t.Fatalf("Unexpected error on snapshot request: %v", err) 4235 } 4236 resp.Error = nil 4237 json.Unmarshal(rmsg.Data, &resp) 4238 if resp.Error != nil { 4239 t.Fatalf("Did not get correct error response: %+v", resp.Error) 4240 } 4241 // Wait to receive the snapshot. 4242 select { 4243 case <-done: 4244 case <-time.After(5 * time.Second): 4245 t.Fatalf("Did not receive our snapshot in time") 4246 } 4247 4248 // Now do a restore through the new client connection. 4249 // Delete this stream first. 
4250 mset, err = acc.lookupStream(mname) 4251 if err != nil { 4252 t.Fatalf("Expected to find a stream for %q", mname) 4253 } 4254 state = mset.state() 4255 mset.delete() 4256 4257 rmsg, err = nc2.Request(strings.ReplaceAll(JSApiStreamRestoreT, JSApiPrefix, "$JS.domain.API"), req, time.Second) 4258 if err != nil { 4259 t.Fatalf("Unexpected error on snapshot request: %v", err) 4260 } 4261 // Make sure to clear. 4262 rresp.Error = nil 4263 json.Unmarshal(rmsg.Data, &rresp) 4264 if rresp.Error != nil { 4265 t.Fatalf("Got an unexpected error response: %+v", rresp.Error) 4266 } 4267 4268 // Make sure when we send something without a reply subject the subscription is shutoff. 4269 r := bytes.NewReader(snapshot) 4270 n, _ := r.Read(chunk[:]) 4271 nc2.Publish(rresp.DeliverSubject, chunk[:n]) 4272 nc2.Flush() 4273 n, _ = r.Read(chunk[:]) 4274 if _, err := nc2.Request(rresp.DeliverSubject, chunk[:n], 100*time.Millisecond); err == nil { 4275 t.Fatalf("Expected restore subscription to be closed") 4276 } 4277 4278 req, _ = json.Marshal(rreq) 4279 rmsg, err = nc2.Request(strings.ReplaceAll(JSApiStreamRestoreT, JSApiPrefix, "$JS.domain.API"), req, time.Second) 4280 if err != nil { 4281 t.Fatalf("Unexpected error on snapshot request: %v", err) 4282 } 4283 // Make sure to clear. 4284 rresp.Error = nil 4285 json.Unmarshal(rmsg.Data, &rresp) 4286 if rresp.Error != nil { 4287 t.Fatalf("Got an unexpected error response: %+v", rresp.Error) 4288 } 4289 4290 for r := bytes.NewReader(snapshot); ; { 4291 n, err := r.Read(chunk[:]) 4292 if err != nil { 4293 break 4294 } 4295 // Make sure other side responds to reply subjects for ack flow. Optional. 4296 if _, err := nc2.Request(rresp.DeliverSubject, chunk[:n], time.Second); err != nil { 4297 t.Fatalf("Restore not honoring reply subjects for ack flow") 4298 } 4299 } 4300 4301 // For EOF this will send back stream info or an error. 4302 si, err := nc2.Request(rresp.DeliverSubject, nil, time.Second) 4303 if err != nil { 4304 t.Fatalf("Got an error restoring stream: %v", err) 4305 } 4306 var scResp JSApiStreamCreateResponse 4307 if err := json.Unmarshal(si.Data, &scResp); err != nil { 4308 t.Fatalf("Unexpected error: %v", err) 4309 } 4310 if scResp.Error != nil { 4311 t.Fatalf("Got an unexpected error from EOF on restore: %+v", scResp.Error) 4312 } 4313 4314 if !reflect.DeepEqual(scResp.StreamInfo.State, state) { 4315 t.Fatalf("Did not match states, %+v vs %+v", scResp.StreamInfo.State, state) 4316 } 4317 4318 // Now make sure that if we try to change the name/identity of the stream we get an error. 4319 mset, err = acc.lookupStream(mname) 4320 if err != nil { 4321 t.Fatalf("Expected to find a stream for %q", mname) 4322 } 4323 mset.state() 4324 mset.delete() 4325 4326 rreq.Config.Name = "NEW_STREAM" 4327 req, _ = json.Marshal(rreq) 4328 4329 rmsg, err = nc.Request(fmt.Sprintf(JSApiStreamRestoreT, rreq.Config.Name), req, time.Second) 4330 if err != nil { 4331 t.Fatalf("Unexpected error on snapshot request: %v", err) 4332 } 4333 // Make sure to clear. 4334 rresp.Error = nil 4335 json.Unmarshal(rmsg.Data, &rresp) 4336 // We should not get an error here. 
4337 if rresp.Error != nil { 4338 t.Fatalf("Got an unexpected error response: %+v", rresp.Error) 4339 } 4340 for r := bytes.NewReader(snapshot); ; { 4341 n, err := r.Read(chunk[:]) 4342 if err != nil { 4343 break 4344 } 4345 nc.Request(rresp.DeliverSubject, chunk[:n], time.Second) 4346 } 4347 4348 si, err = nc2.Request(rresp.DeliverSubject, nil, time.Second) 4349 require_NoError(t, err) 4350 4351 scResp.Error = nil 4352 if err := json.Unmarshal(si.Data, &scResp); err != nil { 4353 t.Fatalf("Unexpected error: %v", err) 4354 } 4355 if scResp.Error == nil { 4356 t.Fatalf("Expected an error but got none") 4357 } 4358 expect := "names do not match" 4359 if !strings.Contains(scResp.Error.Description, expect) { 4360 t.Fatalf("Expected description of %q, got %q", expect, scResp.Error.Description) 4361 } 4362 } 4363 4364 func TestJetStreamPubAckPerf(t *testing.T) { 4365 // Comment out to run, holding place for now. 4366 t.SkipNow() 4367 4368 s := RunBasicJetStreamServer(t) 4369 defer s.Shutdown() 4370 4371 nc, js := jsClientConnect(t, s) 4372 defer nc.Close() 4373 4374 if _, err := js.AddStream(&nats.StreamConfig{Name: "foo", Storage: nats.MemoryStorage}); err != nil { 4375 t.Fatalf("Unexpected error: %v", err) 4376 } 4377 4378 toSend := 1_000_000 4379 start := time.Now() 4380 for i := 0; i < toSend; i++ { 4381 js.PublishAsync("foo", []byte("OK")) 4382 } 4383 <-js.PublishAsyncComplete() 4384 tt := time.Since(start) 4385 fmt.Printf("time is %v\n", tt) 4386 fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds()) 4387 } 4388 4389 func TestJetStreamPubPerfWithFullStream(t *testing.T) { 4390 // Comment out to run, holding place for now. 4391 t.SkipNow() 4392 4393 s := RunBasicJetStreamServer(t) 4394 defer s.Shutdown() 4395 4396 nc, js := jsClientConnect(t, s) 4397 defer nc.Close() 4398 4399 toSend, msg := 1_000_000, []byte("OK") 4400 4401 _, err := js.AddStream(&nats.StreamConfig{Name: "foo", MaxMsgs: int64(toSend)}) 4402 require_NoError(t, err) 4403 4404 start := time.Now() 4405 for i := 0; i < toSend; i++ { 4406 js.PublishAsync("foo", msg) 4407 } 4408 <-js.PublishAsyncComplete() 4409 tt := time.Since(start) 4410 fmt.Printf("time is %v\n", tt) 4411 fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds()) 4412 4413 // Now do same amount but knowing we are at our limit. 4414 start = time.Now() 4415 for i := 0; i < toSend; i++ { 4416 js.PublishAsync("foo", msg) 4417 } 4418 <-js.PublishAsyncComplete() 4419 tt = time.Since(start) 4420 fmt.Printf("\ntime is %v\n", tt) 4421 fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds()) 4422 } 4423 4424 func TestJetStreamSnapshotsAPIPerf(t *testing.T) { 4425 // Comment out to run, holding place for now. 4426 t.SkipNow() 4427 4428 s := RunBasicJetStreamServer(t) 4429 defer s.Shutdown() 4430 4431 cfg := StreamConfig{ 4432 Name: "snap-perf", 4433 Storage: FileStorage, 4434 } 4435 4436 acc := s.GlobalAccount() 4437 if _, err := acc.addStream(&cfg); err != nil { 4438 t.Fatalf("Unexpected error adding stream: %v", err) 4439 } 4440 4441 nc := clientConnectToServer(t, s) 4442 defer nc.Close() 4443 4444 msg := make([]byte, 128*1024) 4445 // If you don't give gzip some data will spend too much time compressing everything to zero. 
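// Fill the payload with random bytes so the snapshot's compression stage sees
// realistic, incompressible data rather than all zeros.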
4446 crand.Read(msg) 4447 4448 for i := 0; i < 10000; i++ { 4449 nc.Publish("snap-perf", msg) 4450 } 4451 nc.Flush() 4452 4453 sreq := &JSApiStreamSnapshotRequest{DeliverSubject: nats.NewInbox()} 4454 req, _ := json.Marshal(sreq) 4455 rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamSnapshotT, "snap-perf"), req, time.Second) 4456 if err != nil { 4457 t.Fatalf("Unexpected error on snapshot request: %v", err) 4458 } 4459 4460 var resp JSApiStreamSnapshotResponse 4461 json.Unmarshal(rmsg.Data, &resp) 4462 if resp.Error != nil { 4463 t.Fatalf("Did not get correct error response: %+v", resp.Error) 4464 } 4465 4466 done := make(chan bool) 4467 total := 0 4468 sub, _ := nc.Subscribe(sreq.DeliverSubject, func(m *nats.Msg) { 4469 // EOF 4470 if len(m.Data) == 0 { 4471 m.Sub.Unsubscribe() 4472 done <- true 4473 return 4474 } 4475 // We don't do anything with the snapshot, just take 4476 // note of the size. 4477 total += len(m.Data) 4478 // Flow ack 4479 m.Respond(nil) 4480 }) 4481 defer sub.Unsubscribe() 4482 4483 start := time.Now() 4484 // Wait to receive the snapshot. 4485 select { 4486 case <-done: 4487 case <-time.After(30 * time.Second): 4488 t.Fatalf("Did not receive our snapshot in time") 4489 } 4490 td := time.Since(start) 4491 fmt.Printf("Received %d bytes in %v\n", total, td) 4492 fmt.Printf("Rate %.0f MB/s\n", float64(total)/td.Seconds()/(1024*1024)) 4493 } 4494 4495 func TestJetStreamActiveDelivery(t *testing.T) { 4496 cases := []struct { 4497 name string 4498 mconfig *StreamConfig 4499 }{ 4500 {"MemoryStore", &StreamConfig{Name: "ADS", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 4501 {"FileStore", &StreamConfig{Name: "ADS", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 4502 } 4503 for _, c := range cases { 4504 t.Run(c.name, func(t *testing.T) { 4505 s := RunBasicJetStreamServer(t) 4506 defer s.Shutdown() 4507 4508 mset, err := s.GlobalAccount().addStream(c.mconfig) 4509 if err != nil { 4510 t.Fatalf("Unexpected error adding stream: %v", err) 4511 } 4512 defer mset.delete() 4513 4514 nc := clientConnectToServer(t, s) 4515 defer nc.Close() 4516 4517 // Now load up some messages. 4518 toSend := 100 4519 sendSubj := "foo.22" 4520 for i := 0; i < toSend; i++ { 4521 sendStreamMsg(t, nc, sendSubj, "Hello World!") 4522 } 4523 state := mset.state() 4524 if state.Msgs != uint64(toSend) { 4525 t.Fatalf("Expected %d messages, got %d", toSend, state.Msgs) 4526 } 4527 4528 o, err := mset.addConsumer(&ConsumerConfig{Durable: "to", DeliverSubject: "d"}) 4529 if err != nil { 4530 t.Fatalf("Expected no error with registered interest, got %v", err) 4531 } 4532 defer o.delete() 4533 4534 // We have no active interest above. So consumer will be considered inactive. Let's subscribe and make sure 4535 // we get the messages instantly. This will test that we hook interest activation correctly. 
4536 sub, _ := nc.SubscribeSync("d") 4537 defer sub.Unsubscribe() 4538 nc.Flush() 4539 4540 checkFor(t, 100*time.Millisecond, 10*time.Millisecond, func() error { 4541 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend { 4542 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend) 4543 } 4544 return nil 4545 }) 4546 }) 4547 } 4548 } 4549 4550 func TestJetStreamEphemeralConsumers(t *testing.T) { 4551 cases := []struct { 4552 name string 4553 mconfig *StreamConfig 4554 }{ 4555 {"MemoryStore", &StreamConfig{Name: "EP", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 4556 {"FileStore", &StreamConfig{Name: "EP", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 4557 } 4558 for _, c := range cases { 4559 t.Run(c.name, func(t *testing.T) { 4560 s := RunBasicJetStreamServer(t) 4561 defer s.Shutdown() 4562 4563 mset, err := s.GlobalAccount().addStream(c.mconfig) 4564 if err != nil { 4565 t.Fatalf("Unexpected error adding stream: %v", err) 4566 } 4567 defer mset.delete() 4568 4569 nc := clientConnectToServer(t, s) 4570 defer nc.Close() 4571 4572 sub, _ := nc.SubscribeSync(nats.NewInbox()) 4573 defer sub.Unsubscribe() 4574 nc.Flush() 4575 4576 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject}) 4577 if err != nil { 4578 t.Fatalf("Unexpected error: %v", err) 4579 } 4580 if !o.isActive() { 4581 t.Fatalf("Expected the consumer to be considered active") 4582 } 4583 if numo := mset.numConsumers(); numo != 1 { 4584 t.Fatalf("Expected number of consumers to be 1, got %d", numo) 4585 } 4586 // Set our delete threshold to something low for testing purposes. 4587 o.setInActiveDeleteThreshold(100 * time.Millisecond) 4588 4589 // Make sure works now. 4590 nc.Request("foo.22", nil, 100*time.Millisecond) 4591 checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error { 4592 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 1 { 4593 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, 1) 4594 } 4595 return nil 4596 }) 4597 4598 // Now close the subscription, this should trip active state on the ephemeral consumer. 4599 sub.Unsubscribe() 4600 checkFor(t, time.Second, 10*time.Millisecond, func() error { 4601 if o.isActive() { 4602 return fmt.Errorf("Expected the ephemeral consumer to be considered inactive") 4603 } 4604 return nil 4605 }) 4606 // The reason for this still being 1 is that we give some time in case of a reconnect scenario. 4607 // We detect right away on the interest change but we wait for interest to be re-established. 4608 // This is in case server goes away but app is fine, we do not want to recycle those consumers. 4609 if numo := mset.numConsumers(); numo != 1 { 4610 t.Fatalf("Expected number of consumers to be 1, got %d", numo) 4611 } 4612 4613 // We should delete this one after the delete threshold. 
4614 checkFor(t, time.Second, 100*time.Millisecond, func() error { 4615 if numo := mset.numConsumers(); numo != 0 { 4616 return fmt.Errorf("Expected number of consumers to be 0, got %d", numo) 4617 } 4618 return nil 4619 }) 4620 }) 4621 } 4622 } 4623 4624 func TestJetStreamConsumerReconnect(t *testing.T) { 4625 cases := []struct { 4626 name string 4627 mconfig *StreamConfig 4628 }{ 4629 {"MemoryStore", &StreamConfig{Name: "ET", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 4630 {"FileStore", &StreamConfig{Name: "ET", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 4631 } 4632 for _, c := range cases { 4633 t.Run(c.name, func(t *testing.T) { 4634 s := RunBasicJetStreamServer(t) 4635 defer s.Shutdown() 4636 4637 mset, err := s.GlobalAccount().addStream(c.mconfig) 4638 if err != nil { 4639 t.Fatalf("Unexpected error adding stream: %v", err) 4640 } 4641 defer mset.delete() 4642 4643 nc := clientConnectToServer(t, s) 4644 defer nc.Close() 4645 4646 sub, _ := nc.SubscribeSync(nats.NewInbox()) 4647 defer sub.Unsubscribe() 4648 nc.Flush() 4649 4650 // Capture the subscription. 4651 delivery := sub.Subject 4652 4653 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: delivery, AckPolicy: AckExplicit}) 4654 if err != nil { 4655 t.Fatalf("Unexpected error: %v", err) 4656 } 4657 if !o.isActive() { 4658 t.Fatalf("Expected the consumer to be considered active") 4659 } 4660 if numo := mset.numConsumers(); numo != 1 { 4661 t.Fatalf("Expected number of consumers to be 1, got %d", numo) 4662 } 4663 4664 // We will simulate reconnect by unsubscribing on one connection and forming 4665 // the same on another. Once we have cluster tests we will do more testing on 4666 // reconnect scenarios. 4667 getMsg := func(seqno int) *nats.Msg { 4668 t.Helper() 4669 m, err := sub.NextMsg(time.Second) 4670 if err != nil { 4671 t.Fatalf("Unexpected error for %d: %v", seqno, err) 4672 } 4673 if seq := o.seqFromReply(m.Reply); seq != uint64(seqno) { 4674 t.Fatalf("Expected sequence of %d , got %d", seqno, seq) 4675 } 4676 m.Respond(nil) 4677 return m 4678 } 4679 4680 sendMsg := func() { 4681 t.Helper() 4682 if err := nc.Publish("foo.22", []byte("OK!")); err != nil { 4683 return 4684 } 4685 } 4686 4687 checkForInActive := func() { 4688 checkFor(t, 250*time.Millisecond, 50*time.Millisecond, func() error { 4689 if o.isActive() { 4690 return fmt.Errorf("Consumer is still active") 4691 } 4692 return nil 4693 }) 4694 } 4695 4696 // Send and Pull first message. 4697 sendMsg() // 1 4698 getMsg(1) 4699 // Cancel first one. 4700 sub.Unsubscribe() 4701 // Re-establish new sub on same subject. 4702 sub, _ = nc.SubscribeSync(delivery) 4703 nc.Flush() 4704 4705 // We should be getting 2 here. 4706 sendMsg() // 2 4707 getMsg(2) 4708 4709 sub.Unsubscribe() 4710 checkForInActive() 4711 4712 // send 3-10 4713 for i := 0; i <= 7; i++ { 4714 sendMsg() 4715 } 4716 // Make sure they are all queued up with no interest. 4717 nc.Flush() 4718 4719 // Restablish again. 4720 sub, _ = nc.SubscribeSync(delivery) 4721 nc.Flush() 4722 4723 // We should be getting 3-10 here. 
4724 for i := 3; i <= 10; i++ { 4725 getMsg(i) 4726 } 4727 }) 4728 } 4729 } 4730 4731 func TestJetStreamDurableConsumerReconnect(t *testing.T) { 4732 cases := []struct { 4733 name string 4734 mconfig *StreamConfig 4735 }{ 4736 {"MemoryStore", &StreamConfig{Name: "DT", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 4737 {"FileStore", &StreamConfig{Name: "DT", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 4738 } 4739 for _, c := range cases { 4740 t.Run(c.name, func(t *testing.T) { 4741 s := RunBasicJetStreamServer(t) 4742 defer s.Shutdown() 4743 4744 mset, err := s.GlobalAccount().addStream(c.mconfig) 4745 if err != nil { 4746 t.Fatalf("Unexpected error adding stream: %v", err) 4747 } 4748 defer mset.delete() 4749 4750 nc := clientConnectToServer(t, s) 4751 defer nc.Close() 4752 4753 dname := "d22" 4754 subj1 := nats.NewInbox() 4755 4756 o, err := mset.addConsumer(&ConsumerConfig{ 4757 Durable: dname, 4758 DeliverSubject: subj1, 4759 AckPolicy: AckExplicit, 4760 AckWait: 50 * time.Millisecond}) 4761 if err != nil { 4762 t.Fatalf("Unexpected error: %v", err) 4763 } 4764 sendMsg := func() { 4765 t.Helper() 4766 if err := nc.Publish("foo.22", []byte("OK!")); err != nil { 4767 return 4768 } 4769 } 4770 4771 // Send 10 msgs 4772 toSend := 10 4773 for i := 0; i < toSend; i++ { 4774 sendMsg() 4775 } 4776 4777 sub, _ := nc.SubscribeSync(subj1) 4778 defer sub.Unsubscribe() 4779 4780 checkFor(t, 500*time.Millisecond, 10*time.Millisecond, func() error { 4781 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend { 4782 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend) 4783 } 4784 return nil 4785 }) 4786 4787 getMsg := func(seqno int) *nats.Msg { 4788 t.Helper() 4789 m, err := sub.NextMsg(time.Second) 4790 if err != nil { 4791 t.Fatalf("Unexpected error: %v", err) 4792 } 4793 if seq := o.streamSeqFromReply(m.Reply); seq != uint64(seqno) { 4794 t.Fatalf("Expected sequence of %d , got %d", seqno, seq) 4795 } 4796 m.Respond(nil) 4797 return m 4798 } 4799 4800 // Ack first half 4801 for i := 1; i <= toSend/2; i++ { 4802 m := getMsg(i) 4803 m.Respond(nil) 4804 } 4805 4806 // Now unsubscribe and wait to become inactive 4807 sub.Unsubscribe() 4808 checkFor(t, 250*time.Millisecond, 50*time.Millisecond, func() error { 4809 if o.isActive() { 4810 return fmt.Errorf("Consumer is still active") 4811 } 4812 return nil 4813 }) 4814 4815 // Now we should be able to replace the delivery subject. 4816 subj2 := nats.NewInbox() 4817 sub, _ = nc.SubscribeSync(subj2) 4818 defer sub.Unsubscribe() 4819 nc.Flush() 4820 4821 o, err = mset.addConsumer(&ConsumerConfig{ 4822 Durable: dname, 4823 DeliverSubject: subj2, 4824 AckPolicy: AckExplicit, 4825 AckWait: 50 * time.Millisecond}) 4826 if err != nil { 4827 t.Fatalf("Unexpected error trying to add a new durable consumer: %v", err) 4828 } 4829 4830 // We should get the remaining messages here. 
4831 for i := toSend/2 + 1; i <= toSend; i++ { 4832 m := getMsg(i) 4833 m.Respond(nil) 4834 } 4835 }) 4836 } 4837 } 4838 4839 func TestJetStreamDurableConsumerReconnectWithOnlyPending(t *testing.T) { 4840 cases := []struct { 4841 name string 4842 mconfig *StreamConfig 4843 }{ 4844 {"MemoryStore", &StreamConfig{Name: "DT", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 4845 {"FileStore", &StreamConfig{Name: "DT", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 4846 } 4847 for _, c := range cases { 4848 t.Run(c.name, func(t *testing.T) { 4849 s := RunBasicJetStreamServer(t) 4850 defer s.Shutdown() 4851 4852 mset, err := s.GlobalAccount().addStream(c.mconfig) 4853 if err != nil { 4854 t.Fatalf("Unexpected error adding stream: %v", err) 4855 } 4856 defer mset.delete() 4857 4858 nc := clientConnectToServer(t, s) 4859 defer nc.Close() 4860 4861 dname := "d22" 4862 subj1 := nats.NewInbox() 4863 4864 o, err := mset.addConsumer(&ConsumerConfig{ 4865 Durable: dname, 4866 DeliverSubject: subj1, 4867 AckPolicy: AckExplicit, 4868 AckWait: 25 * time.Millisecond}) 4869 if err != nil { 4870 t.Fatalf("Unexpected error: %v", err) 4871 } 4872 4873 sendMsg := func(payload string) { 4874 t.Helper() 4875 if err := nc.Publish("foo.22", []byte(payload)); err != nil { 4876 return 4877 } 4878 } 4879 4880 sendMsg("1") 4881 4882 sub, _ := nc.SubscribeSync(subj1) 4883 defer sub.Unsubscribe() 4884 4885 checkFor(t, 500*time.Millisecond, 10*time.Millisecond, func() error { 4886 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != 1 { 4887 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, 1) 4888 } 4889 return nil 4890 }) 4891 4892 // Now unsubscribe and wait to become inactive 4893 sub.Unsubscribe() 4894 checkFor(t, 250*time.Millisecond, 50*time.Millisecond, func() error { 4895 if o.isActive() { 4896 return fmt.Errorf("Consumer is still active") 4897 } 4898 return nil 4899 }) 4900 4901 // Send the second message while delivery subscriber is not running 4902 sendMsg("2") 4903 4904 // Now we should be able to replace the delivery subject. 4905 subj2 := nats.NewInbox() 4906 o, err = mset.addConsumer(&ConsumerConfig{ 4907 Durable: dname, 4908 DeliverSubject: subj2, 4909 AckPolicy: AckExplicit, 4910 AckWait: 25 * time.Millisecond}) 4911 if err != nil { 4912 t.Fatalf("Unexpected error trying to add a new durable consumer: %v", err) 4913 } 4914 sub, _ = nc.SubscribeSync(subj2) 4915 defer sub.Unsubscribe() 4916 nc.Flush() 4917 4918 // We should get msg "1" and "2" delivered. They will be reversed. 
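// Message 2 arrives as a first delivery, while message 1 comes back as a
// redelivery (delivery count > 1) since its original delivery was never acked.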
4919 for i := 0; i < 2; i++ { 4920 msg, err := sub.NextMsg(500 * time.Millisecond) 4921 if err != nil { 4922 t.Fatalf("Unexpected error: %v", err) 4923 } 4924 sseq, _, dc, _, _ := replyInfo(msg.Reply) 4925 if sseq == 1 && dc == 1 { 4926 t.Fatalf("Expected a redelivery count greater then 1 for sseq 1, got %d", dc) 4927 } 4928 if sseq != 1 && sseq != 2 { 4929 t.Fatalf("Expected stream sequence of 1 or 2 but got %d", sseq) 4930 } 4931 } 4932 }) 4933 } 4934 } 4935 4936 func TestJetStreamDurableFilteredSubjectConsumerReconnect(t *testing.T) { 4937 cases := []struct { 4938 name string 4939 mconfig *StreamConfig 4940 }{ 4941 {"MemoryStore", &StreamConfig{Name: "DT", Storage: MemoryStorage, Subjects: []string{"foo.*"}}}, 4942 {"FileStore", &StreamConfig{Name: "DT", Storage: FileStorage, Subjects: []string{"foo.*"}}}, 4943 } 4944 for _, c := range cases { 4945 t.Run(c.name, func(t *testing.T) { 4946 s := RunBasicJetStreamServer(t) 4947 defer s.Shutdown() 4948 4949 mset, err := s.GlobalAccount().addStream(c.mconfig) 4950 if err != nil { 4951 t.Fatalf("Unexpected error adding stream: %v", err) 4952 } 4953 defer mset.delete() 4954 4955 nc, js := jsClientConnect(t, s) 4956 defer nc.Close() 4957 4958 sendMsgs := func(toSend int) { 4959 for i := 0; i < toSend; i++ { 4960 var subj string 4961 if i%2 == 0 { 4962 subj = "foo.AA" 4963 } else { 4964 subj = "foo.ZZ" 4965 } 4966 _, err := js.Publish(subj, []byte("OK!")) 4967 require_NoError(t, err) 4968 } 4969 } 4970 4971 // Send 50 msgs 4972 toSend := 50 4973 sendMsgs(toSend) 4974 4975 dname := "d33" 4976 dsubj := nats.NewInbox() 4977 4978 // Now create an consumer for foo.AA, only requesting the last one. 4979 _, err = mset.addConsumer(&ConsumerConfig{ 4980 Durable: dname, 4981 DeliverSubject: dsubj, 4982 FilterSubject: "foo.AA", 4983 DeliverPolicy: DeliverLast, 4984 AckPolicy: AckExplicit, 4985 AckWait: 100 * time.Millisecond, 4986 }) 4987 if err != nil { 4988 t.Fatalf("Unexpected error: %v", err) 4989 } 4990 sub, _ := nc.SubscribeSync(dsubj) 4991 defer sub.Unsubscribe() 4992 4993 // Used to calculate difference between store seq and delivery seq. 4994 storeBaseOff := 47 4995 4996 getMsg := func(seq int) *nats.Msg { 4997 t.Helper() 4998 sseq := 2*seq + storeBaseOff 4999 m, err := sub.NextMsg(time.Second) 5000 if err != nil { 5001 t.Fatalf("Unexpected error: %v", err) 5002 } 5003 rsseq, roseq, dcount, _, _ := replyInfo(m.Reply) 5004 if roseq != uint64(seq) { 5005 t.Fatalf("Expected consumer sequence of %d , got %d", seq, roseq) 5006 } 5007 if rsseq != uint64(sseq) { 5008 t.Fatalf("Expected stream sequence of %d , got %d", sseq, rsseq) 5009 } 5010 if dcount != 1 { 5011 t.Fatalf("Expected message to not be marked as redelivered") 5012 } 5013 return m 5014 } 5015 5016 getRedeliveredMsg := func(seq int) *nats.Msg { 5017 t.Helper() 5018 m, err := sub.NextMsg(time.Second) 5019 if err != nil { 5020 t.Fatalf("Unexpected error: %v", err) 5021 } 5022 _, roseq, dcount, _, _ := replyInfo(m.Reply) 5023 if roseq != uint64(seq) { 5024 t.Fatalf("Expected consumer sequence of %d , got %d", seq, roseq) 5025 } 5026 if dcount < 2 { 5027 t.Fatalf("Expected message to be marked as redelivered") 5028 } 5029 // Ack this message. 5030 m.Respond(nil) 5031 return m 5032 } 5033 5034 // All consumers start at 1 and always have increasing sequence numbers. 5035 m := getMsg(1) 5036 m.Respond(nil) 5037 5038 // Now send 50 more, so 100 total, 26 (last + 50/2) for this consumer. 
5039 sendMsgs(toSend) 5040 5041 state := mset.state() 5042 if state.Msgs != uint64(toSend*2) { 5043 t.Fatalf("Expected %d messages, got %d", toSend*2, state.Msgs) 5044 } 5045 5046 // For tracking next expected. 5047 nextSeq := 2 5048 noAcks := 0 5049 for i := 0; i < toSend/2; i++ { 5050 m := getMsg(nextSeq) 5051 if i%2 == 0 { 5052 m.Respond(nil) // Ack evens. 5053 } else { 5054 noAcks++ 5055 } 5056 nextSeq++ 5057 } 5058 5059 // We should now get those redelivered. 5060 for i := 0; i < noAcks; i++ { 5061 getRedeliveredMsg(nextSeq) 5062 nextSeq++ 5063 } 5064 5065 // Now send 50 more. 5066 sendMsgs(toSend) 5067 5068 storeBaseOff -= noAcks * 2 5069 5070 for i := 0; i < toSend/2; i++ { 5071 m := getMsg(nextSeq) 5072 m.Respond(nil) 5073 nextSeq++ 5074 } 5075 }) 5076 } 5077 } 5078 5079 func TestJetStreamConsumerInactiveNoDeadlock(t *testing.T) { 5080 cases := []struct { 5081 name string 5082 mconfig *StreamConfig 5083 }{ 5084 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage}}, 5085 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage}}, 5086 } 5087 for _, c := range cases { 5088 t.Run(c.name, func(t *testing.T) { 5089 s := RunBasicJetStreamServer(t) 5090 defer s.Shutdown() 5091 5092 mset, err := s.GlobalAccount().addStream(c.mconfig) 5093 if err != nil { 5094 t.Fatalf("Unexpected error adding stream: %v", err) 5095 } 5096 defer mset.delete() 5097 5098 nc, js := jsClientConnect(t, s) 5099 defer nc.Close() 5100 5101 // Send lots of msgs and have them queued up. 5102 for i := 0; i < 10000; i++ { 5103 js.Publish("DC", []byte("OK!")) 5104 } 5105 5106 if state := mset.state(); state.Msgs != 10000 { 5107 t.Fatalf("Expected %d messages, got %d", 10000, state.Msgs) 5108 } 5109 5110 sub, _ := nc.SubscribeSync(nats.NewInbox()) 5111 sub.SetPendingLimits(-1, -1) 5112 defer sub.Unsubscribe() 5113 nc.Flush() 5114 5115 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject}) 5116 if err != nil { 5117 t.Fatalf("Unexpected error: %v", err) 5118 } 5119 defer o.delete() 5120 5121 for i := 0; i < 10; i++ { 5122 if _, err := sub.NextMsg(time.Second); err != nil { 5123 t.Fatalf("Unexpected error: %v", err) 5124 } 5125 } 5126 // Force us to become inactive but we want to make sure we do not lock up 5127 // the internal sendq. 
5128 sub.Unsubscribe() 5129 nc.Flush() 5130 }) 5131 } 5132 } 5133 5134 func TestJetStreamMetadata(t *testing.T) { 5135 cases := []struct { 5136 name string 5137 mconfig *StreamConfig 5138 }{ 5139 {"MemoryStore", &StreamConfig{Name: "DC", Retention: WorkQueuePolicy, Storage: MemoryStorage}}, 5140 {"FileStore", &StreamConfig{Name: "DC", Retention: WorkQueuePolicy, Storage: FileStorage}}, 5141 } 5142 5143 for _, c := range cases { 5144 t.Run(c.name, func(t *testing.T) { 5145 s := RunBasicJetStreamServer(t) 5146 defer s.Shutdown() 5147 5148 mset, err := s.GlobalAccount().addStream(c.mconfig) 5149 if err != nil { 5150 t.Fatalf("Unexpected error adding stream: %v", err) 5151 } 5152 defer mset.delete() 5153 5154 nc := clientConnectToServer(t, s) 5155 defer nc.Close() 5156 5157 for i := 0; i < 10; i++ { 5158 nc.Publish("DC", []byte("OK!")) 5159 nc.Flush() 5160 time.Sleep(time.Millisecond) 5161 } 5162 5163 if state := mset.state(); state.Msgs != 10 { 5164 t.Fatalf("Expected %d messages, got %d", 10, state.Msgs) 5165 } 5166 5167 o, err := mset.addConsumer(workerModeConfig("WQ")) 5168 if err != nil { 5169 t.Fatalf("Expected no error with registered interest, got %v", err) 5170 } 5171 defer o.delete() 5172 5173 for i := uint64(1); i <= 10; i++ { 5174 m, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 5175 if err != nil { 5176 t.Fatalf("Unexpected error: %v", err) 5177 } 5178 5179 sseq, dseq, dcount, ts, _ := replyInfo(m.Reply) 5180 5181 mreq := &JSApiMsgGetRequest{Seq: sseq} 5182 req, err := json.Marshal(mreq) 5183 if err != nil { 5184 t.Fatalf("Unexpected error: %v", err) 5185 } 5186 // Load the original message from the stream to verify ReplyInfo ts against stored message 5187 smsgj, err := nc.Request(fmt.Sprintf(JSApiMsgGetT, c.mconfig.Name), req, time.Second) 5188 if err != nil { 5189 t.Fatalf("Could not retrieve stream message: %v", err) 5190 } 5191 5192 var resp JSApiMsgGetResponse 5193 err = json.Unmarshal(smsgj.Data, &resp) 5194 if err != nil { 5195 t.Fatalf("Could not parse stream message: %v", err) 5196 } 5197 if resp.Message == nil || resp.Error != nil { 5198 t.Fatalf("Did not receive correct response") 5199 } 5200 smsg := resp.Message 5201 if ts != smsg.Time.UnixNano() { 5202 t.Fatalf("Wrong timestamp in ReplyInfo for msg %d, expected %v got %v", i, ts, smsg.Time.UnixNano()) 5203 } 5204 if sseq != i { 5205 t.Fatalf("Expected set sequence of %d, got %d", i, sseq) 5206 } 5207 if dseq != i { 5208 t.Fatalf("Expected delivery sequence of %d, got %d", i, dseq) 5209 } 5210 if dcount != 1 { 5211 t.Fatalf("Expected delivery count to be 1, got %d", dcount) 5212 } 5213 m.Respond(AckAck) 5214 } 5215 5216 // Now make sure we get right response when message is missing. 
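// The stream uses WorkQueuePolicy retention, so every acked message has been
// removed; a direct get for sequence 1 should now report "no message found".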
5217 mreq := &JSApiMsgGetRequest{Seq: 1} 5218 req, err := json.Marshal(mreq) 5219 if err != nil { 5220 t.Fatalf("Unexpected error: %v", err) 5221 } 5222 // Load the original message from the stream to verify ReplyInfo ts against stored message 5223 rmsg, err := nc.Request(fmt.Sprintf(JSApiMsgGetT, c.mconfig.Name), req, time.Second) 5224 if err != nil { 5225 t.Fatalf("Could not retrieve stream message: %v", err) 5226 } 5227 var resp JSApiMsgGetResponse 5228 err = json.Unmarshal(rmsg.Data, &resp) 5229 if err != nil { 5230 t.Fatalf("Could not parse stream message: %v", err) 5231 } 5232 if resp.Error == nil || resp.Error.Code != 404 || resp.Error.Description != "no message found" { 5233 t.Fatalf("Did not get correct error response: %+v", resp.Error) 5234 } 5235 }) 5236 } 5237 } 5238 func TestJetStreamRedeliverCount(t *testing.T) { 5239 cases := []struct { 5240 name string 5241 mconfig *StreamConfig 5242 }{ 5243 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage}}, 5244 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage}}, 5245 } 5246 for _, c := range cases { 5247 t.Run(c.name, func(t *testing.T) { 5248 s := RunBasicJetStreamServer(t) 5249 defer s.Shutdown() 5250 5251 mset, err := s.GlobalAccount().addStream(c.mconfig) 5252 if err != nil { 5253 t.Fatalf("Unexpected error adding stream: %v", err) 5254 } 5255 defer mset.delete() 5256 5257 nc, js := jsClientConnect(t, s) 5258 defer nc.Close() 5259 5260 if _, err = js.Publish("DC", []byte("OK!")); err != nil { 5261 t.Fatal(err) 5262 } 5263 checkFor(t, time.Second, time.Millisecond*250, func() error { 5264 if state := mset.state(); state.Msgs != 1 { 5265 return fmt.Errorf("Expected %d messages, got %d", 1, state.Msgs) 5266 } 5267 return nil 5268 }) 5269 5270 o, err := mset.addConsumer(workerModeConfig("WQ")) 5271 if err != nil { 5272 t.Fatalf("Expected no error with registered interest, got %v", err) 5273 } 5274 defer o.delete() 5275 5276 for i := uint64(1); i <= 10; i++ { 5277 m, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 5278 if err != nil { 5279 t.Fatalf("Unexpected error: %v", err) 5280 } 5281 5282 sseq, dseq, dcount, _, _ := replyInfo(m.Reply) 5283 5284 // Make sure we keep getting stream sequence #1 5285 if sseq != 1 { 5286 t.Fatalf("Expected set sequence of 1, got %d", sseq) 5287 } 5288 if dseq != i { 5289 t.Fatalf("Expected delivery sequence of %d, got %d", i, dseq) 5290 } 5291 // Now make sure dcount is same as dseq (or i). 5292 if dcount != i { 5293 t.Fatalf("Expected delivery count to be %d, got %d", i, dcount) 5294 } 5295 5296 // Make sure it keeps getting sent back. 5297 m.Respond(AckNak) 5298 nc.Flush() 5299 } 5300 }) 5301 } 5302 } 5303 5304 // We want to make sure that for pull based consumers that if we ack 5305 // late with no interest the redelivery attempt is removed and we do 5306 // not get the message back. 
5307 func TestJetStreamRedeliverAndLateAck(t *testing.T) { 5308 s := RunBasicJetStreamServer(t) 5309 defer s.Shutdown() 5310 5311 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: "LA", Storage: MemoryStorage}) 5312 if err != nil { 5313 t.Fatalf("Unexpected error adding stream: %v", err) 5314 } 5315 defer mset.delete() 5316 5317 o, err := mset.addConsumer(&ConsumerConfig{Durable: "DDD", AckPolicy: AckExplicit, AckWait: 100 * time.Millisecond}) 5318 if err != nil { 5319 t.Fatalf("Expected no error with registered interest, got %v", err) 5320 } 5321 defer o.delete() 5322 5323 nc := clientConnectToServer(t, s) 5324 defer nc.Close() 5325 5326 // Queue up message 5327 sendStreamMsg(t, nc, "LA", "Hello World!") 5328 5329 nextSubj := o.requestNextMsgSubject() 5330 msg, err := nc.Request(nextSubj, nil, time.Second) 5331 require_NoError(t, err) 5332 5333 // Wait for past ackwait time 5334 time.Sleep(150 * time.Millisecond) 5335 // Now ack! 5336 msg.AckSync() 5337 // We should not get this back. 5338 if _, err := nc.Request(nextSubj, nil, 10*time.Millisecond); err == nil { 5339 t.Fatalf("Message should not have been sent back") 5340 } 5341 } 5342 5343 // https://github.com/nats-io/nats-server/issues/1502 5344 func TestJetStreamPendingNextTimer(t *testing.T) { 5345 s := RunBasicJetStreamServer(t) 5346 defer s.Shutdown() 5347 5348 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: "NT", Storage: MemoryStorage, Subjects: []string{"ORDERS.*"}}) 5349 if err != nil { 5350 t.Fatalf("Unexpected error adding stream: %v", err) 5351 } 5352 defer mset.delete() 5353 5354 o, err := mset.addConsumer(&ConsumerConfig{ 5355 Durable: "DDD", 5356 AckPolicy: AckExplicit, 5357 FilterSubject: "ORDERS.test", 5358 AckWait: 100 * time.Millisecond, 5359 }) 5360 if err != nil { 5361 t.Fatalf("Expected no error with registered interest, got %v", err) 5362 } 5363 defer o.delete() 5364 5365 sendAndReceive := func() { 5366 nc := clientConnectToServer(t, s) 5367 defer nc.Close() 5368 5369 // Queue up message 5370 sendStreamMsg(t, nc, "ORDERS.test", "Hello World! #1") 5371 sendStreamMsg(t, nc, "ORDERS.test", "Hello World! 
#2") 5372 5373 nextSubj := o.requestNextMsgSubject() 5374 for i := 0; i < 2; i++ { 5375 if _, err := nc.Request(nextSubj, nil, time.Second); err != nil { 5376 t.Fatalf("Unexpected error: %v", err) 5377 } 5378 } 5379 nc.Close() 5380 time.Sleep(200 * time.Millisecond) 5381 } 5382 5383 sendAndReceive() 5384 sendAndReceive() 5385 sendAndReceive() 5386 } 5387 5388 func TestJetStreamCanNotNakAckd(t *testing.T) { 5389 cases := []struct { 5390 name string 5391 mconfig *StreamConfig 5392 }{ 5393 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage}}, 5394 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage}}, 5395 } 5396 for _, c := range cases { 5397 t.Run(c.name, func(t *testing.T) { 5398 s := RunBasicJetStreamServer(t) 5399 defer s.Shutdown() 5400 5401 mset, err := s.GlobalAccount().addStream(c.mconfig) 5402 if err != nil { 5403 t.Fatalf("Unexpected error adding stream: %v", err) 5404 } 5405 defer mset.delete() 5406 5407 nc, js := jsClientConnect(t, s) 5408 defer nc.Close() 5409 5410 // Send 10 msgs 5411 for i := 0; i < 10; i++ { 5412 js.Publish("DC", []byte("OK!")) 5413 } 5414 if state := mset.state(); state.Msgs != 10 { 5415 t.Fatalf("Expected %d messages, got %d", 10, state.Msgs) 5416 } 5417 5418 o, err := mset.addConsumer(workerModeConfig("WQ")) 5419 if err != nil { 5420 t.Fatalf("Expected no error with registered interest, got %v", err) 5421 } 5422 defer o.delete() 5423 5424 for i := uint64(1); i <= 10; i++ { 5425 m, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 5426 if err != nil { 5427 t.Fatalf("Unexpected error: %v", err) 5428 } 5429 // Ack evens. 5430 if i%2 == 0 { 5431 m.Respond(nil) 5432 } 5433 } 5434 nc.Flush() 5435 5436 // Fake these for now. 5437 ackReplyT := "$JS.A.DC.WQ.1.%d.%d" 5438 checkBadNak := func(seq int) { 5439 t.Helper() 5440 if err := nc.Publish(fmt.Sprintf(ackReplyT, seq, seq), AckNak); err != nil { 5441 t.Fatalf("Error sending nak: %v", err) 5442 } 5443 nc.Flush() 5444 if _, err := nc.Request(o.requestNextMsgSubject(), nil, 10*time.Millisecond); err != nats.ErrTimeout { 5445 t.Fatalf("Did not expect new delivery on nak of %d", seq) 5446 } 5447 } 5448 5449 // If the nak took action it will deliver another message, incrementing the next delivery seq. 5450 // We ack evens above, so these should fail 5451 for i := 2; i <= 10; i += 2 { 5452 checkBadNak(i) 5453 } 5454 5455 // Now check we can not nak something we do not have. 5456 checkBadNak(22) 5457 }) 5458 } 5459 } 5460 5461 func TestJetStreamStreamPurge(t *testing.T) { 5462 cases := []struct { 5463 name string 5464 mconfig *StreamConfig 5465 }{ 5466 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage}}, 5467 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage}}, 5468 } 5469 for _, c := range cases { 5470 t.Run(c.name, func(t *testing.T) { 5471 s := RunBasicJetStreamServer(t) 5472 defer s.Shutdown() 5473 5474 mset, err := s.GlobalAccount().addStream(c.mconfig) 5475 if err != nil { 5476 t.Fatalf("Unexpected error adding stream: %v", err) 5477 } 5478 defer mset.delete() 5479 5480 nc, js := jsClientConnect(t, s) 5481 defer nc.Close() 5482 5483 // Send 100 msgs 5484 for i := 0; i < 100; i++ { 5485 js.Publish("DC", []byte("OK!")) 5486 } 5487 if state := mset.state(); state.Msgs != 100 { 5488 t.Fatalf("Expected %d messages, got %d", 100, state.Msgs) 5489 } 5490 mset.purge(nil) 5491 state := mset.state() 5492 if state.Msgs != 0 { 5493 t.Fatalf("Expected %d messages, got %d", 0, state.Msgs) 5494 } 5495 // Make sure first timestamp are reset. 
5496 if !state.FirstTime.IsZero() { 5497 t.Fatalf("Expected the state's first time to be zero after purge") 5498 } 5499 time.Sleep(10 * time.Millisecond) 5500 now := time.Now() 5501 js.Publish("DC", []byte("OK!")) 5502 5503 state = mset.state() 5504 if state.Msgs != 1 { 5505 t.Fatalf("Expected %d message, got %d", 1, state.Msgs) 5506 } 5507 if state.FirstTime.Before(now) { 5508 t.Fatalf("First time is incorrect after adding messages back in") 5509 } 5510 if state.FirstTime != state.LastTime { 5511 t.Fatalf("Expected first and last times to be the same for only message") 5512 } 5513 }) 5514 } 5515 } 5516 5517 func TestJetStreamStreamPurgeWithConsumer(t *testing.T) { 5518 cases := []struct { 5519 name string 5520 mconfig *StreamConfig 5521 }{ 5522 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage}}, 5523 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage}}, 5524 } 5525 for _, c := range cases { 5526 t.Run(c.name, func(t *testing.T) { 5527 s := RunBasicJetStreamServer(t) 5528 defer s.Shutdown() 5529 5530 mset, err := s.GlobalAccount().addStream(c.mconfig) 5531 if err != nil { 5532 t.Fatalf("Unexpected error adding stream: %v", err) 5533 } 5534 defer mset.delete() 5535 5536 nc, js := jsClientConnect(t, s) 5537 defer nc.Close() 5538 5539 // Send 100 msgs 5540 for i := 0; i < 100; i++ { 5541 js.Publish("DC", []byte("OK!")) 5542 } 5543 if state := mset.state(); state.Msgs != 100 { 5544 t.Fatalf("Expected %d messages, got %d", 100, state.Msgs) 5545 } 5546 // Now create an consumer and make sure it functions properly. 5547 o, err := mset.addConsumer(workerModeConfig("WQ")) 5548 if err != nil { 5549 t.Fatalf("Expected no error with registered interest, got %v", err) 5550 } 5551 defer o.delete() 5552 nextSubj := o.requestNextMsgSubject() 5553 for i := 0; i < 50; i++ { 5554 msg, err := nc.Request(nextSubj, nil, time.Second) 5555 if err != nil { 5556 t.Fatalf("Unexpected error: %v", err) 5557 } 5558 // Ack. 5559 msg.Respond(nil) 5560 } 5561 // Now grab next 25 without ack. 5562 for i := 0; i < 25; i++ { 5563 if _, err := nc.Request(nextSubj, nil, time.Second); err != nil { 5564 t.Fatalf("Unexpected error: %v", err) 5565 } 5566 } 5567 state := o.info() 5568 if state.AckFloor.Consumer != 50 { 5569 t.Fatalf("Expected ack floor of 50, got %d", state.AckFloor.Consumer) 5570 } 5571 if state.NumAckPending != 25 { 5572 t.Fatalf("Expected len(pending) to be 25, got %d", state.NumAckPending) 5573 } 5574 // Now do purge. 5575 mset.purge(nil) 5576 if state := mset.state(); state.Msgs != 0 { 5577 t.Fatalf("Expected %d messages, got %d", 0, state.Msgs) 5578 } 5579 // Now re-acquire state and check that we did the right thing. 5580 // Pending should be cleared, and stream sequences should have been set 5581 // to the total messages before purge + 1. 5582 state = o.info() 5583 if state.NumAckPending != 0 { 5584 t.Fatalf("Expected no pending, got %d", state.NumAckPending) 5585 } 5586 if state.Delivered.Stream != 100 { 5587 t.Fatalf("Expected to have setseq now at next seq of 100, got %d", state.Delivered.Stream) 5588 } 5589 // Check AckFloors which should have also been adjusted. 5590 if state.AckFloor.Stream != 100 { 5591 t.Fatalf("Expected ackfloor for setseq to be 100, got %d", state.AckFloor.Stream) 5592 } 5593 if state.AckFloor.Consumer != 75 { 5594 t.Fatalf("Expected ackfloor for obsseq to be 75, got %d", state.AckFloor.Consumer) 5595 } 5596 // Also make sure we can get new messages correctly. 
5597 js.Publish("DC", []byte("OK-22")) 5598 if msg, err := nc.Request(nextSubj, nil, time.Second); err != nil { 5599 t.Fatalf("Unexpected error: %v", err) 5600 } else if string(msg.Data) != "OK-22" { 5601 t.Fatalf("Received wrong message, wanted 'OK-22', got %q", msg.Data) 5602 } 5603 }) 5604 } 5605 } 5606 5607 func TestJetStreamStreamPurgeWithConsumerAndRedelivery(t *testing.T) { 5608 cases := []struct { 5609 name string 5610 mconfig *StreamConfig 5611 }{ 5612 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage}}, 5613 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage}}, 5614 } 5615 for _, c := range cases { 5616 t.Run(c.name, func(t *testing.T) { 5617 s := RunBasicJetStreamServer(t) 5618 defer s.Shutdown() 5619 5620 mset, err := s.GlobalAccount().addStream(c.mconfig) 5621 if err != nil { 5622 t.Fatalf("Unexpected error adding stream: %v", err) 5623 } 5624 defer mset.delete() 5625 5626 nc, js := jsClientConnect(t, s) 5627 defer nc.Close() 5628 5629 // Send 100 msgs 5630 for i := 0; i < 100; i++ { 5631 js.Publish("DC", []byte("OK!")) 5632 } 5633 if state := mset.state(); state.Msgs != 100 { 5634 t.Fatalf("Expected %d messages, got %d", 100, state.Msgs) 5635 } 5636 // Now create an consumer and make sure it functions properly. 5637 // This will test redelivery state and purge of the stream. 5638 wcfg := &ConsumerConfig{ 5639 Durable: "WQ", 5640 AckPolicy: AckExplicit, 5641 AckWait: 20 * time.Millisecond, 5642 } 5643 o, err := mset.addConsumer(wcfg) 5644 if err != nil { 5645 t.Fatalf("Expected no error with registered interest, got %v", err) 5646 } 5647 defer o.delete() 5648 nextSubj := o.requestNextMsgSubject() 5649 for i := 0; i < 50; i++ { 5650 // Do not ack these. 5651 if _, err := nc.Request(nextSubj, nil, time.Second); err != nil { 5652 t.Fatalf("Unexpected error: %v", err) 5653 } 5654 } 5655 // Now wait to make sure we are in a redelivered state. 5656 time.Sleep(wcfg.AckWait * 2) 5657 // Now do purge. 5658 mset.purge(nil) 5659 if state := mset.state(); state.Msgs != 0 { 5660 t.Fatalf("Expected %d messages, got %d", 0, state.Msgs) 5661 } 5662 // Now get the state and check that we did the right thing. 5663 // Pending should be cleared, and stream sequences should have been set 5664 // to the total messages before purge + 1. 5665 state := o.info() 5666 if state.NumAckPending != 0 { 5667 t.Fatalf("Expected no pending, got %d", state.NumAckPending) 5668 } 5669 if state.Delivered.Stream != 100 { 5670 t.Fatalf("Expected to have setseq now at next seq of 100, got %d", state.Delivered.Stream) 5671 } 5672 // Check AckFloors which should have also been adjusted. 5673 if state.AckFloor.Stream != 100 { 5674 t.Fatalf("Expected ackfloor for setseq to be 100, got %d", state.AckFloor.Stream) 5675 } 5676 if state.AckFloor.Consumer != 50 { 5677 t.Fatalf("Expected ackfloor for obsseq to be 75, got %d", state.AckFloor.Consumer) 5678 } 5679 // Also make sure we can get new messages correctly. 
5680 js.Publish("DC", []byte("OK-22")) 5681 if msg, err := nc.Request(nextSubj, nil, time.Second); err != nil { 5682 t.Fatalf("Unexpected error: %v", err) 5683 } else if string(msg.Data) != "OK-22" { 5684 t.Fatalf("Received wrong message, wanted 'OK-22', got %q", msg.Data) 5685 } 5686 }) 5687 } 5688 } 5689 5690 func TestJetStreamInterestRetentionStream(t *testing.T) { 5691 cases := []struct { 5692 name string 5693 mconfig *StreamConfig 5694 }{ 5695 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage, Retention: InterestPolicy}}, 5696 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage, Retention: InterestPolicy}}, 5697 } 5698 for _, c := range cases { 5699 t.Run(c.name, func(t *testing.T) { 5700 s := RunBasicJetStreamServer(t) 5701 defer s.Shutdown() 5702 5703 mset, err := s.GlobalAccount().addStream(c.mconfig) 5704 if err != nil { 5705 t.Fatalf("Unexpected error adding stream: %v", err) 5706 } 5707 defer mset.delete() 5708 5709 nc, js := jsClientConnect(t, s) 5710 defer nc.Close() 5711 5712 // Send 100 msgs 5713 totalMsgs := 100 5714 5715 for i := 0; i < totalMsgs; i++ { 5716 js.Publish("DC", []byte("OK!")) 5717 } 5718 5719 checkNumMsgs := func(numExpected int) { 5720 t.Helper() 5721 checkFor(t, time.Second, 15*time.Millisecond, func() error { 5722 if state := mset.state(); state.Msgs != uint64(numExpected) { 5723 return fmt.Errorf("Expected %d messages, got %d", numExpected, state.Msgs) 5724 } 5725 return nil 5726 }) 5727 } 5728 5729 // Since we had no interest this should be 0. 5730 checkNumMsgs(0) 5731 5732 syncSub := func() *nats.Subscription { 5733 sub, _ := nc.SubscribeSync(nats.NewInbox()) 5734 nc.Flush() 5735 return sub 5736 } 5737 5738 // Now create three consumers. 5739 // 1. AckExplicit 5740 // 2. AckAll 5741 // 3. AckNone 5742 5743 sub1 := syncSub() 5744 mset.addConsumer(&ConsumerConfig{DeliverSubject: sub1.Subject, AckPolicy: AckExplicit}) 5745 5746 sub2 := syncSub() 5747 mset.addConsumer(&ConsumerConfig{DeliverSubject: sub2.Subject, AckPolicy: AckAll}) 5748 5749 sub3 := syncSub() 5750 mset.addConsumer(&ConsumerConfig{DeliverSubject: sub3.Subject, AckPolicy: AckNone}) 5751 5752 for i := 0; i < totalMsgs; i++ { 5753 js.Publish("DC", []byte("OK!")) 5754 } 5755 5756 checkNumMsgs(totalMsgs) 5757 5758 // Wait for all messsages to be pending for each sub. 5759 for i, sub := range []*nats.Subscription{sub1, sub2, sub3} { 5760 checkFor(t, 5*time.Second, 25*time.Millisecond, func() error { 5761 if nmsgs, _, _ := sub.Pending(); nmsgs != totalMsgs { 5762 return fmt.Errorf("Did not receive correct number of messages: %d vs %d for sub %d", nmsgs, totalMsgs, i+1) 5763 } 5764 return nil 5765 }) 5766 } 5767 5768 getAndAck := func(sub *nats.Subscription) { 5769 t.Helper() 5770 if m, err := sub.NextMsg(time.Second); err != nil { 5771 t.Fatalf("Unexpected error: %v", err) 5772 } else { 5773 m.Respond(nil) 5774 } 5775 nc.Flush() 5776 } 5777 5778 // Ack evens for the explicit ack sub. 5779 var odds []*nats.Msg 5780 for i := 1; i <= totalMsgs; i++ { 5781 if m, err := sub1.NextMsg(time.Second); err != nil { 5782 t.Fatalf("Unexpected error: %v", err) 5783 } else if i%2 == 0 { 5784 m.Respond(nil) // Ack evens. 5785 } else { 5786 odds = append(odds, m) 5787 } 5788 } 5789 nc.Flush() 5790 5791 checkNumMsgs(totalMsgs) 5792 5793 // Now ack first for AckAll sub2 5794 getAndAck(sub2) 5795 // We should be at the same number since we acked 1, explicit acked 2 5796 checkNumMsgs(totalMsgs) 5797 // Now ack second for AckAll sub2 5798 getAndAck(sub2) 5799 // We should now have 1 removed. 
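// (Recall AckAll acknowledges everything up to the acked sequence, and with interest
// retention a message is removed only once every consumer has acknowledged it; the
// AckNone consumer counts as acked on delivery. Message 2 is the first one acked by
// both sub1, which acked the evens, and sub2, hence exactly one removal here.)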
5800 checkNumMsgs(totalMsgs - 1) 5801 // Now ack third for AckAll sub2 5802 getAndAck(sub2) 5803 // We should still only have 1 removed. 5804 checkNumMsgs(totalMsgs - 1) 5805 5806 // Now ack odds from explicit. 5807 for _, m := range odds { 5808 m.Respond(nil) // Ack 5809 } 5810 nc.Flush() 5811 5812 // we should have 1, 2, 3 acks now. 5813 checkNumMsgs(totalMsgs - 3) 5814 5815 nm, _, _ := sub2.Pending() 5816 // Now ack last ackAll message. This should clear all of them. 5817 for i := 1; i <= nm; i++ { 5818 if m, err := sub2.NextMsg(time.Second); err != nil { 5819 t.Fatalf("Unexpected error: %v", err) 5820 } else if i == nm { 5821 m.Respond(nil) 5822 } 5823 } 5824 nc.Flush() 5825 5826 // Should be zero now. 5827 checkNumMsgs(0) 5828 }) 5829 } 5830 } 5831 5832 func TestJetStreamInterestRetentionStreamWithFilteredConsumers(t *testing.T) { 5833 cases := []struct { 5834 name string 5835 mconfig *StreamConfig 5836 }{ 5837 {"MemoryStore", &StreamConfig{Name: "DC", Subjects: []string{"*"}, Storage: MemoryStorage, Retention: InterestPolicy}}, 5838 {"FileStore", &StreamConfig{Name: "DC", Subjects: []string{"*"}, Storage: FileStorage, Retention: InterestPolicy}}, 5839 } 5840 for _, c := range cases { 5841 t.Run(c.name, func(t *testing.T) { 5842 s := RunBasicJetStreamServer(t) 5843 defer s.Shutdown() 5844 5845 mset, err := s.GlobalAccount().addStream(c.mconfig) 5846 if err != nil { 5847 t.Fatalf("Unexpected error adding stream: %v", err) 5848 } 5849 defer mset.delete() 5850 5851 nc, js := jsClientConnect(t, s) 5852 defer nc.Close() 5853 5854 fsub, err := js.SubscribeSync("foo") 5855 if err != nil { 5856 t.Fatalf("Unexpected error: %v", err) 5857 } 5858 defer fsub.Unsubscribe() 5859 5860 bsub, err := js.SubscribeSync("bar") 5861 if err != nil { 5862 t.Fatalf("Unexpected error: %v", err) 5863 } 5864 defer bsub.Unsubscribe() 5865 5866 msg := []byte("FILTERED") 5867 sendMsg := func(subj string) { 5868 t.Helper() 5869 if _, err = js.Publish(subj, msg); err != nil { 5870 t.Fatalf("Unexpected publish error: %v", err) 5871 } 5872 } 5873 5874 getAndAck := func(sub *nats.Subscription) { 5875 t.Helper() 5876 m, err := sub.NextMsg(time.Second) 5877 if err != nil { 5878 t.Fatalf("Unexpected error getting msg: %v", err) 5879 } 5880 m.AckSync() 5881 } 5882 5883 checkState := func(expected uint64) { 5884 t.Helper() 5885 si, err := js.StreamInfo("DC") 5886 if err != nil { 5887 t.Fatalf("Unexpected error: %v", err) 5888 } 5889 if si.State.Msgs != expected { 5890 t.Fatalf("Expected %d msgs, got %d", expected, si.State.Msgs) 5891 } 5892 } 5893 5894 sendMsg("foo") 5895 checkState(1) 5896 getAndAck(fsub) 5897 checkState(0) 5898 sendMsg("bar") 5899 sendMsg("foo") 5900 checkState(2) 5901 getAndAck(bsub) 5902 checkState(1) 5903 getAndAck(fsub) 5904 checkState(0) 5905 }) 5906 } 5907 } 5908 5909 func TestJetStreamInterestRetentionWithWildcardsAndFilteredConsumers(t *testing.T) { 5910 msc := StreamConfig{ 5911 Name: "DCWC", 5912 Subjects: []string{"foo.*"}, 5913 Storage: MemoryStorage, 5914 Retention: InterestPolicy, 5915 } 5916 fsc := msc 5917 fsc.Storage = FileStorage 5918 5919 cases := []struct { 5920 name string 5921 mconfig *StreamConfig 5922 }{ 5923 {"MemoryStore", &msc}, 5924 {"FileStore", &fsc}, 5925 } 5926 for _, c := range cases { 5927 t.Run(c.name, func(t *testing.T) { 5928 s := RunBasicJetStreamServer(t) 5929 defer s.Shutdown() 5930 5931 mset, err := s.GlobalAccount().addStream(c.mconfig) 5932 if err != nil { 5933 t.Fatalf("Unexpected error adding stream: %v", err) 5934 } 5935 defer mset.delete() 5936 5937 nc := 
clientConnectToServer(t, s) 5938 defer nc.Close() 5939 5940 // Send 10 msgs 5941 for i := 0; i < 10; i++ { 5942 sendStreamMsg(t, nc, "foo.bar", "Hello World!") 5943 } 5944 if state := mset.state(); state.Msgs != 0 { 5945 t.Fatalf("Expected %d messages, got %d", 0, state.Msgs) 5946 } 5947 5948 cfg := &ConsumerConfig{Durable: "ddd", FilterSubject: "foo.bar", AckPolicy: AckExplicit} 5949 o, err := mset.addConsumer(cfg) 5950 if err != nil { 5951 t.Fatalf("Unexpected error: %v", err) 5952 } 5953 defer o.delete() 5954 5955 sendStreamMsg(t, nc, "foo.bar", "Hello World!") 5956 if state := mset.state(); state.Msgs != 1 { 5957 t.Fatalf("Expected %d message, got %d", 1, state.Msgs) 5958 } else if state.FirstSeq != 11 { 5959 t.Fatalf("Expected %d for first seq, got %d", 11, state.FirstSeq) 5960 } 5961 // Now send to foo.baz, which has no interest, so we should not hold onto this message. 5962 sendStreamMsg(t, nc, "foo.baz", "Hello World!") 5963 if state := mset.state(); state.Msgs != 1 { 5964 t.Fatalf("Expected %d message, got %d", 1, state.Msgs) 5965 } 5966 }) 5967 } 5968 } 5969 5970 func TestJetStreamInterestRetentionStreamWithDurableRestart(t *testing.T) { 5971 cases := []struct { 5972 name string 5973 mconfig *StreamConfig 5974 }{ 5975 {"MemoryStore", &StreamConfig{Name: "IK", Storage: MemoryStorage, Retention: InterestPolicy}}, 5976 {"FileStore", &StreamConfig{Name: "IK", Storage: FileStorage, Retention: InterestPolicy}}, 5977 } 5978 for _, c := range cases { 5979 t.Run(c.name, func(t *testing.T) { 5980 s := RunBasicJetStreamServer(t) 5981 defer s.Shutdown() 5982 5983 mset, err := s.GlobalAccount().addStream(c.mconfig) 5984 if err != nil { 5985 t.Fatalf("Unexpected error adding stream: %v", err) 5986 } 5987 defer mset.delete() 5988 5989 checkNumMsgs := func(numExpected int) { 5990 t.Helper() 5991 checkFor(t, time.Second, 50*time.Millisecond, func() error { 5992 if state := mset.state(); state.Msgs != uint64(numExpected) { 5993 return fmt.Errorf("Expected %d messages, got %d", numExpected, state.Msgs) 5994 } 5995 return nil 5996 }) 5997 } 5998 5999 nc := clientConnectToServer(t, s) 6000 defer nc.Close() 6001 6002 sub, _ := nc.SubscribeSync(nats.NewInbox()) 6003 nc.Flush() 6004 6005 cfg := &ConsumerConfig{Durable: "ivan", DeliverPolicy: DeliverNew, DeliverSubject: sub.Subject, AckPolicy: AckNone} 6006 6007 o, _ := mset.addConsumer(cfg) 6008 6009 sendStreamMsg(t, nc, "IK", "M1") 6010 sendStreamMsg(t, nc, "IK", "M2") 6011 6012 checkSubPending := func(numExpected int) { 6013 t.Helper() 6014 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 6015 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != numExpected { 6016 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected) 6017 } 6018 return nil 6019 }) 6020 } 6021 6022 checkSubPending(2) 6023 checkNumMsgs(0) 6024 6025 // Now stop the subscription. 6026 sub.Unsubscribe() 6027 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 6028 if o.isActive() { 6029 return fmt.Errorf("Still active consumer") 6030 } 6031 return nil 6032 }) 6033 6034 sendStreamMsg(t, nc, "IK", "M3") 6035 sendStreamMsg(t, nc, "IK", "M4") 6036 6037 checkNumMsgs(2) 6038 6039 // Now restart the durable. 
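// Re-adding a durable consumer under the same name (with a fresh deliver subject)
// resumes it from its prior state, so the two messages that arrived while it was
// offline (M3 and M4) should be redelivered to the new subscription below.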
6040 sub, _ = nc.SubscribeSync(nats.NewInbox()) 6041 nc.Flush() 6042 cfg.DeliverSubject = sub.Subject 6043 if o, err = mset.addConsumer(cfg); err != nil { 6044 t.Fatalf("Error re-establishing the durable consumer: %v", err) 6045 } 6046 checkSubPending(2) 6047 6048 for _, expected := range []string{"M3", "M4"} { 6049 if m, err := sub.NextMsg(time.Second); err != nil { 6050 t.Fatalf("Unexpected error: %v", err) 6051 } else if string(m.Data) != expected { 6052 t.Fatalf("Expected %q, got %q", expected, m.Data) 6053 } 6054 } 6055 6056 // Should all be gone now. 6057 checkNumMsgs(0) 6058 6059 // Now restart again and make sure we do not get any messages. 6060 sub.Unsubscribe() 6061 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 6062 if o.isActive() { 6063 return fmt.Errorf("Still active consumer") 6064 } 6065 return nil 6066 }) 6067 o.delete() 6068 6069 sub, _ = nc.SubscribeSync(nats.NewInbox()) 6070 nc.Flush() 6071 6072 cfg.DeliverSubject = sub.Subject 6073 cfg.AckPolicy = AckExplicit // Set ack 6074 if o, err = mset.addConsumer(cfg); err != nil { 6075 t.Fatalf("Error re-establishing the durable consumer: %v", err) 6076 } 6077 time.Sleep(100 * time.Millisecond) 6078 checkSubPending(0) 6079 checkNumMsgs(0) 6080 6081 // Now queue up some messages. 6082 for i := 1; i <= 10; i++ { 6083 sendStreamMsg(t, nc, "IK", fmt.Sprintf("M%d", i)) 6084 } 6085 checkNumMsgs(10) 6086 checkSubPending(10) 6087 6088 // Create second consumer 6089 sub2, _ := nc.SubscribeSync(nats.NewInbox()) 6090 nc.Flush() 6091 cfg.DeliverSubject = sub2.Subject 6092 cfg.Durable = "derek" 6093 o2, err := mset.addConsumer(cfg) 6094 if err != nil { 6095 t.Fatalf("Error creating second durable consumer: %v", err) 6096 } 6097 6098 // Now queue up some messages. 6099 for i := 11; i <= 20; i++ { 6100 sendStreamMsg(t, nc, "IK", fmt.Sprintf("M%d", i)) 6101 } 6102 checkNumMsgs(20) 6103 checkSubPending(20) 6104 6105 // Now make sure deleting the consumers will remove messages from 6106 // the stream since we are interest retention based. 6107 o.delete() 6108 checkNumMsgs(10) 6109 6110 o2.delete() 6111 checkNumMsgs(0) 6112 }) 6113 } 6114 } 6115 6116 func TestJetStreamConsumerReplayRate(t *testing.T) { 6117 cases := []struct { 6118 name string 6119 mconfig *StreamConfig 6120 }{ 6121 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage}}, 6122 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage}}, 6123 } 6124 for _, c := range cases { 6125 t.Run(c.name, func(t *testing.T) { 6126 s := RunBasicJetStreamServer(t) 6127 defer s.Shutdown() 6128 6129 mset, err := s.GlobalAccount().addStream(c.mconfig) 6130 if err != nil { 6131 t.Fatalf("Unexpected error adding stream: %v", err) 6132 } 6133 defer mset.delete() 6134 6135 nc := clientConnectToServer(t, s) 6136 defer nc.Close() 6137 6138 // Send 10 msgs 6139 totalMsgs := 10 6140 6141 var gaps []time.Duration 6142 lst := time.Now() 6143 6144 for i := 0; i < totalMsgs; i++ { 6145 gaps = append(gaps, time.Since(lst)) 6146 lst = time.Now() 6147 nc.Publish("DC", []byte("OK!")) 6148 // Calculate a gap between messages. 
6149 gap := 10*time.Millisecond + time.Duration(rand.Intn(20))*time.Millisecond 6150 time.Sleep(gap) 6151 } 6152 6153 if state := mset.state(); state.Msgs != uint64(totalMsgs) { 6154 t.Fatalf("Expected %d messages, got %d", totalMsgs, state.Msgs) 6155 } 6156 6157 sub, _ := nc.SubscribeSync(nats.NewInbox()) 6158 defer sub.Unsubscribe() 6159 nc.Flush() 6160 6161 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject}) 6162 if err != nil { 6163 t.Fatalf("Unexpected error: %v", err) 6164 } 6165 defer o.delete() 6166 6167 // Firehose/instant which is default. 6168 last := time.Now() 6169 for i := 0; i < totalMsgs; i++ { 6170 if _, err := sub.NextMsg(time.Second); err != nil { 6171 t.Fatalf("Unexpected error: %v", err) 6172 } 6173 now := time.Now() 6174 // Delivery from addConsumer starts in a go routine, so be 6175 // more tolerant for the first message. 6176 limit := 5 * time.Millisecond 6177 if i == 0 { 6178 limit = 10 * time.Millisecond 6179 } 6180 if now.Sub(last) > limit { 6181 t.Fatalf("Expected firehose/instant delivery, got message gap of %v", now.Sub(last)) 6182 } 6183 last = now 6184 } 6185 6186 // Now do replay rate to match original. 6187 o, err = mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject, ReplayPolicy: ReplayOriginal}) 6188 if err != nil { 6189 t.Fatalf("Unexpected error: %v", err) 6190 } 6191 defer o.delete() 6192 6193 // Original rate messsages were received for push based consumer. 6194 for i := 0; i < totalMsgs; i++ { 6195 start := time.Now() 6196 if _, err := sub.NextMsg(time.Second); err != nil { 6197 t.Fatalf("Unexpected error: %v", err) 6198 } 6199 gap := time.Since(start) 6200 // 15ms is high but on macs time.Sleep(delay) does not sleep only delay. 6201 // Also on travis if things get bogged down this could be delayed. 6202 gl, gh := gaps[i]-10*time.Millisecond, gaps[i]+15*time.Millisecond 6203 if gap < gl || gap > gh { 6204 t.Fatalf("Gap is off for %d, expected %v got %v", i, gaps[i], gap) 6205 } 6206 } 6207 6208 // Now create pull based. 6209 oc := workerModeConfig("PM") 6210 oc.ReplayPolicy = ReplayOriginal 6211 o, err = mset.addConsumer(oc) 6212 if err != nil { 6213 t.Fatalf("Unexpected error: %v", err) 6214 } 6215 defer o.delete() 6216 6217 for i := 0; i < totalMsgs; i++ { 6218 start := time.Now() 6219 if _, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second); err != nil { 6220 t.Fatalf("Unexpected error: %v", err) 6221 } 6222 gap := time.Since(start) 6223 // 10ms is high but on macs time.Sleep(delay) does not sleep only delay. 
6224 gl, gh := gaps[i]-5*time.Millisecond, gaps[i]+10*time.Millisecond 6225 if gap < gl || gap > gh { 6226 t.Fatalf("Gap is incorrect for %d, expected %v got %v", i, gaps[i], gap) 6227 } 6228 } 6229 }) 6230 } 6231 } 6232 6233 func TestJetStreamConsumerReplayRateNoAck(t *testing.T) { 6234 cases := []struct { 6235 name string 6236 mconfig *StreamConfig 6237 }{ 6238 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage}}, 6239 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage}}, 6240 } 6241 for _, c := range cases { 6242 t.Run(c.name, func(t *testing.T) { 6243 s := RunBasicJetStreamServer(t) 6244 defer s.Shutdown() 6245 6246 mset, err := s.GlobalAccount().addStream(c.mconfig) 6247 if err != nil { 6248 t.Fatalf("Unexpected error adding stream: %v", err) 6249 } 6250 defer mset.delete() 6251 6252 nc := clientConnectToServer(t, s) 6253 defer nc.Close() 6254 6255 // Send 10 msgs 6256 totalMsgs := 10 6257 for i := 0; i < totalMsgs; i++ { 6258 nc.Request("DC", []byte("Hello World"), time.Second) 6259 time.Sleep(time.Duration(rand.Intn(5)) * time.Millisecond) 6260 } 6261 if state := mset.state(); state.Msgs != uint64(totalMsgs) { 6262 t.Fatalf("Expected %d messages, got %d", totalMsgs, state.Msgs) 6263 } 6264 subj := "d.dc" 6265 o, err := mset.addConsumer(&ConsumerConfig{ 6266 Durable: "derek", 6267 DeliverSubject: subj, 6268 AckPolicy: AckNone, 6269 ReplayPolicy: ReplayOriginal, 6270 }) 6271 if err != nil { 6272 t.Fatalf("Unexpected error: %v", err) 6273 } 6274 defer o.delete() 6275 // Sleep a random amount of time. 6276 time.Sleep(time.Duration(rand.Intn(20)) * time.Millisecond) 6277 6278 sub, _ := nc.SubscribeSync(subj) 6279 nc.Flush() 6280 6281 checkFor(t, time.Second, 25*time.Millisecond, func() error { 6282 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != totalMsgs { 6283 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, totalMsgs) 6284 } 6285 return nil 6286 }) 6287 }) 6288 } 6289 } 6290 6291 func TestJetStreamConsumerReplayQuit(t *testing.T) { 6292 cases := []struct { 6293 name string 6294 mconfig *StreamConfig 6295 }{ 6296 {"MemoryStore", &StreamConfig{Name: "DC", Storage: MemoryStorage}}, 6297 {"FileStore", &StreamConfig{Name: "DC", Storage: FileStorage}}, 6298 } 6299 for _, c := range cases { 6300 t.Run(c.name, func(t *testing.T) { 6301 s := RunBasicJetStreamServer(t) 6302 defer s.Shutdown() 6303 6304 mset, err := s.GlobalAccount().addStream(c.mconfig) 6305 if err != nil { 6306 t.Fatalf("Unexpected error adding stream: %v", err) 6307 } 6308 defer mset.delete() 6309 6310 nc := clientConnectToServer(t, s) 6311 defer nc.Close() 6312 6313 // Send 2 msgs 6314 nc.Request("DC", []byte("OK!"), time.Second) 6315 time.Sleep(100 * time.Millisecond) 6316 nc.Request("DC", []byte("OK!"), time.Second) 6317 6318 if state := mset.state(); state.Msgs != 2 { 6319 t.Fatalf("Expected %d messages, got %d", 2, state.Msgs) 6320 } 6321 6322 sub, _ := nc.SubscribeSync(nats.NewInbox()) 6323 defer sub.Unsubscribe() 6324 nc.Flush() 6325 6326 // Now do replay rate to match original. 
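// ReplayOriginal paces delivery to match the recorded gaps between the original
// publishes (the two messages above were sent ~100ms apart), so the replay takes
// real time and deleting the consumer below exercises shutting it down mid-replay.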
6327 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject, ReplayPolicy: ReplayOriginal}) 6328 if err != nil { 6329 t.Fatalf("Unexpected error: %v", err) 6330 } 6331 6332 // Allow loop and deliver / replay go routine to spin up 6333 time.Sleep(50 * time.Millisecond) 6334 base := runtime.NumGoroutine() 6335 o.delete() 6336 6337 checkFor(t, 100*time.Millisecond, 10*time.Millisecond, func() error { 6338 if runtime.NumGoroutine() >= base { 6339 return fmt.Errorf("Consumer go routines still running") 6340 } 6341 return nil 6342 }) 6343 }) 6344 } 6345 } 6346 6347 func TestJetStreamSystemLimits(t *testing.T) { 6348 s := RunRandClientPortServer(t) 6349 defer s.Shutdown() 6350 6351 if _, _, err := s.JetStreamReservedResources(); err == nil { 6352 t.Fatalf("Expected error requesting jetstream reserved resources when not enabled") 6353 } 6354 // Create some accounts. 6355 facc, _ := s.LookupOrRegisterAccount("FOO") 6356 bacc, _ := s.LookupOrRegisterAccount("BAR") 6357 zacc, _ := s.LookupOrRegisterAccount("BAZ") 6358 6359 jsconfig := &JetStreamConfig{MaxMemory: 1024, MaxStore: 8192, StoreDir: t.TempDir()} 6360 if err := s.EnableJetStream(jsconfig); err != nil { 6361 t.Fatalf("Expected no error, got %v", err) 6362 } 6363 6364 if rm, rd, err := s.JetStreamReservedResources(); err != nil { 6365 t.Fatalf("Unexpected error requesting jetstream reserved resources: %v", err) 6366 } else if rm != 0 || rd != 0 { 6367 t.Fatalf("Expected reserved memory and store to be 0, got %d and %d", rm, rd) 6368 } 6369 6370 limits := func(mem int64, store int64) map[string]JetStreamAccountLimits { 6371 return map[string]JetStreamAccountLimits{ 6372 _EMPTY_: { 6373 MaxMemory: mem, 6374 MaxStore: store, 6375 MaxStreams: -1, 6376 MaxConsumers: -1, 6377 }, 6378 } 6379 } 6380 6381 if err := facc.EnableJetStream(limits(24, 192)); err != nil { 6382 t.Fatalf("Unexpected error: %v", err) 6383 } 6384 // Use up rest of our resources in memory 6385 if err := bacc.EnableJetStream(limits(1000, 0)); err != nil { 6386 t.Fatalf("Unexpected error: %v", err) 6387 } 6388 6389 // Now ask for more memory. Should error. 6390 if err := zacc.EnableJetStream(limits(1000, 0)); err == nil { 6391 t.Fatalf("Expected an error when exhausting memory resource limits") 6392 } 6393 // Disk too. 6394 if err := zacc.EnableJetStream(limits(0, 10000)); err == nil { 6395 t.Fatalf("Expected an error when exhausting memory resource limits") 6396 } 6397 facc.DisableJetStream() 6398 bacc.DisableJetStream() 6399 zacc.DisableJetStream() 6400 6401 // Make sure we unreserved resources. 6402 if rm, rd, err := s.JetStreamReservedResources(); err != nil { 6403 t.Fatalf("Unexpected error requesting jetstream reserved resources: %v", err) 6404 } else if rm != 0 || rd != 0 { 6405 t.Fatalf("Expected reserved memory and store to be 0, got %v and %v", friendlyBytes(rm), friendlyBytes(rd)) 6406 } 6407 6408 if err := facc.EnableJetStream(limits(24, 192)); err != nil { 6409 t.Fatalf("Unexpected error: %v", err) 6410 } 6411 // Test Adjust 6412 lim := limits(jsconfig.MaxMemory, jsconfig.MaxStore) 6413 l := lim[_EMPTY_] 6414 l.MaxStreams = 10 6415 l.MaxConsumers = 10 6416 lim[_EMPTY_] = l 6417 if err := facc.UpdateJetStreamLimits(lim); err != nil { 6418 t.Fatalf("Unexpected error updating jetstream account limits: %v", err) 6419 } 6420 6421 var msets []*stream 6422 // Now test max streams and max consumers. Note max consumers is per stream. 
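// The adjusted account limits above allow MaxStreams=10 and MaxConsumers=10, so the
// loop below stays exactly at the stream limit; the consumer limits are exercised
// against both the account and the per-stream MaxConsumers setting further down.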
6423 for i := 0; i < 10; i++ { 6424 mname := fmt.Sprintf("foo.%d", i) 6425 mset, err := facc.addStream(&StreamConfig{Name: strconv.Itoa(i), Storage: MemoryStorage, Subjects: []string{mname}}) 6426 if err != nil { 6427 t.Fatalf("Unexpected error adding stream: %v", err) 6428 } 6429 msets = append(msets, mset) 6430 } 6431 6432 // Remove them all 6433 for _, mset := range msets { 6434 mset.delete() 6435 } 6436 6437 // Now try to add one with bytes limit that would exceed the account limit. 6438 if _, err := facc.addStream(&StreamConfig{Name: "22", Storage: MemoryStorage, MaxBytes: jsconfig.MaxStore * 2}); err == nil { 6439 t.Fatalf("Expected error adding stream over limit") 6440 } 6441 6442 // Replicas can't be > 1 6443 if _, err := facc.addStream(&StreamConfig{Name: "22", Storage: MemoryStorage, Replicas: 10}); err == nil { 6444 t.Fatalf("Expected error adding stream over limit") 6445 } 6446 6447 // Test consumers limit against account limit when the stream does not set a limit 6448 mset, err := facc.addStream(&StreamConfig{Name: "22", Storage: MemoryStorage, Subjects: []string{"foo.22"}}) 6449 if err != nil { 6450 t.Fatalf("Unexpected error adding stream: %v", err) 6451 } 6452 6453 for i := 0; i < 10; i++ { 6454 oname := fmt.Sprintf("O:%d", i) 6455 _, err := mset.addConsumer(&ConsumerConfig{Durable: oname, AckPolicy: AckExplicit}) 6456 if err != nil { 6457 t.Fatalf("Unexpected error: %v", err) 6458 } 6459 } 6460 6461 // This one should fail. 6462 if _, err := mset.addConsumer(&ConsumerConfig{Durable: "O:22", AckPolicy: AckExplicit}); err == nil { 6463 t.Fatalf("Expected error adding consumer over the limit") 6464 } 6465 6466 // Test consumer limit against stream limit 6467 mset.delete() 6468 mset, err = facc.addStream(&StreamConfig{Name: "22", Storage: MemoryStorage, Subjects: []string{"foo.22"}, MaxConsumers: 5}) 6469 if err != nil { 6470 t.Fatalf("Unexpected error adding stream: %v", err) 6471 } 6472 6473 for i := 0; i < 5; i++ { 6474 oname := fmt.Sprintf("O:%d", i) 6475 _, err := mset.addConsumer(&ConsumerConfig{Durable: oname, AckPolicy: AckExplicit}) 6476 if err != nil { 6477 t.Fatalf("Unexpected error: %v", err) 6478 } 6479 } 6480 6481 // This one should fail. 6482 if _, err := mset.addConsumer(&ConsumerConfig{Durable: "O:22", AckPolicy: AckExplicit}); err == nil { 6483 t.Fatalf("Expected error adding consumer over the limit") 6484 } 6485 6486 // Test the account having smaller limits than the stream 6487 mset.delete() 6488 6489 mset, err = facc.addStream(&StreamConfig{Name: "22", Storage: MemoryStorage, Subjects: []string{"foo.22"}, MaxConsumers: 10}) 6490 if err != nil { 6491 t.Fatalf("Unexpected error adding stream: %v", err) 6492 } 6493 6494 l.MaxConsumers = 5 6495 lim[_EMPTY_] = l 6496 if err := facc.UpdateJetStreamLimits(lim); err != nil { 6497 t.Fatalf("Unexpected error updating jetstream account limits: %v", err) 6498 } 6499 6500 for i := 0; i < 5; i++ { 6501 oname := fmt.Sprintf("O:%d", i) 6502 _, err := mset.addConsumer(&ConsumerConfig{Durable: oname, AckPolicy: AckExplicit}) 6503 if err != nil { 6504 t.Fatalf("Unexpected error: %v", err) 6505 } 6506 } 6507 6508 // This one should fail. 
6509 if _, err := mset.addConsumer(&ConsumerConfig{Durable: "O:22", AckPolicy: AckExplicit}); err == nil { 6510 t.Fatalf("Expected error adding consumer over the limit") 6511 } 6512 } 6513 6514 func TestJetStreamSystemLimitsPlacement(t *testing.T) { 6515 const smallSystemLimit = 128 6516 const mediumSystemLimit = smallSystemLimit * 2 6517 const largeSystemLimit = smallSystemLimit * 3 6518 6519 tmpl := ` 6520 listen: 127.0.0.1:-1 6521 server_name: %s 6522 jetstream: { 6523 max_mem_store: _MAXMEM_ 6524 max_file_store: _MAXFILE_ 6525 store_dir: '%s' 6526 } 6527 6528 server_tags: [ 6529 _TAG_ 6530 ] 6531 6532 leaf { 6533 listen: 127.0.0.1:-1 6534 } 6535 cluster { 6536 name: %s 6537 listen: 127.0.0.1:%d 6538 routes = [%s] 6539 } 6540 ` 6541 storeCnf := func(serverName, clusterName, storeDir, conf string) string { 6542 switch serverName { 6543 case "S-1": 6544 conf = strings.Replace(conf, "_MAXMEM_", fmt.Sprint(smallSystemLimit), 1) 6545 conf = strings.Replace(conf, "_MAXFILE_", fmt.Sprint(smallSystemLimit), 1) 6546 return strings.Replace(conf, "_TAG_", "small", 1) 6547 case "S-2": 6548 conf = strings.Replace(conf, "_MAXMEM_", fmt.Sprint(mediumSystemLimit), 1) 6549 conf = strings.Replace(conf, "_MAXFILE_", fmt.Sprint(mediumSystemLimit), 1) 6550 return strings.Replace(conf, "_TAG_", "medium", 1) 6551 case "S-3": 6552 conf = strings.Replace(conf, "_MAXMEM_", fmt.Sprint(largeSystemLimit), 1) 6553 conf = strings.Replace(conf, "_MAXFILE_", fmt.Sprint(largeSystemLimit), 1) 6554 return strings.Replace(conf, "_TAG_", "large", 1) 6555 default: 6556 return conf 6557 } 6558 } 6559 6560 cluster := createJetStreamClusterWithTemplateAndModHook(t, tmpl, "cluster-a", 3, storeCnf) 6561 defer cluster.shutdown() 6562 6563 requestLeaderStepDown := func(clientURL string) error { 6564 nc, err := nats.Connect(clientURL) 6565 if err != nil { 6566 return err 6567 } 6568 defer nc.Close() 6569 6570 ncResp, err := nc.Request(JSApiLeaderStepDown, nil, 3*time.Second) 6571 if err != nil { 6572 return err 6573 } 6574 6575 var resp JSApiLeaderStepDownResponse 6576 if err := json.Unmarshal(ncResp.Data, &resp); err != nil { 6577 return err 6578 } 6579 if resp.Error != nil { 6580 return resp.Error 6581 } 6582 if !resp.Success { 6583 return fmt.Errorf("leader step down request not successful") 6584 } 6585 6586 return nil 6587 } 6588 6589 largeSrv := cluster.servers[2] 6590 // Force large server to be leader 6591 err := checkForErr(15*time.Second, 500*time.Millisecond, func() error { 6592 if largeSrv.JetStreamIsLeader() { 6593 return nil 6594 } 6595 6596 if err := requestLeaderStepDown(largeSrv.ClientURL()); err != nil { 6597 return err 6598 } 6599 return fmt.Errorf("large server is not leader") 6600 }) 6601 if err != nil { 6602 t.Skipf("failed to get desired layout: %s", err) 6603 } 6604 6605 nc, js := jsClientConnect(t, largeSrv) 6606 defer nc.Close() 6607 6608 cases := []struct { 6609 name string 6610 storage nats.StorageType 6611 createMaxBytes int64 6612 serverTag string 6613 wantErr bool 6614 }{ 6615 { 6616 name: "file create large stream on small server", 6617 storage: nats.FileStorage, 6618 createMaxBytes: largeSystemLimit, 6619 serverTag: "small", 6620 wantErr: true, 6621 }, 6622 { 6623 name: "memory create large stream on small server", 6624 storage: nats.MemoryStorage, 6625 createMaxBytes: largeSystemLimit, 6626 serverTag: "small", 6627 wantErr: true, 6628 }, 6629 { 6630 name: "file create large stream on medium server", 6631 storage: nats.FileStorage, 6632 createMaxBytes: largeSystemLimit, 6633 serverTag: "medium", 6634 
wantErr: true, 6635 }, 6636 { 6637 name: "memory create large stream on medium server", 6638 storage: nats.MemoryStorage, 6639 createMaxBytes: largeSystemLimit, 6640 serverTag: "medium", 6641 wantErr: true, 6642 }, 6643 { 6644 name: "file create large stream on large server", 6645 storage: nats.FileStorage, 6646 createMaxBytes: largeSystemLimit, 6647 serverTag: "large", 6648 }, 6649 { 6650 name: "memory create large stream on large server", 6651 storage: nats.MemoryStorage, 6652 createMaxBytes: largeSystemLimit, 6653 serverTag: "large", 6654 }, 6655 } 6656 6657 for i := 0; i < len(cases) && !t.Failed(); i++ { 6658 c := cases[i] 6659 t.Run(c.name, func(st *testing.T) { 6660 _, err := js.AddStream(&nats.StreamConfig{ 6661 Name: "TEST", 6662 Subjects: []string{"foo"}, 6663 Storage: c.storage, 6664 MaxBytes: c.createMaxBytes, 6665 Placement: &nats.Placement{ 6666 Cluster: "cluster-a", 6667 Tags: []string{c.serverTag}, 6668 }, 6669 }) 6670 if c.wantErr && err == nil { 6671 st.Fatalf("unexpected stream create success, maxBytes=%d, tag=%s", 6672 c.createMaxBytes, c.serverTag) 6673 } else if !c.wantErr && err != nil { 6674 st.Fatalf("unexpected error: %s", err) 6675 } 6676 6677 if err == nil { 6678 err = js.DeleteStream("TEST") 6679 require_NoError(st, err) 6680 } 6681 }) 6682 } 6683 6684 // These next two tests should fail because although the stream fits in the 6685 // large and medium server, it doesn't fit on the small server. 6686 si, err := js.AddStream(&nats.StreamConfig{ 6687 Name: "TEST", 6688 Subjects: []string{"foo"}, 6689 Storage: nats.FileStorage, 6690 MaxBytes: smallSystemLimit + 1, 6691 Replicas: 3, 6692 }) 6693 if err == nil { 6694 t.Fatalf("unexpected file stream create success, maxBytes=%d, replicas=%d", 6695 si.Config.MaxBytes, si.Config.Replicas) 6696 } 6697 6698 si, err = js.AddStream(&nats.StreamConfig{ 6699 Name: "TEST", 6700 Subjects: []string{"foo"}, 6701 Storage: nats.MemoryStorage, 6702 MaxBytes: smallSystemLimit + 1, 6703 Replicas: 3, 6704 }) 6705 if err == nil { 6706 t.Fatalf("unexpected memory stream create success, maxBytes=%d, replicas=%d", 6707 si.Config.MaxBytes, si.Config.Replicas) 6708 } 6709 } 6710 6711 func TestJetStreamStreamLimitUpdate(t *testing.T) { 6712 s := RunBasicJetStreamServer(t) 6713 defer s.Shutdown() 6714 6715 err := s.GlobalAccount().UpdateJetStreamLimits(map[string]JetStreamAccountLimits{ 6716 _EMPTY_: { 6717 MaxMemory: 128, 6718 MaxStore: 128, 6719 MaxStreams: 1, 6720 }, 6721 }) 6722 require_NoError(t, err) 6723 6724 nc, js := jsClientConnect(t, s) 6725 defer nc.Close() 6726 6727 for _, storage := range []nats.StorageType{nats.MemoryStorage, nats.FileStorage} { 6728 _, err = js.AddStream(&nats.StreamConfig{ 6729 Name: "TEST", 6730 Subjects: []string{"foo"}, 6731 Storage: storage, 6732 MaxBytes: 32, 6733 }) 6734 require_NoError(t, err) 6735 6736 _, err = js.UpdateStream(&nats.StreamConfig{ 6737 Name: "TEST", 6738 Subjects: []string{"foo"}, 6739 Storage: storage, 6740 MaxBytes: 16, 6741 }) 6742 require_NoError(t, err) 6743 6744 require_NoError(t, js.DeleteStream("TEST")) 6745 } 6746 } 6747 6748 func TestJetStreamStreamStorageTrackingAndLimits(t *testing.T) { 6749 s := RunBasicJetStreamServer(t) 6750 defer s.Shutdown() 6751 6752 gacc := s.GlobalAccount() 6753 6754 al := map[string]JetStreamAccountLimits{ 6755 _EMPTY_: { 6756 MaxMemory: 8192, 6757 MaxStore: -1, 6758 MaxStreams: -1, 6759 MaxConsumers: -1, 6760 }, 6761 } 6762 6763 if err := gacc.UpdateJetStreamLimits(al); err != nil { 6764 t.Fatalf("Unexpected error updating jetstream account limits: 
%v", err) 6765 } 6766 6767 mset, err := gacc.addStream(&StreamConfig{Name: "LIMITS", Storage: MemoryStorage, Retention: WorkQueuePolicy}) 6768 if err != nil { 6769 t.Fatalf("Unexpected error adding stream: %v", err) 6770 } 6771 defer mset.delete() 6772 6773 nc := clientConnectToServer(t, s) 6774 defer nc.Close() 6775 6776 toSend := 100 6777 for i := 0; i < toSend; i++ { 6778 sendStreamMsg(t, nc, "LIMITS", "Hello World!") 6779 } 6780 6781 state := mset.state() 6782 usage := gacc.JetStreamUsage() 6783 6784 // Make sure these are working correctly. 6785 if state.Bytes != usage.Memory { 6786 t.Fatalf("Expected to have stream bytes match memory usage, %d vs %d", state.Bytes, usage.Memory) 6787 } 6788 if usage.Streams != 1 { 6789 t.Fatalf("Expected to have 1 stream, got %d", usage.Streams) 6790 } 6791 6792 // Do second stream. 6793 mset2, err := gacc.addStream(&StreamConfig{Name: "NUM22", Storage: MemoryStorage, Retention: WorkQueuePolicy}) 6794 if err != nil { 6795 t.Fatalf("Unexpected error adding stream: %v", err) 6796 } 6797 defer mset2.delete() 6798 6799 for i := 0; i < toSend; i++ { 6800 sendStreamMsg(t, nc, "NUM22", "Hello World!") 6801 } 6802 6803 stats2 := mset2.state() 6804 usage = gacc.JetStreamUsage() 6805 6806 if usage.Memory != (state.Bytes + stats2.Bytes) { 6807 t.Fatalf("Expected to track both streams, account is %v, stream1 is %v, stream2 is %v", usage.Memory, state.Bytes, stats2.Bytes) 6808 } 6809 6810 // Make sure delete works. 6811 mset2.delete() 6812 stats2 = mset2.state() 6813 usage = gacc.JetStreamUsage() 6814 6815 if usage.Memory != (state.Bytes + stats2.Bytes) { 6816 t.Fatalf("Expected to track both streams, account is %v, stream1 is %v, stream2 is %v", usage.Memory, state.Bytes, stats2.Bytes) 6817 } 6818 6819 // Now drain the first one by consuming the messages. 6820 o, err := mset.addConsumer(workerModeConfig("WQ")) 6821 if err != nil { 6822 t.Fatalf("Expected no error with registered interest, got %v", err) 6823 } 6824 defer o.delete() 6825 6826 for i := 0; i < toSend; i++ { 6827 msg, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 6828 if err != nil { 6829 t.Fatalf("Unexpected error: %v", err) 6830 } 6831 msg.Respond(nil) 6832 } 6833 nc.Flush() 6834 6835 state = mset.state() 6836 checkFor(t, time.Second, 15*time.Millisecond, func() error { 6837 usage = gacc.JetStreamUsage() 6838 if usage.Memory != 0 { 6839 return fmt.Errorf("Expected usage memory to be 0, got %d", usage.Memory) 6840 } 6841 return nil 6842 }) 6843 6844 // Now send twice the number of messages. Should receive an error at some point, and we will check usage against limits. 
6845 var errSeen string 6846 for i := 0; i < toSend*2; i++ { 6847 resp, _ := nc.Request("LIMITS", []byte("The quick brown fox jumped over the..."), 50*time.Millisecond) 6848 if string(resp.Data) != OK { 6849 errSeen = string(resp.Data) 6850 break 6851 } 6852 } 6853 6854 if errSeen == "" { 6855 t.Fatalf("Expected to see an error when exceeding the account limits") 6856 } 6857 6858 state = mset.state() 6859 var lim JetStreamAccountLimits 6860 checkFor(t, time.Second, 15*time.Millisecond, func() error { 6861 usage = gacc.JetStreamUsage() 6862 lim = al[_EMPTY_] 6863 if usage.Memory > uint64(lim.MaxMemory) { 6864 return fmt.Errorf("Expected memory to not exceed limit of %d, got %d", lim.MaxMemory, usage.Memory) 6865 } 6866 return nil 6867 }) 6868 6869 // make sure that unlimited accounts work 6870 lim.MaxMemory = -1 6871 6872 if err := gacc.UpdateJetStreamLimits(al); err != nil { 6873 t.Fatalf("Unexpected error updating jetstream account limits: %v", err) 6874 } 6875 6876 for i := 0; i < toSend; i++ { 6877 sendStreamMsg(t, nc, "LIMITS", "Hello World!") 6878 } 6879 } 6880 6881 func TestJetStreamStreamFileTrackingAndLimits(t *testing.T) { 6882 s := RunBasicJetStreamServer(t) 6883 defer s.Shutdown() 6884 6885 gacc := s.GlobalAccount() 6886 6887 al := map[string]JetStreamAccountLimits{ 6888 _EMPTY_: { 6889 MaxMemory: 8192, 6890 MaxStore: 9600, 6891 MaxStreams: -1, 6892 MaxConsumers: -1, 6893 }, 6894 } 6895 6896 if err := gacc.UpdateJetStreamLimits(al); err != nil { 6897 t.Fatalf("Unexpected error updating jetstream account limits: %v", err) 6898 } 6899 6900 mconfig := &StreamConfig{Name: "LIMITS", Storage: FileStorage, Retention: WorkQueuePolicy} 6901 mset, err := gacc.addStream(mconfig) 6902 if err != nil { 6903 t.Fatalf("Unexpected error adding stream: %v", err) 6904 } 6905 defer mset.delete() 6906 6907 nc := clientConnectToServer(t, s) 6908 defer nc.Close() 6909 6910 toSend := 100 6911 for i := 0; i < toSend; i++ { 6912 sendStreamMsg(t, nc, "LIMITS", "Hello World!") 6913 } 6914 6915 state := mset.state() 6916 usage := gacc.JetStreamUsage() 6917 6918 // Make sure these are working correctly. 6919 if usage.Store != state.Bytes { 6920 t.Fatalf("Expected to have stream bytes match the store usage, %d vs %d", usage.Store, state.Bytes) 6921 } 6922 if usage.Streams != 1 { 6923 t.Fatalf("Expected to have 1 stream, got %d", usage.Streams) 6924 } 6925 6926 // Do second stream. 6927 mconfig2 := &StreamConfig{Name: "NUM22", Storage: FileStorage, Retention: WorkQueuePolicy} 6928 mset2, err := gacc.addStream(mconfig2) 6929 if err != nil { 6930 t.Fatalf("Unexpected error adding stream: %v", err) 6931 } 6932 defer mset2.delete() 6933 6934 for i := 0; i < toSend; i++ { 6935 sendStreamMsg(t, nc, "NUM22", "Hello World!") 6936 } 6937 6938 stats2 := mset2.state() 6939 usage = gacc.JetStreamUsage() 6940 6941 if usage.Store != (state.Bytes + stats2.Bytes) { 6942 t.Fatalf("Expected to track both streams, usage is %v, stream1 is %v, stream2 is %v", usage.Store, state.Bytes, stats2.Bytes) 6943 } 6944 6945 // Make sure delete works. 6946 mset2.delete() 6947 stats2 = mset2.state() 6948 usage = gacc.JetStreamUsage() 6949 6950 if usage.Store != (state.Bytes + stats2.Bytes) { 6951 t.Fatalf("Expected to track both streams, account is %v, stream1 is %v, stream2 is %v", usage.Store, state.Bytes, stats2.Bytes) 6952 } 6953 6954 // Now drain the first one by consuming the messages. 
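// With WorkQueuePolicy retention each acknowledged message is removed from the
// stream, so draining the consumer below empties the stream and releases the
// usage it had reserved against the account.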
6955 o, err := mset.addConsumer(workerModeConfig("WQ")) 6956 if err != nil { 6957 t.Fatalf("Expected no error with registered interest, got %v", err) 6958 } 6959 defer o.delete() 6960 6961 for i := 0; i < toSend; i++ { 6962 msg, err := nc.Request(o.requestNextMsgSubject(), nil, time.Second) 6963 if err != nil { 6964 t.Fatalf("Unexpected error: %v", err) 6965 } 6966 msg.Respond(nil) 6967 } 6968 nc.Flush() 6969 6970 state = mset.state() 6971 usage = gacc.JetStreamUsage() 6972 6973 if usage.Memory != 0 { 6974 t.Fatalf("Expected usage memeory to be 0, got %d", usage.Memory) 6975 } 6976 6977 // Now send twice the number of messages. Should receive an error at some point, and we will check usage against limits. 6978 var errSeen string 6979 for i := 0; i < toSend*2; i++ { 6980 resp, _ := nc.Request("LIMITS", []byte("The quick brown fox jumped over the..."), 50*time.Millisecond) 6981 if string(resp.Data) != OK { 6982 errSeen = string(resp.Data) 6983 break 6984 } 6985 } 6986 6987 if errSeen == "" { 6988 t.Fatalf("Expected to see an error when exceeding the account limits") 6989 } 6990 6991 state = mset.state() 6992 usage = gacc.JetStreamUsage() 6993 6994 lim := al[_EMPTY_] 6995 if usage.Memory > uint64(lim.MaxMemory) { 6996 t.Fatalf("Expected memory to not exceed limit of %d, got %d", lim.MaxMemory, usage.Memory) 6997 } 6998 } 6999 7000 func TestJetStreamTieredLimits(t *testing.T) { 7001 s := RunBasicJetStreamServer(t) 7002 defer s.Shutdown() 7003 7004 gacc := s.GlobalAccount() 7005 7006 tFail := map[string]JetStreamAccountLimits{ 7007 "nottheer": { 7008 MaxMemory: 8192, 7009 MaxStore: 9600, 7010 MaxStreams: -1, 7011 MaxConsumers: -1, 7012 }, 7013 } 7014 7015 if err := gacc.UpdateJetStreamLimits(tFail); err != nil { 7016 t.Fatalf("Unexpected error updating jetstream account limits: %v", err) 7017 } 7018 7019 mconfig := &StreamConfig{Name: "LIMITS", Storage: FileStorage, Retention: WorkQueuePolicy} 7020 mset, err := gacc.addStream(mconfig) 7021 defer mset.delete() 7022 require_Error(t, err) 7023 require_Contains(t, err.Error(), "no JetStream default or applicable tiered limit present") 7024 7025 tPass := map[string]JetStreamAccountLimits{ 7026 "R1": { 7027 MaxMemory: 8192, 7028 MaxStore: 9600, 7029 MaxStreams: -1, 7030 MaxConsumers: -1, 7031 }, 7032 } 7033 7034 if err := gacc.UpdateJetStreamLimits(tPass); err != nil { 7035 t.Fatalf("Unexpected error updating jetstream account limits: %v", err) 7036 } 7037 } 7038 7039 type obsi struct { 7040 cfg ConsumerConfig 7041 ack int 7042 } 7043 7044 type info struct { 7045 cfg StreamConfig 7046 state StreamState 7047 obs []obsi 7048 } 7049 7050 func TestJetStreamSimpleFileRecovery(t *testing.T) { 7051 base := runtime.NumGoroutine() 7052 7053 s := RunBasicJetStreamServer(t) 7054 defer s.Shutdown() 7055 7056 acc := s.GlobalAccount() 7057 7058 ostate := make(map[string]info) 7059 7060 nid := nuid.New() 7061 randomSubject := func() string { 7062 nid.RandomizePrefix() 7063 return fmt.Sprintf("SUBJ.%s", nid.Next()) 7064 } 7065 7066 nc := clientConnectToServer(t, s) 7067 defer nc.Close() 7068 7069 numStreams := 10 7070 for i := 1; i <= numStreams; i++ { 7071 msetName := fmt.Sprintf("MMS-%d", i) 7072 subjects := []string{randomSubject(), randomSubject(), randomSubject()} 7073 msetConfig := StreamConfig{ 7074 Name: msetName, 7075 Storage: FileStorage, 7076 Subjects: subjects, 7077 MaxMsgs: 100, 7078 } 7079 mset, err := acc.addStream(&msetConfig) 7080 if err != nil { 7081 t.Fatalf("Unexpected error adding stream %q: %v", msetName, err) 7082 } 7083 7084 toSend := 
rand.Intn(100) + 1 7085 for n := 1; n <= toSend; n++ { 7086 msg := fmt.Sprintf("Hello %d", n*i) 7087 subj := subjects[rand.Intn(len(subjects))] 7088 sendStreamMsg(t, nc, subj, msg) 7089 } 7090 // Create up to 5 consumers. 7091 numObs := rand.Intn(5) + 1 7092 var obs []obsi 7093 for n := 1; n <= numObs; n++ { 7094 oname := fmt.Sprintf("WQ-%d-%d", i, n) 7095 o, err := mset.addConsumer(workerModeConfig(oname)) 7096 if err != nil { 7097 t.Fatalf("Unexpected error: %v", err) 7098 } 7099 // Now grab some messages. 7100 toReceive := rand.Intn(toSend) + 1 7101 rsubj := o.requestNextMsgSubject() 7102 for r := 0; r < toReceive; r++ { 7103 resp, err := nc.Request(rsubj, nil, time.Second) 7104 require_NoError(t, err) 7105 if resp != nil { 7106 resp.Respond(nil) 7107 } 7108 } 7109 obs = append(obs, obsi{o.config(), toReceive}) 7110 } 7111 ostate[msetName] = info{mset.config(), mset.state(), obs} 7112 } 7113 pusage := acc.JetStreamUsage() 7114 nc.Flush() 7115 7116 // Shutdown and restart and make sure things come back. 7117 sd := s.JetStreamConfig().StoreDir 7118 s.Shutdown() 7119 7120 checkFor(t, 2*time.Second, 200*time.Millisecond, func() error { 7121 delta := (runtime.NumGoroutine() - base) 7122 if delta > 3 { 7123 return fmt.Errorf("%d Go routines still exist post Shutdown()", delta) 7124 } 7125 return nil 7126 }) 7127 7128 s = RunJetStreamServerOnPort(-1, sd) 7129 defer s.Shutdown() 7130 7131 acc = s.GlobalAccount() 7132 7133 nusage := acc.JetStreamUsage() 7134 if !reflect.DeepEqual(nusage, pusage) { 7135 t.Fatalf("Usage does not match after restore: %+v vs %+v", nusage, pusage) 7136 } 7137 7138 for mname, info := range ostate { 7139 mset, err := acc.lookupStream(mname) 7140 if err != nil { 7141 t.Fatalf("Expected to find a stream for %q", mname) 7142 } 7143 if state := mset.state(); !reflect.DeepEqual(state, info.state) { 7144 t.Fatalf("State does not match: %+v vs %+v", state, info.state) 7145 } 7146 if cfg := mset.config(); !reflect.DeepEqual(cfg, info.cfg) { 7147 t.Fatalf("Configs do not match: %+v vs %+v", cfg, info.cfg) 7148 } 7149 // Consumers. 7150 if mset.numConsumers() != len(info.obs) { 7151 t.Fatalf("Number of consumers do not match: %d vs %d", mset.numConsumers(), len(info.obs)) 7152 } 7153 for _, oi := range info.obs { 7154 if o := mset.lookupConsumer(oi.cfg.Durable); o != nil { 7155 if uint64(oi.ack+1) != o.nextSeq() { 7156 t.Fatalf("Consumer next seq is not correct: %d vs %d", oi.ack+1, o.nextSeq()) 7157 } 7158 } else { 7159 t.Fatalf("Expected to get an consumer") 7160 } 7161 } 7162 } 7163 } 7164 7165 func TestJetStreamPushConsumerFlowControl(t *testing.T) { 7166 s := RunBasicJetStreamServer(t) 7167 defer s.Shutdown() 7168 7169 // Client for API requests. 
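// The durable below enables FlowControl with a heartbeat, so the server will
// interleave 100-status "FlowControl Request" messages with the data; the delivery
// loop further down must respond to them for delivery to continue.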
7170 nc, js := jsClientConnect(t, s)
7171 defer nc.Close()
7172 
7173 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil {
7174 t.Fatalf("Unexpected error: %v", err)
7175 }
7176 
7177 sub, err := nc.SubscribeSync(nats.NewInbox())
7178 require_NoError(t, err)
7179 defer sub.Unsubscribe()
7180 
7181 obsReq := CreateConsumerRequest{
7182 Stream: "TEST",
7183 Config: ConsumerConfig{
7184 Durable: "dlc",
7185 DeliverSubject: sub.Subject,
7186 FlowControl: true,
7187 Heartbeat: 5 * time.Second,
7188 },
7189 }
7190 req, err := json.Marshal(obsReq)
7191 require_NoError(t, err)
7192 resp, err := nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", "dlc"), req, time.Second)
7193 require_NoError(t, err)
7194 var ccResp JSApiConsumerCreateResponse
7195 if err = json.Unmarshal(resp.Data, &ccResp); err != nil {
7196 t.Fatalf("Unexpected error: %v", err)
7197 }
7198 if ccResp.Error != nil {
7199 t.Fatalf("Unexpected error: %+v", ccResp.Error)
7200 }
7201 
7202 // Grab the low level consumer so we can manually set the fc max.
7203 if mset, err := s.GlobalAccount().lookupStream("TEST"); err != nil {
7204 t.Fatalf("Error looking up stream: %v", err)
7205 } else if obs := mset.lookupConsumer("dlc"); obs == nil {
7206 t.Fatalf("Error looking up consumer %q", "dlc")
7207 } else {
7208 obs.mu.Lock()
7209 obs.setMaxPendingBytes(16 * 1024)
7210 obs.mu.Unlock()
7211 }
7212 
7213 msgSize := 1024
7214 msg := make([]byte, msgSize)
7215 crand.Read(msg)
7216 
7217 sendBatch := func(n int) {
7218 for i := 0; i < n; i++ {
7219 if _, err := js.Publish("TEST", msg); err != nil {
7220 t.Fatalf("Unexpected publish error: %v", err)
7221 }
7222 }
7223 }
7224 
7225 checkSubPending := func(numExpected int) {
7226 t.Helper()
7227 checkFor(t, time.Second, 100*time.Millisecond, func() error {
7228 if nmsgs, _, err := sub.Pending(); err != nil || nmsgs != numExpected {
7229 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected)
7230 }
7231 return nil
7232 })
7233 }
7234 
7235 sendBatch(100)
7236 checkSubPending(2) // First data and flowcontrol from slow start pause.
7237 
7238 var n int
7239 for m, err := sub.NextMsg(time.Second); err == nil; m, err = sub.NextMsg(time.Second) {
7240 if m.Subject == "TEST" {
7241 n++
7242 } else {
7243 // This should be a FC control message.
7244 if m.Header.Get("Status") != "100" {
7245 t.Fatalf("Expected a 100 status code, got %q", m.Header.Get("Status"))
7246 }
7247 if m.Header.Get("Description") != "FlowControl Request" {
7248 t.Fatalf("Wrong description, got %q", m.Header.Get("Description"))
7249 }
7250 m.Respond(nil)
7251 }
7252 }
7253 
7254 if n != 100 {
7255 t.Fatalf("Expected to receive all 100 messages but got %d", n)
7256 }
7257 }
7258 
7259 func TestJetStreamFlowControlRequiresHeartbeats(t *testing.T) {
7260 s := RunBasicJetStreamServer(t)
7261 defer s.Shutdown()
7262 
7263 nc, js := jsClientConnect(t, s)
7264 defer nc.Close()
7265 
7266 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil {
7267 t.Fatalf("Unexpected error: %v", err)
7268 }
7269 
7270 if _, err := js.AddConsumer("TEST", &nats.ConsumerConfig{
7271 Durable: "dlc",
7272 DeliverSubject: nats.NewInbox(),
7273 FlowControl: true,
7274 }); err == nil || !IsNatsErr(err, JSConsumerWithFlowControlNeedsHeartbeats) {
7275 t.Fatalf("Unexpected error: %v", err)
7276 }
7277 }
7278 
7279 func TestJetStreamPushConsumerIdleHeartbeats(t *testing.T) {
7280 s := RunBasicJetStreamServer(t)
7281 defer s.Shutdown()
7282 
7283 // Client for API requests.
7284 nc, js := jsClientConnect(t, s) 7285 defer nc.Close() 7286 7287 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil { 7288 t.Fatalf("Unexpected error: %v", err) 7289 } 7290 7291 sub, err := nc.SubscribeSync(nats.NewInbox()) 7292 require_NoError(t, err) 7293 defer sub.Unsubscribe() 7294 7295 // Test errors first 7296 obsReq := CreateConsumerRequest{ 7297 Stream: "TEST", 7298 Config: ConsumerConfig{ 7299 DeliverSubject: sub.Subject, 7300 Heartbeat: time.Millisecond, 7301 }, 7302 } 7303 req, err := json.Marshal(obsReq) 7304 require_NoError(t, err) 7305 resp, err := nc.Request(fmt.Sprintf(JSApiConsumerCreateT, "TEST"), req, time.Second) 7306 require_NoError(t, err) 7307 var ccResp JSApiConsumerCreateResponse 7308 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 7309 t.Fatalf("Unexpected error: %v", err) 7310 } 7311 if ccResp.Error == nil { 7312 t.Fatalf("Expected an error, got none") 7313 } 7314 // Set acceptable heartbeat. 7315 obsReq.Config.Heartbeat = 100 * time.Millisecond 7316 req, err = json.Marshal(obsReq) 7317 require_NoError(t, err) 7318 resp, err = nc.Request(fmt.Sprintf(JSApiConsumerCreateT, "TEST"), req, time.Second) 7319 require_NoError(t, err) 7320 ccResp.Error = nil 7321 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 7322 t.Fatalf("Unexpected error: %v", err) 7323 } 7324 checkFor(t, time.Second, 20*time.Millisecond, func() error { 7325 if nmsgs, _, err := sub.Pending(); err != nil || nmsgs < 9 { 7326 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, 9) 7327 } 7328 return nil 7329 }) 7330 m, _ := sub.NextMsg(0) 7331 if m.Header.Get("Status") != "100" { 7332 t.Fatalf("Expected a 100 status code, got %q", m.Header.Get("Status")) 7333 } 7334 if m.Header.Get("Description") != "Idle Heartbeat" { 7335 t.Fatalf("Wrong description, got %q", m.Header.Get("Description")) 7336 } 7337 } 7338 7339 func TestJetStreamPushConsumerIdleHeartbeatsWithFilterSubject(t *testing.T) { 7340 s := RunBasicJetStreamServer(t) 7341 defer s.Shutdown() 7342 7343 // Client for API requests. 
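// Idle heartbeats are only sent while the consumer has nothing to deliver; below
// only "foo" is published while the consumer filters on "bar", so heartbeat status
// messages should still arrive on the delivery inbox.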
7344 nc, js := jsClientConnect(t, s) 7345 defer nc.Close() 7346 7347 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo", "bar"}}); err != nil { 7348 t.Fatalf("Unexpected error: %v", err) 7349 } 7350 7351 hbC := make(chan *nats.Msg, 8) 7352 sub, err := nc.ChanSubscribe(nats.NewInbox(), hbC) 7353 require_NoError(t, err) 7354 defer sub.Unsubscribe() 7355 7356 obsReq := CreateConsumerRequest{ 7357 Stream: "TEST", 7358 Config: ConsumerConfig{ 7359 DeliverSubject: sub.Subject, 7360 FilterSubject: "bar", 7361 Heartbeat: 100 * time.Millisecond, 7362 }, 7363 } 7364 7365 req, err := json.Marshal(obsReq) 7366 require_NoError(t, err) 7367 resp, err := nc.Request(fmt.Sprintf(JSApiConsumerCreateT, "TEST"), req, time.Second) 7368 require_NoError(t, err) 7369 var ccResp JSApiConsumerCreateResponse 7370 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 7371 t.Fatalf("Unexpected error: %v", err) 7372 } 7373 7374 st := time.NewTicker(10 * time.Millisecond) 7375 defer st.Stop() 7376 7377 done := time.NewTimer(time.Second) 7378 defer done.Stop() 7379 7380 for { 7381 select { 7382 case <-st.C: 7383 js.Publish("foo", []byte("HELLO FOO")) 7384 case <-done.C: 7385 t.Fatalf("Expected to have seen idle heartbeats for consumer") 7386 case <-hbC: 7387 return 7388 } 7389 } 7390 } 7391 7392 func TestJetStreamPushConsumerIdleHeartbeatsWithNoInterest(t *testing.T) { 7393 s := RunBasicJetStreamServer(t) 7394 defer s.Shutdown() 7395 7396 // Client for API requests. 7397 nc, js := jsClientConnect(t, s) 7398 defer nc.Close() 7399 7400 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil { 7401 t.Fatalf("Unexpected error: %v", err) 7402 } 7403 7404 dsubj := "d.22" 7405 hbC := make(chan *nats.Msg, 8) 7406 sub, err := nc.ChanSubscribe("d.>", hbC) 7407 require_NoError(t, err) 7408 defer sub.Unsubscribe() 7409 7410 obsReq := CreateConsumerRequest{ 7411 Stream: "TEST", 7412 Config: ConsumerConfig{ 7413 DeliverSubject: dsubj, 7414 Heartbeat: 100 * time.Millisecond, 7415 }, 7416 } 7417 7418 req, err := json.Marshal(obsReq) 7419 require_NoError(t, err) 7420 resp, err := nc.Request(fmt.Sprintf(JSApiConsumerCreateT, "TEST"), req, time.Second) 7421 require_NoError(t, err) 7422 var ccResp JSApiConsumerCreateResponse 7423 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 7424 t.Fatalf("Unexpected error: %v", err) 7425 } 7426 if ccResp.Error != nil { 7427 t.Fatalf("Unexpected error: %+v", ccResp.Error) 7428 } 7429 7430 done := time.NewTimer(400 * time.Millisecond) 7431 defer done.Stop() 7432 7433 for { 7434 select { 7435 case <-done.C: 7436 return 7437 case m := <-hbC: 7438 if m.Header.Get("Status") == "100" { 7439 t.Fatalf("Did not expect to see a heartbeat with no formal interest") 7440 } 7441 } 7442 } 7443 } 7444 7445 func TestJetStreamInfoAPIWithHeaders(t *testing.T) { 7446 s := RunBasicJetStreamServer(t) 7447 defer s.Shutdown() 7448 7449 // Client for API requests. 
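// The account info request below deliberately carries extra headers and a payload;
// the API should ignore them and return a normal, non-error response.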
7450 nc := clientConnectToServer(t, s) 7451 defer nc.Close() 7452 7453 m := nats.NewMsg(JSApiAccountInfo) 7454 m.Header.Add("Accept-Encoding", "json") 7455 m.Header.Add("Authorization", "s3cr3t") 7456 m.Data = []byte("HELLO-JS!") 7457 7458 resp, err := nc.RequestMsg(m, time.Second) 7459 require_NoError(t, err) 7460 7461 var info JSApiAccountInfoResponse 7462 if err := json.Unmarshal(resp.Data, &info); err != nil { 7463 t.Fatalf("Unexpected error: %v", err) 7464 } 7465 if info.Error != nil { 7466 t.Fatalf("Received an error: %+v", info.Error) 7467 } 7468 } 7469 7470 func TestJetStreamRequestAPI(t *testing.T) { 7471 s := RunBasicJetStreamServer(t) 7472 defer s.Shutdown() 7473 7474 // Client for API requests. 7475 nc := clientConnectToServer(t, s) 7476 defer nc.Close() 7477 7478 // This will get the current information about usage and limits for this account. 7479 resp, err := nc.Request(JSApiAccountInfo, nil, time.Second) 7480 require_NoError(t, err) 7481 var info JSApiAccountInfoResponse 7482 if err := json.Unmarshal(resp.Data, &info); err != nil { 7483 t.Fatalf("Unexpected error: %v", err) 7484 } 7485 7486 // Now create a stream. 7487 msetCfg := StreamConfig{ 7488 Name: "MSET22", 7489 Storage: FileStorage, 7490 Subjects: []string{"foo", "bar", "baz"}, 7491 MaxMsgs: 100, 7492 } 7493 req, err := json.Marshal(msetCfg) 7494 require_NoError(t, err) 7495 resp, _ = nc.Request(fmt.Sprintf(JSApiStreamCreateT, msetCfg.Name), req, time.Second) 7496 var scResp JSApiStreamCreateResponse 7497 if err := json.Unmarshal(resp.Data, &scResp); err != nil { 7498 t.Fatalf("Unexpected error: %v", err) 7499 } 7500 if scResp.StreamInfo == nil || scResp.Error != nil { 7501 t.Fatalf("Did not receive correct response: %+v", scResp.Error) 7502 } 7503 if time.Since(scResp.Created) > time.Second { 7504 t.Fatalf("Created time seems wrong: %v\n", scResp.Created) 7505 } 7506 7507 // Check that the name in config has to match the name in the subject 7508 resp, _ = nc.Request(fmt.Sprintf(JSApiStreamCreateT, "BOB"), req, time.Second) 7509 scResp.Error, scResp.StreamInfo = nil, nil 7510 if err := json.Unmarshal(resp.Data, &scResp); err != nil { 7511 t.Fatalf("Unexpected error: %v", err) 7512 } 7513 checkNatsError(t, scResp.Error, JSStreamMismatchErr) 7514 7515 // Check that update works. 7516 msetCfg.Subjects = []string{"foo", "bar", "baz"} 7517 msetCfg.MaxBytes = 2222222 7518 req, err = json.Marshal(msetCfg) 7519 require_NoError(t, err) 7520 resp, _ = nc.Request(fmt.Sprintf(JSApiStreamUpdateT, msetCfg.Name), req, time.Second) 7521 scResp.Error, scResp.StreamInfo = nil, nil 7522 if err := json.Unmarshal(resp.Data, &scResp); err != nil { 7523 t.Fatalf("Unexpected error: %v", err) 7524 } 7525 if scResp.StreamInfo == nil || scResp.Error != nil { 7526 t.Fatalf("Did not receive correct response: %+v", scResp.Error) 7527 } 7528 7529 // Check that updating a non existing stream fails 7530 cfg := StreamConfig{ 7531 Name: "UNKNOWN_STREAM", 7532 Storage: FileStorage, 7533 Subjects: []string{"foo"}, 7534 } 7535 req, err = json.Marshal(cfg) 7536 require_NoError(t, err) 7537 resp, _ = nc.Request(fmt.Sprintf(JSApiStreamUpdateT, cfg.Name), req, time.Second) 7538 scResp.Error, scResp.StreamInfo = nil, nil 7539 if err := json.Unmarshal(resp.Data, &scResp); err != nil { 7540 t.Fatalf("Unexpected error: %v", err) 7541 } 7542 if scResp.StreamInfo != nil || scResp.Error == nil || scResp.Error.Code != 404 { 7543 t.Fatalf("Unexpected error: %+v", scResp.Error) 7544 } 7545 7546 // Now lookup info again and see that we can see the new stream. 
7547 resp, err = nc.Request(JSApiAccountInfo, nil, time.Second) 7548 require_NoError(t, err) 7549 if err = json.Unmarshal(resp.Data, &info); err != nil { 7550 t.Fatalf("Unexpected error: %v", err) 7551 } 7552 if info.Streams != 1 { 7553 t.Fatalf("Expected to see 1 Stream, got %d", info.Streams) 7554 } 7555 7556 // Make sure list names works. 7557 resp, err = nc.Request(JSApiStreams, nil, time.Second) 7558 require_NoError(t, err) 7559 var namesResponse JSApiStreamNamesResponse 7560 if err = json.Unmarshal(resp.Data, &namesResponse); err != nil { 7561 t.Fatalf("Unexpected error: %v", err) 7562 } 7563 7564 if len(namesResponse.Streams) != 1 { 7565 t.Fatalf("Expected only 1 stream but got %d", len(namesResponse.Streams)) 7566 } 7567 if namesResponse.Total != 1 { 7568 t.Fatalf("Expected total to be 1 but got %d", namesResponse.Total) 7569 } 7570 if namesResponse.Offset != 0 { 7571 t.Fatalf("Expected offset to be 0 but got %d", namesResponse.Offset) 7572 } 7573 if namesResponse.Limit != JSApiNamesLimit { 7574 t.Fatalf("Expected limit to be %d but got %d", JSApiNamesLimit, namesResponse.Limit) 7575 } 7576 if namesResponse.Streams[0] != msetCfg.Name { 7577 t.Fatalf("Expected to get %q, but got %q", msetCfg.Name, namesResponse.Streams[0]) 7578 } 7579 7580 // Now do detailed version. 7581 resp, err = nc.Request(JSApiStreamList, nil, time.Second) 7582 require_NoError(t, err) 7583 var listResponse JSApiStreamListResponse 7584 if err = json.Unmarshal(resp.Data, &listResponse); err != nil { 7585 t.Fatalf("Unexpected error: %v", err) 7586 } 7587 7588 if len(listResponse.Streams) != 1 { 7589 t.Fatalf("Expected only 1 stream but got %d", len(listResponse.Streams)) 7590 } 7591 if listResponse.Total != 1 { 7592 t.Fatalf("Expected total to be 1 but got %d", listResponse.Total) 7593 } 7594 if listResponse.Offset != 0 { 7595 t.Fatalf("Expected offset to be 0 but got %d", listResponse.Offset) 7596 } 7597 if listResponse.Limit != JSApiListLimit { 7598 t.Fatalf("Expected limit to be %d but got %d", JSApiListLimit, listResponse.Limit) 7599 } 7600 if listResponse.Streams[0].Config.Name != msetCfg.Name { 7601 t.Fatalf("Expected to get %q, but got %q", msetCfg.Name, listResponse.Streams[0].Config.Name) 7602 } 7603 7604 // Now send some messages, then we can poll for info on this stream. 7605 toSend := 10 7606 for i := 0; i < toSend; i++ { 7607 nc.Request("foo", []byte("WELCOME JETSTREAM"), time.Second) 7608 } 7609 7610 resp, err = nc.Request(fmt.Sprintf(JSApiStreamInfoT, msetCfg.Name), nil, time.Second) 7611 require_NoError(t, err) 7612 var msi StreamInfo 7613 if err = json.Unmarshal(resp.Data, &msi); err != nil { 7614 t.Fatalf("Unexpected error: %v", err) 7615 } 7616 if msi.State.Msgs != uint64(toSend) { 7617 t.Fatalf("Expected to get %d msgs, got %d", toSend, msi.State.Msgs) 7618 } 7619 if time.Since(msi.Created) > time.Second { 7620 t.Fatalf("Created time seems wrong: %v\n", msi.Created) 7621 } 7622 7623 // Looking up one that is not there should yield an error. 7624 resp, err = nc.Request(fmt.Sprintf(JSApiStreamInfoT, "BOB"), nil, time.Second) 7625 require_NoError(t, err) 7626 var bResp JSApiStreamInfoResponse 7627 if err = json.Unmarshal(resp.Data, &bResp); err != nil { 7628 t.Fatalf("Unexpected error: %v", err) 7629 } 7630 checkNatsError(t, bResp.Error, JSStreamNotFoundErr) 7631 7632 // Now create a consumer. 
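// Consumers are created by publishing a JSON-encoded CreateConsumerRequest to
// $JS.API.CONSUMER.CREATE.<stream>. For the ephemeral push consumer below, the
// request body looks roughly like this on the wire (illustrative, inbox shortened):
//
//	{"stream_name": "MSET22", "config": {"deliver_subject": "_INBOX.abc"}}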
7633 delivery := nats.NewInbox() 7634 obsReq := CreateConsumerRequest{ 7635 Stream: msetCfg.Name, 7636 Config: ConsumerConfig{DeliverSubject: delivery}, 7637 } 7638 req, err = json.Marshal(obsReq) 7639 require_NoError(t, err) 7640 resp, err = nc.Request(fmt.Sprintf(JSApiConsumerCreateT, msetCfg.Name), req, time.Second) 7641 require_NoError(t, err) 7642 var ccResp JSApiConsumerCreateResponse 7643 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 7644 t.Fatalf("Unexpected error: %v", err) 7645 } 7646 // Ephemerals are now not rejected when there is no interest. 7647 if ccResp.ConsumerInfo == nil || ccResp.Error != nil { 7648 t.Fatalf("Got a bad response %+v", ccResp) 7649 } 7650 if time.Since(ccResp.Created) > time.Second { 7651 t.Fatalf("Created time seems wrong: %v\n", ccResp.Created) 7652 } 7653 7654 // Now create a subscription and make sure we get a proper response. 7655 sub, _ := nc.SubscribeSync(delivery) 7656 nc.Flush() 7657 7658 checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error { 7659 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != toSend { 7660 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, toSend) 7661 } 7662 return nil 7663 }) 7664 7665 // Check that we get an error if the stream name in the subject does not match the config. 7666 resp, err = nc.Request(fmt.Sprintf(JSApiConsumerCreateT, "BOB"), req, time.Second) 7667 require_NoError(t, err) 7668 ccResp.Error, ccResp.ConsumerInfo = nil, nil 7669 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 7670 t.Fatalf("Unexpected error: %v", err) 7671 } 7672 // The stream name mismatch should have caused this to fail. 7673 checkNatsError(t, ccResp.Error, JSStreamMismatchErr) 7674 7675 // Get the list of all consumers for our stream. 7676 resp, err = nc.Request(fmt.Sprintf(JSApiConsumersT, msetCfg.Name), nil, time.Second) 7677 require_NoError(t, err) 7678 var clResponse JSApiConsumerNamesResponse 7679 if err = json.Unmarshal(resp.Data, &clResponse); err != nil { 7680 t.Fatalf("Unexpected error: %v", err) 7681 } 7682 if len(clResponse.Consumers) != 1 { 7683 t.Fatalf("Expected only 1 consumer but got %d", len(clResponse.Consumers)) 7684 } 7685 // Now let's get info about our consumer. 7686 cName := clResponse.Consumers[0] 7687 resp, err = nc.Request(fmt.Sprintf(JSApiConsumerInfoT, msetCfg.Name, cName), nil, time.Second) 7688 require_NoError(t, err) 7689 var oinfo ConsumerInfo 7690 if err = json.Unmarshal(resp.Data, &oinfo); err != nil { 7691 t.Fatalf("Unexpected error: %v", err) 7692 } 7693 // Do some sanity checking. 7694 // Must match consumer.go 7695 const randConsumerNameLen = 8 7696 if len(oinfo.Name) != randConsumerNameLen { 7697 t.Fatalf("Expected ephemeral name, got %q", oinfo.Name) 7698 } 7699 if len(oinfo.Config.Durable) != 0 { 7700 t.Fatalf("Expected no durable name, but got %q", oinfo.Config.Durable) 7701 } 7702 if oinfo.Config.DeliverSubject != delivery { 7703 t.Fatalf("Expected to have delivery subject of %q, got %q", delivery, oinfo.Config.DeliverSubject) 7704 } 7705 if oinfo.Delivered.Consumer != 10 { 7706 t.Fatalf("Expected consumer delivered sequence of 10, got %d", oinfo.Delivered.Consumer) 7707 } 7708 if oinfo.AckFloor.Consumer != 10 { 7709 t.Fatalf("Expected ack floor to be 10, got %d", oinfo.AckFloor.Consumer) 7710 } 7711 7712 // Now delete the consumer.
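// Deletion goes through $JS.API.CONSUMER.DELETE.<stream>.<consumer> and returns
// a JSApiConsumerDeleteResponse whose Success flag must be set.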
7713 resp, _ = nc.Request(fmt.Sprintf(JSApiConsumerDeleteT, msetCfg.Name, cName), nil, time.Second) 7714 var cdResp JSApiConsumerDeleteResponse 7715 if err = json.Unmarshal(resp.Data, &cdResp); err != nil { 7716 t.Fatalf("Unexpected error: %v", err) 7717 } 7718 if !cdResp.Success || cdResp.Error != nil { 7719 t.Fatalf("Got a bad response %+v", ccResp) 7720 } 7721 7722 // Make sure we can't create a durable using the ephemeral API endpoint. 7723 obsReq = CreateConsumerRequest{ 7724 Stream: msetCfg.Name, 7725 Config: ConsumerConfig{Durable: "myd", DeliverSubject: delivery}, 7726 } 7727 req, err = json.Marshal(obsReq) 7728 require_NoError(t, err) 7729 resp, err = nc.Request(fmt.Sprintf(JSApiConsumerCreateT, msetCfg.Name), req, time.Second) 7730 require_NoError(t, err) 7731 ccResp.Error, ccResp.ConsumerInfo = nil, nil 7732 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 7733 t.Fatalf("Unexpected error: %v", err) 7734 } 7735 checkNatsError(t, ccResp.Error, JSConsumerEphemeralWithDurableNameErr) 7736 7737 // Now make sure we can create a durable on the subject with the proper name. 7738 resp, err = nc.Request(fmt.Sprintf(JSApiDurableCreateT, msetCfg.Name, obsReq.Config.Durable), req, time.Second) 7739 ccResp.Error, ccResp.ConsumerInfo = nil, nil 7740 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 7741 t.Fatalf("Unexpected error: %v", err) 7742 } 7743 if ccResp.ConsumerInfo == nil || ccResp.Error != nil { 7744 t.Fatalf("Did not receive correct response") 7745 } 7746 7747 // Make sure empty durable in cfg does not work 7748 obsReq2 := CreateConsumerRequest{ 7749 Stream: msetCfg.Name, 7750 Config: ConsumerConfig{DeliverSubject: delivery}, 7751 } 7752 req2, err := json.Marshal(obsReq2) 7753 require_NoError(t, err) 7754 resp, err = nc.Request(fmt.Sprintf(JSApiDurableCreateT, msetCfg.Name, obsReq.Config.Durable), req2, time.Second) 7755 require_NoError(t, err) 7756 ccResp.Error, ccResp.ConsumerInfo = nil, nil 7757 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 7758 t.Fatalf("Unexpected error: %v", err) 7759 } 7760 checkNatsError(t, ccResp.Error, JSConsumerDurableNameNotSetErr) 7761 7762 // Now delete a msg. 7763 dreq := JSApiMsgDeleteRequest{Seq: 2} 7764 dreqj, err := json.Marshal(dreq) 7765 require_NoError(t, err) 7766 resp, _ = nc.Request(fmt.Sprintf(JSApiMsgDeleteT, msetCfg.Name), dreqj, time.Second) 7767 var delMsgResp JSApiMsgDeleteResponse 7768 if err = json.Unmarshal(resp.Data, &delMsgResp); err != nil { 7769 t.Fatalf("Unexpected error: %v", err) 7770 } 7771 if !delMsgResp.Success || delMsgResp.Error != nil { 7772 t.Fatalf("Got a bad response %+v", delMsgResp.Error) 7773 } 7774 7775 // Now purge the stream. 7776 resp, _ = nc.Request(fmt.Sprintf(JSApiStreamPurgeT, msetCfg.Name), nil, time.Second) 7777 var pResp JSApiStreamPurgeResponse 7778 if err = json.Unmarshal(resp.Data, &pResp); err != nil { 7779 t.Fatalf("Unexpected error: %v", err) 7780 } 7781 if !pResp.Success || pResp.Error != nil { 7782 t.Fatalf("Got a bad response %+v", pResp) 7783 } 7784 if pResp.Purged != 9 { 7785 t.Fatalf("Expected 9 purged, got %d", pResp.Purged) 7786 } 7787 7788 // Now delete the stream. 7789 resp, _ = nc.Request(fmt.Sprintf(JSApiStreamDeleteT, msetCfg.Name), nil, time.Second) 7790 var dResp JSApiStreamDeleteResponse 7791 if err = json.Unmarshal(resp.Data, &dResp); err != nil { 7792 t.Fatalf("Unexpected error: %v", err) 7793 } 7794 if !dResp.Success || dResp.Error != nil { 7795 t.Fatalf("Got a bad response %+v", dResp.Error) 7796 } 7797 7798 // Now grab stats again. 
7799 // This will get the current information about usage and limits for this account. 7800 resp, err = nc.Request(JSApiAccountInfo, nil, time.Second) 7801 require_NoError(t, err) 7802 if err := json.Unmarshal(resp.Data, &info); err != nil { 7803 t.Fatalf("Unexpected error: %v", err) 7804 } 7805 if info.Streams != 0 { 7806 t.Fatalf("Expected no remaining streams, got %d", info.Streams) 7807 } 7808 7809 // Now do templates. 7810 mcfg := &StreamConfig{ 7811 Subjects: []string{"kv.*"}, 7812 Retention: LimitsPolicy, 7813 MaxAge: time.Hour, 7814 MaxMsgs: 4, 7815 Storage: MemoryStorage, 7816 Replicas: 1, 7817 } 7818 template := &StreamTemplateConfig{ 7819 Name: "kv", 7820 Config: mcfg, 7821 MaxStreams: 4, 7822 } 7823 req, err = json.Marshal(template) 7824 require_NoError(t, err) 7825 7826 // Check that the name in config has to match the name in the subject 7827 resp, _ = nc.Request(fmt.Sprintf(JSApiTemplateCreateT, "BOB"), req, time.Second) 7828 var stResp JSApiStreamTemplateCreateResponse 7829 if err = json.Unmarshal(resp.Data, &stResp); err != nil { 7830 t.Fatalf("Unexpected error: %v", err) 7831 } 7832 checkNatsError(t, stResp.Error, JSTemplateNameNotMatchSubjectErr) 7833 7834 resp, _ = nc.Request(fmt.Sprintf(JSApiTemplateCreateT, template.Name), req, time.Second) 7835 stResp.Error, stResp.StreamTemplateInfo = nil, nil 7836 if err = json.Unmarshal(resp.Data, &stResp); err != nil { 7837 t.Fatalf("Unexpected error: %v", err) 7838 } 7839 if stResp.StreamTemplateInfo == nil || stResp.Error != nil { 7840 t.Fatalf("Did not receive correct response") 7841 } 7842 7843 // Create a second one. 7844 template.Name = "ss" 7845 template.Config.Subjects = []string{"foo", "bar"} 7846 7847 req, err = json.Marshal(template) 7848 if err != nil { 7849 t.Fatalf("Unexpected error: %s", err) 7850 } 7851 7852 resp, _ = nc.Request(fmt.Sprintf(JSApiTemplateCreateT, template.Name), req, time.Second) 7853 stResp.Error, stResp.StreamTemplateInfo = nil, nil 7854 if err = json.Unmarshal(resp.Data, &stResp); err != nil { 7855 t.Fatalf("Unexpected error: %v", err) 7856 } 7857 if stResp.StreamTemplateInfo == nil || stResp.Error != nil { 7858 t.Fatalf("Did not receive correct response") 7859 } 7860 7861 // Now grab the list of templates 7862 var tListResp JSApiStreamTemplateNamesResponse 7863 resp, err = nc.Request(JSApiTemplates, nil, time.Second) 7864 if err = json.Unmarshal(resp.Data, &tListResp); err != nil { 7865 t.Fatalf("Unexpected error: %v", err) 7866 } 7867 if len(tListResp.Templates) != 2 { 7868 t.Fatalf("Expected 2 templates but got %d", len(tListResp.Templates)) 7869 } 7870 sort.Strings(tListResp.Templates) 7871 if tListResp.Templates[0] != "kv" { 7872 t.Fatalf("Expected to get %q, but got %q", "kv", tListResp.Templates[0]) 7873 } 7874 if tListResp.Templates[1] != "ss" { 7875 t.Fatalf("Expected to get %q, but got %q", "ss", tListResp.Templates[1]) 7876 } 7877 7878 // Now delete one. 7879 // Test bad name. 
7880 resp, _ = nc.Request(fmt.Sprintf(JSApiTemplateDeleteT, "bob"), nil, time.Second) 7881 var tDeleteResp JSApiStreamTemplateDeleteResponse 7882 if err = json.Unmarshal(resp.Data, &tDeleteResp); err != nil { 7883 t.Fatalf("Unexpected error: %v", err) 7884 } 7885 checkNatsError(t, tDeleteResp.Error, JSStreamTemplateNotFoundErr) 7886 7887 resp, _ = nc.Request(fmt.Sprintf(JSApiTemplateDeleteT, "ss"), nil, time.Second) 7888 tDeleteResp.Error = nil 7889 if err = json.Unmarshal(resp.Data, &tDeleteResp); err != nil { 7890 t.Fatalf("Unexpected error: %v", err) 7891 } 7892 if !tDeleteResp.Success || tDeleteResp.Error != nil { 7893 t.Fatalf("Did not receive correct response: %+v", tDeleteResp.Error) 7894 } 7895 7896 resp, err = nc.Request(JSApiTemplates, nil, time.Second) 7897 tListResp.Error, tListResp.Templates = nil, nil 7898 if err = json.Unmarshal(resp.Data, &tListResp); err != nil { 7899 t.Fatalf("Unexpected error: %v", err) 7900 } 7901 if len(tListResp.Templates) != 1 { 7902 t.Fatalf("Expected 1 template but got %d", len(tListResp.Templates)) 7903 } 7904 if tListResp.Templates[0] != "kv" { 7905 t.Fatalf("Expected to get %q, but got %q", "kv", tListResp.Templates[0]) 7906 } 7907 7908 // First create a stream from the template 7909 sendStreamMsg(t, nc, "kv.22", "derek") 7910 // Last do info 7911 resp, err = nc.Request(fmt.Sprintf(JSApiTemplateInfoT, "kv"), nil, time.Second) 7912 require_NoError(t, err) 7913 var ti StreamTemplateInfo 7914 if err = json.Unmarshal(resp.Data, &ti); err != nil { 7915 t.Fatalf("Unexpected error: %v", err) 7916 } 7917 if len(ti.Streams) != 1 { 7918 t.Fatalf("Expected 1 stream, got %d", len(ti.Streams)) 7919 } 7920 if ti.Streams[0] != canonicalName("kv.22") { 7921 t.Fatalf("Expected stream with name %q, but got %q", canonicalName("kv.22"), ti.Streams[0]) 7922 } 7923 7924 // Test that we can send nil or an empty legal json for requests that take no args. 7925 // We know this stream does not exist, this just checking request processing. 7926 checkEmptyReqArg := func(arg string) { 7927 t.Helper() 7928 var req []byte 7929 if len(arg) > 0 { 7930 req = []byte(arg) 7931 } 7932 resp, err = nc.Request(fmt.Sprintf(JSApiStreamDeleteT, "foo_bar_baz"), req, time.Second) 7933 var dResp JSApiStreamDeleteResponse 7934 if err = json.Unmarshal(resp.Data, &dResp); err != nil { 7935 t.Fatalf("Unexpected error: %v", err) 7936 } 7937 if dResp.Error == nil || dResp.Error.Code != 404 { 7938 t.Fatalf("Got a bad response, expected a 404 response %+v", dResp.Error) 7939 } 7940 } 7941 7942 checkEmptyReqArg("") 7943 checkEmptyReqArg("{}") 7944 checkEmptyReqArg(" {} ") 7945 checkEmptyReqArg(" { } ") 7946 } 7947 7948 func TestJetStreamFilteredStreamNames(t *testing.T) { 7949 s := RunBasicJetStreamServer(t) 7950 defer s.Shutdown() 7951 7952 // Client for API requests. 7953 nc := clientConnectToServer(t, s) 7954 defer nc.Close() 7955 7956 // Create some streams. 
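// The stream names API accepts an optional subject filter; only streams whose
// configured subjects overlap the filter (including wildcard overlap) are
// returned. The request body is roughly {"subject": "foo.*"} on the wire
// (illustrative).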
7957 var snid int 7958 createStream := func(subjects []string) { 7959 t.Helper() 7960 snid++ 7961 name := fmt.Sprintf("S-%d", snid) 7962 sc := &StreamConfig{Name: name, Subjects: subjects} 7963 if _, err := s.GlobalAccount().addStream(sc); err != nil { 7964 t.Fatalf("Unexpected error adding stream: %v", err) 7965 } 7966 } 7967 7968 createStream([]string{"foo"}) // S1 7969 createStream([]string{"bar"}) // S2 7970 createStream([]string{"baz"}) // S3 7971 createStream([]string{"foo.*", "bar.*"}) // S4 7972 createStream([]string{"foo-1.22", "bar-1.33"}) // S5 7973 7974 expectStreams := func(filter string, streams []string) { 7975 t.Helper() 7976 req, _ := json.Marshal(&JSApiStreamNamesRequest{Subject: filter}) 7977 r, _ := nc.Request(JSApiStreams, req, time.Second) 7978 var resp JSApiStreamNamesResponse 7979 if err := json.Unmarshal(r.Data, &resp); err != nil { 7980 t.Fatalf("Unexpected error: %v", err) 7981 } 7982 if len(resp.Streams) != len(streams) { 7983 t.Fatalf("Expected %d results, got %d", len(streams), len(resp.Streams)) 7984 } 7985 } 7986 7987 expectStreams("foo", []string{"S1"}) 7988 expectStreams("bar", []string{"S2"}) 7989 expectStreams("baz", []string{"S3"}) 7990 expectStreams("*", []string{"S1", "S2", "S3"}) 7991 expectStreams(">", []string{"S1", "S2", "S3", "S4", "S5"}) 7992 expectStreams("*.*", []string{"S4", "S5"}) 7993 expectStreams("*.22", []string{"S4", "S5"}) 7994 } 7995 7996 func TestJetStreamUpdateStream(t *testing.T) { 7997 cases := []struct { 7998 name string 7999 mconfig *StreamConfig 8000 }{ 8001 {name: "MemoryStore", 8002 mconfig: &StreamConfig{ 8003 Name: "foo", 8004 Retention: LimitsPolicy, 8005 MaxAge: time.Hour, 8006 Storage: MemoryStorage, 8007 Replicas: 1, 8008 }}, 8009 {name: "FileStore", 8010 mconfig: &StreamConfig{ 8011 Name: "foo", 8012 Retention: LimitsPolicy, 8013 MaxAge: time.Hour, 8014 Storage: FileStorage, 8015 Replicas: 1, 8016 }}, 8017 } 8018 for _, c := range cases { 8019 t.Run(c.name, func(t *testing.T) { 8020 s := RunBasicJetStreamServer(t) 8021 defer s.Shutdown() 8022 8023 mset, err := s.GlobalAccount().addStream(c.mconfig) 8024 if err != nil { 8025 t.Fatalf("Unexpected error adding stream: %v", err) 8026 } 8027 defer mset.delete() 8028 8029 // Test basic updates. We allow changing the subjects, limits, and no_ack along with replicas(TBD w/ cluster) 8030 cfg := *c.mconfig 8031 8032 // Can't change name. 8033 cfg.Name = "bar" 8034 if err := mset.update(&cfg); err == nil || !strings.Contains(err.Error(), "name must match") { 8035 t.Fatalf("Expected error trying to update name") 8036 } 8037 // Can't change max consumers for now. 8038 cfg = *c.mconfig 8039 cfg.MaxConsumers = 10 8040 if err := mset.update(&cfg); err == nil || !strings.Contains(err.Error(), "can not change") { 8041 t.Fatalf("Expected error trying to change MaxConsumers") 8042 } 8043 // Can't change storage types. 8044 cfg = *c.mconfig 8045 if cfg.Storage == FileStorage { 8046 cfg.Storage = MemoryStorage 8047 } else { 8048 cfg.Storage = FileStorage 8049 } 8050 if err := mset.update(&cfg); err == nil || !strings.Contains(err.Error(), "can not change") { 8051 t.Fatalf("Expected error trying to change Storage") 8052 } 8053 // Can't change replicas > 1 for now. 8054 cfg = *c.mconfig 8055 cfg.Replicas = 10 8056 if err := mset.update(&cfg); err == nil || !strings.Contains(err.Error(), "maximum replicas") { 8057 t.Fatalf("Expected error trying to change Replicas") 8058 } 8059 // Can't have a template set for now. 
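// The checks above and immediately below cover the fields that are immutable on
// update for now (name, max consumers, storage backend, replicas > 1, template
// ownership, retention policy); subjects and the limits are then changed
// successfully further down.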
8060 cfg = *c.mconfig 8061 cfg.Template = "baz" 8062 if err := mset.update(&cfg); err == nil || !strings.Contains(err.Error(), "template") { 8063 t.Fatalf("Expected error trying to change Template owner") 8064 } 8065 // Can't change limits policy. 8066 cfg = *c.mconfig 8067 cfg.Retention = WorkQueuePolicy 8068 if err := mset.update(&cfg); err == nil || !strings.Contains(err.Error(), "can not change") { 8069 t.Fatalf("Expected error trying to change Retention") 8070 } 8071 8072 // Now test changing limits. 8073 nc := clientConnectToServer(t, s) 8074 defer nc.Close() 8075 8076 pending := uint64(100) 8077 for i := uint64(0); i < pending; i++ { 8078 sendStreamMsg(t, nc, "foo", "0123456789") 8079 } 8080 pendingBytes := mset.state().Bytes 8081 8082 checkPending := func(msgs, bts uint64) { 8083 t.Helper() 8084 state := mset.state() 8085 if state.Msgs != msgs { 8086 t.Fatalf("Expected %d messages, got %d", msgs, state.Msgs) 8087 } 8088 if state.Bytes != bts { 8089 t.Fatalf("Expected %d bytes, got %d", bts, state.Bytes) 8090 } 8091 } 8092 checkPending(pending, pendingBytes) 8093 8094 // Update msgs to higher. 8095 cfg = *c.mconfig 8096 cfg.MaxMsgs = int64(pending * 2) 8097 if err := mset.update(&cfg); err != nil { 8098 t.Fatalf("Unexpected error %v", err) 8099 } 8100 if mset.config().MaxMsgs != cfg.MaxMsgs { 8101 t.Fatalf("Expected the change to take effect, %d vs %d", mset.config().MaxMsgs, cfg.MaxMsgs) 8102 } 8103 checkPending(pending, pendingBytes) 8104 8105 // Update msgs to lower. 8106 cfg = *c.mconfig 8107 cfg.MaxMsgs = int64(pending / 2) 8108 if err := mset.update(&cfg); err != nil { 8109 t.Fatalf("Unexpected error %v", err) 8110 } 8111 if mset.config().MaxMsgs != cfg.MaxMsgs { 8112 t.Fatalf("Expected the change to take effect, %d vs %d", mset.config().MaxMsgs, cfg.MaxMsgs) 8113 } 8114 checkPending(pending/2, pendingBytes/2) 8115 // Now do bytes. 8116 cfg = *c.mconfig 8117 cfg.MaxBytes = int64(pendingBytes / 4) 8118 if err := mset.update(&cfg); err != nil { 8119 t.Fatalf("Unexpected error %v", err) 8120 } 8121 if mset.config().MaxBytes != cfg.MaxBytes { 8122 t.Fatalf("Expected the change to take effect, %d vs %d", mset.config().MaxBytes, cfg.MaxBytes) 8123 } 8124 checkPending(pending/4, pendingBytes/4) 8125 8126 // Now do age. 8127 cfg = *c.mconfig 8128 cfg.MaxAge = time.Second 8129 if err := mset.update(&cfg); err != nil { 8130 t.Fatalf("Unexpected error %v", err) 8131 } 8132 // Just wait a bit for expiration. 8133 time.Sleep(2 * time.Second) 8134 if mset.config().MaxAge != cfg.MaxAge { 8135 t.Fatalf("Expected the change to take effect, %d vs %d", mset.config().MaxAge, cfg.MaxAge) 8136 } 8137 checkPending(0, 0) 8138 8139 // Now put back to original. 8140 cfg = *c.mconfig 8141 if err := mset.update(&cfg); err != nil { 8142 t.Fatalf("Unexpected error %v", err) 8143 } 8144 for i := uint64(0); i < pending; i++ { 8145 sendStreamMsg(t, nc, "foo", "0123456789") 8146 } 8147 8148 // subject changes. 8149 // Add in a subject first. 8150 cfg = *c.mconfig 8151 cfg.Subjects = []string{"foo", "bar"} 8152 if err := mset.update(&cfg); err != nil { 8153 t.Fatalf("Unexpected error %v", err) 8154 } 8155 // Make sure we can still send to foo. 8156 sendStreamMsg(t, nc, "foo", "0123456789") 8157 // And we can now send to bar. 8158 sendStreamMsg(t, nc, "bar", "0123456789") 8159 // Now delete both and change to baz only. 
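// Once "foo" and "bar" are dropped from the stream's subjects, publishes to
// them no longer get a JetStream ack, so the requests below are expected to
// time out.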
8160 cfg.Subjects = []string{"baz"} 8161 if err := mset.update(&cfg); err != nil { 8162 t.Fatalf("Unexpected error %v", err) 8163 } 8164 // Make sure we do not get response acks for "foo" or "bar". 8165 if resp, err := nc.Request("foo", nil, 25*time.Millisecond); err == nil || resp != nil { 8166 t.Fatalf("Expected no response from jetstream for deleted subject: %q", "foo") 8167 } 8168 if resp, err := nc.Request("bar", nil, 25*time.Millisecond); err == nil || resp != nil { 8169 t.Fatalf("Expected no response from jetstream for deleted subject: %q", "bar") 8170 } 8171 // Make sure we can send to "baz" 8172 sendStreamMsg(t, nc, "baz", "0123456789") 8173 if nmsgs := mset.state().Msgs; nmsgs != pending+3 { 8174 t.Fatalf("Expected %d msgs, got %d", pending+3, nmsgs) 8175 } 8176 8177 // FileStore restarts for config save. 8178 cfg = *c.mconfig 8179 if cfg.Storage == FileStorage { 8180 cfg.Subjects = []string{"foo", "bar"} 8181 cfg.MaxMsgs = 2222 8182 cfg.MaxBytes = 3333333 8183 cfg.MaxAge = 22 * time.Hour 8184 if err := mset.update(&cfg); err != nil { 8185 t.Fatalf("Unexpected error %v", err) 8186 } 8187 // Pull since certain defaults etc are set in processing. 8188 cfg = mset.config() 8189 8190 // Restart the 8191 // Capture port since it was dynamic. 8192 u, _ := url.Parse(s.ClientURL()) 8193 port, _ := strconv.Atoi(u.Port()) 8194 8195 // Stop current 8196 sd := s.JetStreamConfig().StoreDir 8197 s.Shutdown() 8198 // Restart. 8199 s = RunJetStreamServerOnPort(port, sd) 8200 defer s.Shutdown() 8201 8202 mset, err = s.GlobalAccount().lookupStream(cfg.Name) 8203 if err != nil { 8204 t.Fatalf("Expected to find a stream for %q", cfg.Name) 8205 } 8206 restored_cfg := mset.config() 8207 if !reflect.DeepEqual(cfg, restored_cfg) { 8208 t.Fatalf("restored configuration does not match: \n%+v\n vs \n%+v", restored_cfg, cfg) 8209 } 8210 } 8211 }) 8212 } 8213 } 8214 8215 func TestJetStreamDeleteMsg(t *testing.T) { 8216 cases := []struct { 8217 name string 8218 mconfig *StreamConfig 8219 }{ 8220 {name: "MemoryStore", 8221 mconfig: &StreamConfig{ 8222 Name: "foo", 8223 Retention: LimitsPolicy, 8224 MaxAge: time.Hour, 8225 Storage: MemoryStorage, 8226 Replicas: 1, 8227 }}, 8228 {name: "FileStore", 8229 mconfig: &StreamConfig{ 8230 Name: "foo", 8231 Retention: LimitsPolicy, 8232 MaxAge: time.Hour, 8233 Storage: FileStorage, 8234 Replicas: 1, 8235 }}, 8236 } 8237 8238 for _, c := range cases { 8239 t.Run(c.name, func(t *testing.T) { 8240 8241 s := RunBasicJetStreamServer(t) 8242 defer s.Shutdown() 8243 8244 mset, err := s.GlobalAccount().addStream(c.mconfig) 8245 if err != nil { 8246 t.Fatalf("Unexpected error adding stream: %v", err) 8247 } 8248 8249 nc, js := jsClientConnect(t, s) 8250 defer nc.Close() 8251 8252 pubTen := func() { 8253 t.Helper() 8254 for i := 0; i < 10; i++ { 8255 js.Publish("foo", []byte("Hello World!")) 8256 } 8257 } 8258 8259 pubTen() 8260 8261 state := mset.state() 8262 if state.Msgs != 10 { 8263 t.Fatalf("Expected 10 messages, got %d", state.Msgs) 8264 } 8265 bytesPerMsg := state.Bytes / 10 8266 if bytesPerMsg == 0 { 8267 t.Fatalf("Expected non-zero bytes for msg size") 8268 } 8269 8270 deleteAndCheck := func(seq, expectedFirstSeq uint64) { 8271 t.Helper() 8272 beforeState := mset.state() 8273 if removed, _ := mset.deleteMsg(seq); !removed { 8274 t.Fatalf("Expected the delete of sequence %d to succeed", seq) 8275 } 8276 expectedState := beforeState 8277 expectedState.Msgs-- 8278 expectedState.Bytes -= bytesPerMsg 8279 expectedState.FirstSeq = expectedFirstSeq 8280 8281 sm, err := 
mset.getMsg(expectedFirstSeq) 8282 if err != nil { 8283 t.Fatalf("Error fetching message for seq: %d - %v", expectedFirstSeq, err) 8284 } 8285 expectedState.FirstTime = sm.Time 8286 expectedState.Deleted = nil 8287 expectedState.NumDeleted = 0 8288 8289 afterState := mset.state() 8290 afterState.Deleted = nil 8291 afterState.NumDeleted = 0 8292 8293 // Deleted tracking is cleared above and ignored in this comparison. 8294 if !reflect.DeepEqual(afterState, expectedState) { 8295 t.Fatalf("Stats not what we expected. Expected %+v, got %+v\n", expectedState, afterState) 8296 } 8297 } 8298 8299 // Delete one from the middle. 8300 deleteAndCheck(5, 1) 8301 // Now make sure sequences are updated properly. 8302 // Delete first msg. 8303 deleteAndCheck(1, 2) 8304 // Now the last. 8305 deleteAndCheck(10, 2) 8306 // Now gaps. 8307 deleteAndCheck(3, 2) 8308 deleteAndCheck(2, 4) 8309 8310 mset.purge(nil) 8311 // Put ten more in. 8312 pubTen() 8313 deleteAndCheck(11, 12) 8314 deleteAndCheck(15, 12) 8315 deleteAndCheck(16, 12) 8316 deleteAndCheck(20, 12) 8317 8318 // Only file storage beyond here. 8319 if c.mconfig.Storage == MemoryStorage { 8320 return 8321 } 8322 8323 // Capture port since it was dynamic. 8324 u, _ := url.Parse(s.ClientURL()) 8325 port, _ := strconv.Atoi(u.Port()) 8326 sd := s.JetStreamConfig().StoreDir 8327 8328 // Shutdown the server. 8329 s.Shutdown() 8330 8331 s = RunJetStreamServerOnPort(port, sd) 8332 defer s.Shutdown() 8333 8334 mset, err = s.GlobalAccount().lookupStream("foo") 8335 if err != nil { 8336 t.Fatalf("Expected to get the stream back") 8337 } 8338 8339 expected := StreamState{Msgs: 6, Bytes: 6 * bytesPerMsg, FirstSeq: 12, LastSeq: 20, NumSubjects: 1} 8340 state = mset.state() 8341 state.FirstTime, state.LastTime, state.Deleted, state.NumDeleted = time.Time{}, time.Time{}, nil, 0 8342 8343 if !reflect.DeepEqual(expected, state) { 8344 t.Fatalf("State not what we expected. Expected %+v, got %+v\n", expected, state) 8345 } 8346 8347 // Now create a consumer and make sure we get the right sequence.
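// Messages 11, 15, 16 and 20 were deleted above, so the consumer created below
// should deliver stream sequences 12, 13, 14, 17, 18 and 19 in order; the
// sequence is recovered from each delivery's reply subject via streamSeqFromReply().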
8348 nc = clientConnectToServer(t, s) 8349 defer nc.Close() 8350 8351 delivery := nats.NewInbox() 8352 sub, _ := nc.SubscribeSync(delivery) 8353 nc.Flush() 8354 8355 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: delivery, FilterSubject: "foo"}) 8356 if err != nil { 8357 t.Fatalf("Unexpected error: %v", err) 8358 } 8359 8360 expectedStoreSeq := []uint64{12, 13, 14, 17, 18, 19} 8361 8362 for i := 0; i < 6; i++ { 8363 m, err := sub.NextMsg(time.Second) 8364 if err != nil { 8365 t.Fatalf("Unexpected error: %v", err) 8366 } 8367 if o.streamSeqFromReply(m.Reply) != expectedStoreSeq[i] { 8368 t.Fatalf("Expected store seq of %d, got %d", expectedStoreSeq[i], o.streamSeqFromReply(m.Reply)) 8369 } 8370 } 8371 }) 8372 } 8373 } 8374 8375 // https://github.com/nats-io/jetstream/issues/396 8376 func TestJetStreamLimitLockBug(t *testing.T) { 8377 cases := []struct { 8378 name string 8379 mconfig *StreamConfig 8380 }{ 8381 {name: "MemoryStore", 8382 mconfig: &StreamConfig{ 8383 Name: "foo", 8384 Retention: LimitsPolicy, 8385 MaxMsgs: 10, 8386 Storage: MemoryStorage, 8387 Replicas: 1, 8388 }}, 8389 {name: "FileStore", 8390 mconfig: &StreamConfig{ 8391 Name: "foo", 8392 Retention: LimitsPolicy, 8393 MaxMsgs: 10, 8394 Storage: FileStorage, 8395 Replicas: 1, 8396 }}, 8397 } 8398 8399 for _, c := range cases { 8400 t.Run(c.name, func(t *testing.T) { 8401 8402 s := RunBasicJetStreamServer(t) 8403 defer s.Shutdown() 8404 8405 mset, err := s.GlobalAccount().addStream(c.mconfig) 8406 if err != nil { 8407 t.Fatalf("Unexpected error adding stream: %v", err) 8408 } 8409 8410 nc := clientConnectToServer(t, s) 8411 defer nc.Close() 8412 8413 for i := 0; i < 100; i++ { 8414 sendStreamMsg(t, nc, "foo", "ok") 8415 } 8416 8417 state := mset.state() 8418 if state.Msgs != 10 { 8419 t.Fatalf("Expected 10 messages, got %d", state.Msgs) 8420 } 8421 }) 8422 } 8423 } 8424 8425 func TestJetStreamNextMsgNoInterest(t *testing.T) { 8426 cases := []struct { 8427 name string 8428 mconfig *StreamConfig 8429 }{ 8430 {name: "MemoryStore", 8431 mconfig: &StreamConfig{ 8432 Name: "foo", 8433 Retention: LimitsPolicy, 8434 MaxAge: time.Hour, 8435 Storage: MemoryStorage, 8436 Replicas: 1, 8437 }}, 8438 {name: "FileStore", 8439 mconfig: &StreamConfig{ 8440 Name: "foo", 8441 Retention: LimitsPolicy, 8442 MaxAge: time.Hour, 8443 Storage: FileStorage, 8444 Replicas: 1, 8445 }}, 8446 } 8447 8448 for _, c := range cases { 8449 t.Run(c.name, func(t *testing.T) { 8450 s := RunBasicJetStreamServer(t) 8451 defer s.Shutdown() 8452 8453 cfg := &StreamConfig{Name: "foo", Storage: FileStorage} 8454 mset, err := s.GlobalAccount().addStream(cfg) 8455 if err != nil { 8456 t.Fatalf("Unexpected error adding stream: %v", err) 8457 } 8458 8459 nc := clientConnectWithOldRequest(t, s) 8460 defer nc.Close() 8461 8462 // Now create an consumer and make sure it functions properly. 8463 o, err := mset.addConsumer(workerModeConfig("WQ")) 8464 if err != nil { 8465 t.Fatalf("Expected no error with registered interest, got %v", err) 8466 } 8467 defer o.delete() 8468 8469 nextSubj := o.requestNextMsgSubject() 8470 8471 // Queue up a worker but use a short time out. 8472 if _, err := nc.Request(nextSubj, nil, time.Millisecond); err != nats.ErrTimeout { 8473 t.Fatalf("Expected a timeout error and no response with acks suppressed") 8474 } 8475 // Now send a message, the worker from above will still be known but we want to make 8476 // sure the system detects that so we will do a request for next msg right behind it. 
8477 nc.Publish("foo", []byte("OK")) 8478 if msg, err := nc.Request(nextSubj, nil, 5*time.Millisecond); err != nil { 8479 t.Fatalf("Unexpected error: %v", err) 8480 } else { 8481 msg.Respond(nil) // Ack 8482 } 8483 // Now queue up 10 workers. 8484 for i := 0; i < 10; i++ { 8485 if _, err := nc.Request(nextSubj, nil, time.Microsecond); err != nats.ErrTimeout { 8486 t.Fatalf("Expected a timeout error and no response with acks suppressed") 8487 } 8488 } 8489 // Now publish ten messages. 8490 for i := 0; i < 10; i++ { 8491 nc.Publish("foo", []byte("OK")) 8492 } 8493 nc.Flush() 8494 for i := 0; i < 10; i++ { 8495 if msg, err := nc.Request(nextSubj, nil, 10*time.Millisecond); err != nil { 8496 t.Fatalf("Unexpected error for %d: %v", i, err) 8497 } else { 8498 msg.Respond(nil) // Ack 8499 } 8500 } 8501 nc.Flush() 8502 ostate := o.info() 8503 if ostate.AckFloor.Stream != 11 || ostate.NumAckPending > 0 { 8504 t.Fatalf("Inconsistent ack state: %+v", ostate) 8505 } 8506 }) 8507 } 8508 } 8509 8510 func TestJetStreamMsgHeaders(t *testing.T) { 8511 cases := []struct { 8512 name string 8513 mconfig *StreamConfig 8514 }{ 8515 {name: "MemoryStore", 8516 mconfig: &StreamConfig{ 8517 Name: "foo", 8518 Retention: LimitsPolicy, 8519 MaxAge: time.Hour, 8520 Storage: MemoryStorage, 8521 Replicas: 1, 8522 }}, 8523 {name: "FileStore", 8524 mconfig: &StreamConfig{ 8525 Name: "foo", 8526 Retention: LimitsPolicy, 8527 MaxAge: time.Hour, 8528 Storage: FileStorage, 8529 Replicas: 1, 8530 }}, 8531 } 8532 for _, c := range cases { 8533 t.Run(c.name, func(t *testing.T) { 8534 s := RunBasicJetStreamServer(t) 8535 defer s.Shutdown() 8536 8537 mset, err := s.GlobalAccount().addStream(c.mconfig) 8538 if err != nil { 8539 t.Fatalf("Unexpected error adding stream: %v", err) 8540 } 8541 defer mset.delete() 8542 8543 nc := clientConnectToServer(t, s) 8544 defer nc.Close() 8545 8546 m := nats.NewMsg("foo") 8547 m.Header.Add("Accept-Encoding", "json") 8548 m.Header.Add("Authorization", "s3cr3t") 8549 m.Data = []byte("Hello JetStream Headers - #1!") 8550 8551 nc.PublishMsg(m) 8552 nc.Flush() 8553 8554 checkFor(t, time.Second*2, time.Millisecond*250, func() error { 8555 state := mset.state() 8556 if state.Msgs != 1 { 8557 return fmt.Errorf("Expected 1 message, got %d", state.Msgs) 8558 } 8559 if state.Bytes == 0 { 8560 return fmt.Errorf("Expected non-zero bytes") 8561 } 8562 return nil 8563 }) 8564 8565 // Now access raw from stream. 8566 sm, err := mset.getMsg(1) 8567 if err != nil { 8568 t.Fatalf("Unexpected error getting stored message: %v", err) 8569 } 8570 // Calculate the []byte version of the headers. 8571 var b bytes.Buffer 8572 b.WriteString("NATS/1.0\r\n") 8573 http.Header(m.Header).Write(&b) 8574 b.WriteString("\r\n") 8575 hdr := b.Bytes() 8576 8577 if !bytes.Equal(sm.Header, hdr) { 8578 t.Fatalf("Message headers do not match, %q vs %q", hdr, sm.Header) 8579 } 8580 if !bytes.Equal(sm.Data, m.Data) { 8581 t.Fatalf("Message data do not match, %q vs %q", m.Data, sm.Data) 8582 } 8583 8584 // Now do consumer based. 8585 sub, _ := nc.SubscribeSync(nats.NewInbox()) 8586 defer sub.Unsubscribe() 8587 nc.Flush() 8588 8589 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject}) 8590 if err != nil { 8591 t.Fatalf("Expected no error with registered interest, got %v", err) 8592 } 8593 defer o.delete() 8594 8595 cm, err := sub.NextMsg(time.Second) 8596 if err != nil { 8597 t.Fatalf("Error getting message: %v", err) 8598 } 8599 // Check the message. 8600 // Check out original headers. 
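// Headers should round-trip through the stream: the consumer-delivered copy
// carries the same header values that were published and stored above.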
8601 if cm.Header.Get("Accept-Encoding") != "json" || 8602 cm.Header.Get("Authorization") != "s3cr3t" { 8603 t.Fatalf("Original headers not present") 8604 } 8605 if !bytes.Equal(m.Data, cm.Data) { 8606 t.Fatalf("Message payloads are not the same: %q vs %q", cm.Data, m.Data) 8607 } 8608 }) 8609 } 8610 } 8611 8612 func TestJetStreamTemplateBasics(t *testing.T) { 8613 s := RunBasicJetStreamServer(t) 8614 defer s.Shutdown() 8615 8616 acc := s.GlobalAccount() 8617 8618 mcfg := &StreamConfig{ 8619 Subjects: []string{"kv.*"}, 8620 Retention: LimitsPolicy, 8621 MaxAge: time.Hour, 8622 MaxMsgs: 4, 8623 Storage: MemoryStorage, 8624 Replicas: 1, 8625 } 8626 template := &StreamTemplateConfig{ 8627 Name: "kv", 8628 Config: mcfg, 8629 MaxStreams: 4, 8630 } 8631 8632 if _, err := acc.addStreamTemplate(template); err != nil { 8633 t.Fatalf("Unexpected error: %v", err) 8634 } 8635 if templates := acc.templates(); len(templates) != 1 { 8636 t.Fatalf("Expected to get array of 1 template, got %d", len(templates)) 8637 } 8638 if err := acc.deleteStreamTemplate("foo"); err == nil { 8639 t.Fatalf("Expected an error for non-existent template") 8640 } 8641 if err := acc.deleteStreamTemplate(template.Name); err != nil { 8642 t.Fatalf("Unexpected error: %v", err) 8643 } 8644 if templates := acc.templates(); len(templates) != 0 { 8645 t.Fatalf("Expected to get array of no templates, got %d", len(templates)) 8646 } 8647 // Add it back in and test basics 8648 if _, err := acc.addStreamTemplate(template); err != nil { 8649 t.Fatalf("Unexpected error: %v", err) 8650 } 8651 8652 // Connect a client and send a message which should trigger the stream creation. 8653 nc := clientConnectToServer(t, s) 8654 defer nc.Close() 8655 8656 sendStreamMsg(t, nc, "kv.22", "derek") 8657 sendStreamMsg(t, nc, "kv.33", "cat") 8658 sendStreamMsg(t, nc, "kv.44", "sam") 8659 sendStreamMsg(t, nc, "kv.55", "meg") 8660 8661 if nms := acc.numStreams(); nms != 4 { 8662 t.Fatalf("Expected 4 auto-created streams, got %d", nms) 8663 } 8664 8665 // This one should fail due to max. 8666 if resp, err := nc.Request("kv.99", nil, 100*time.Millisecond); err == nil { 8667 t.Fatalf("Expected this to fail, but got %q", resp.Data) 8668 } 8669 8670 // Now delete template and make sure the underlying streams go away too. 8671 if err := acc.deleteStreamTemplate(template.Name); err != nil { 8672 t.Fatalf("Unexpected error: %v", err) 8673 } 8674 8675 if nms := acc.numStreams(); nms != 0 { 8676 t.Fatalf("Expected no auto-created streams to remain, got %d", nms) 8677 } 8678 } 8679 8680 func TestJetStreamTemplateFileStoreRecovery(t *testing.T) { 8681 s := RunBasicJetStreamServer(t) 8682 defer s.Shutdown() 8683 8684 acc := s.GlobalAccount() 8685 8686 mcfg := &StreamConfig{ 8687 Subjects: []string{"kv.*"}, 8688 Retention: LimitsPolicy, 8689 MaxAge: time.Hour, 8690 MaxMsgs: 50, 8691 Storage: FileStorage, 8692 Replicas: 1, 8693 } 8694 template := &StreamTemplateConfig{ 8695 Name: "kv", 8696 Config: mcfg, 8697 MaxStreams: 100, 8698 } 8699 8700 if _, err := acc.addStreamTemplate(template); err != nil { 8701 t.Fatalf("Unexpected error: %v", err) 8702 } 8703 8704 // Make sure we can not add in a stream on our own with a template owner. 8705 badCfg := *mcfg 8706 badCfg.Name = "bad" 8707 badCfg.Template = "kv" 8708 if _, err := acc.addStream(&badCfg); err == nil { 8709 t.Fatalf("Expected error adding stream with direct template owner") 8710 } 8711 8712 // Connect a client and send a message which should trigger the stream creation. 
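// Each distinct "kv.N" subject below should lazily create its own stream from
// the template, up to the template's MaxStreams limit of 100, with each stream
// capped at 50 messages by the template's config.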
8713 nc := clientConnectToServer(t, s) 8714 defer nc.Close() 8715 8716 for i := 1; i <= 100; i++ { 8717 subj := fmt.Sprintf("kv.%d", i) 8718 for x := 0; x < 50; x++ { 8719 sendStreamMsg(t, nc, subj, "Hello") 8720 } 8721 } 8722 nc.Flush() 8723 8724 if nms := acc.numStreams(); nms != 100 { 8725 t.Fatalf("Expected 100 auto-created streams, got %d", nms) 8726 } 8727 8728 // Capture port since it was dynamic. 8729 u, _ := url.Parse(s.ClientURL()) 8730 port, _ := strconv.Atoi(u.Port()) 8731 8732 restartServer := func() { 8733 t.Helper() 8734 sd := s.JetStreamConfig().StoreDir 8735 // Stop current 8736 s.Shutdown() 8737 // Restart. 8738 s = RunJetStreamServerOnPort(port, sd) 8739 } 8740 8741 // Restart. 8742 restartServer() 8743 defer s.Shutdown() 8744 8745 acc = s.GlobalAccount() 8746 if nms := acc.numStreams(); nms != 100 { 8747 t.Fatalf("Expected 100 auto-created streams, got %d", nms) 8748 } 8749 tmpl, err := acc.lookupStreamTemplate(template.Name) 8750 require_NoError(t, err) 8751 // Make sure t.delete() survives restart. 8752 tmpl.delete() 8753 8754 // Restart. 8755 restartServer() 8756 defer s.Shutdown() 8757 8758 acc = s.GlobalAccount() 8759 if nms := acc.numStreams(); nms != 0 { 8760 t.Fatalf("Expected no auto-created streams, got %d", nms) 8761 } 8762 if _, err := acc.lookupStreamTemplate(template.Name); err == nil { 8763 t.Fatalf("Expected to not find the template after restart") 8764 } 8765 } 8766 8767 // This will be testing our ability to conditionally rewrite subjects for last mile 8768 // when working with JetStream. Consumers receive messages that have their subjects 8769 // rewritten to match the original subject. NATS routing is all subject based except 8770 // for the last mile to the client. 8771 func TestJetStreamSingleInstanceRemoteAccess(t *testing.T) { 8772 ca := createClusterWithName(t, "A", 1) 8773 defer shutdownCluster(ca) 8774 cb := createClusterWithName(t, "B", 1, ca) 8775 defer shutdownCluster(cb) 8776 8777 // Connect our leafnode server to cluster B. 8778 opts := cb.opts[rand.Intn(len(cb.opts))] 8779 s, _ := runSolicitLeafServer(opts) 8780 defer s.Shutdown() 8781 8782 checkLeafNodeConnected(t, s) 8783 8784 if err := s.EnableJetStream(&JetStreamConfig{StoreDir: t.TempDir()}); err != nil { 8785 t.Fatalf("Expected no error, got %v", err) 8786 } 8787 8788 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: "foo", Storage: MemoryStorage}) 8789 if err != nil { 8790 t.Fatalf("Unexpected error adding stream: %v", err) 8791 } 8792 defer mset.delete() 8793 8794 nc := clientConnectToServer(t, s) 8795 defer nc.Close() 8796 8797 toSend := 10 8798 for i := 0; i < toSend; i++ { 8799 sendStreamMsg(t, nc, "foo", "Hello World!") 8800 } 8801 8802 // Now create a push based consumer. Connected to the non-jetstream server via a random server on cluster A. 8803 sl := ca.servers[rand.Intn(len(ca.servers))] 8804 nc2 := clientConnectToServer(t, sl) 8805 defer nc2.Close() 8806 8807 sub, _ := nc2.SubscribeSync(nats.NewInbox()) 8808 defer sub.Unsubscribe() 8809 8810 // Need to wait for interest to propagate across GW. 
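// The subscription lives in cluster A while the stream lives on the leafnode
// hanging off cluster B, so give interest a moment to cross the gateway before
// creating the push consumer.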
8811 nc2.Flush() 8812 time.Sleep(25 * time.Millisecond) 8813 8814 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: sub.Subject}) 8815 if err != nil { 8816 t.Fatalf("Expected no error with registered interest, got %v", err) 8817 } 8818 defer o.delete() 8819 8820 checkSubPending := func(numExpected int) { 8821 t.Helper() 8822 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 8823 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != numExpected { 8824 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected) 8825 } 8826 return nil 8827 }) 8828 } 8829 checkSubPending(toSend) 8830 8831 checkMsg := func(m *nats.Msg, err error, i int) { 8832 t.Helper() 8833 if err != nil { 8834 t.Fatalf("Got an error checking message: %v", err) 8835 } 8836 if m.Subject != "foo" { 8837 t.Fatalf("Expected original subject of %q, but got %q", "foo", m.Subject) 8838 } 8839 // Now check that reply subject exists and has a sequence as the last token. 8840 if seq := o.seqFromReply(m.Reply); seq != uint64(i) { 8841 t.Fatalf("Expected sequence of %d , got %d", i, seq) 8842 } 8843 } 8844 8845 // Now check the subject to make sure its the original one. 8846 for i := 1; i <= toSend; i++ { 8847 m, err := sub.NextMsg(time.Second) 8848 checkMsg(m, err, i) 8849 } 8850 8851 // Now do a pull based consumer. 8852 o, err = mset.addConsumer(workerModeConfig("p")) 8853 if err != nil { 8854 t.Fatalf("Expected no error with registered interest, got %v", err) 8855 } 8856 defer o.delete() 8857 8858 nextMsg := o.requestNextMsgSubject() 8859 for i := 1; i <= toSend; i++ { 8860 m, err := nc.Request(nextMsg, nil, time.Second) 8861 checkMsg(m, err, i) 8862 } 8863 } 8864 8865 func clientConnectToServerWithUP(t *testing.T, opts *Options, user, pass string) *nats.Conn { 8866 curl := fmt.Sprintf("nats://%s:%s@%s:%d", user, pass, opts.Host, opts.Port) 8867 nc, err := nats.Connect(curl, nats.Name("JS-UP-TEST"), nats.ReconnectWait(5*time.Millisecond), nats.MaxReconnects(-1)) 8868 if err != nil { 8869 t.Fatalf("Failed to create client: %v", err) 8870 } 8871 return nc 8872 } 8873 8874 func TestJetStreamCanNotEnableOnSystemAccount(t *testing.T) { 8875 s := RunBasicJetStreamServer(t) 8876 defer s.Shutdown() 8877 8878 sa := s.SystemAccount() 8879 if err := sa.EnableJetStream(nil); err == nil { 8880 t.Fatalf("Expected an error trying to enable on the system account") 8881 } 8882 } 8883 8884 func TestJetStreamMultipleAccountsBasics(t *testing.T) { 8885 tdir := t.TempDir() 8886 conf := createConfFile(t, []byte(fmt.Sprintf(` 8887 listen: 127.0.0.1:-1 8888 jetstream: {max_mem_store: 64GB, max_file_store: 10TB, store_dir: %q} 8889 accounts: { 8890 A: { 8891 jetstream: enabled 8892 users: [ {user: ua, password: pwd} ] 8893 }, 8894 B: { 8895 jetstream: {max_mem: 1GB, max_store: 1TB, max_streams: 10, max_consumers: 1k} 8896 users: [ {user: ub, password: pwd} ] 8897 }, 8898 C: { 8899 users: [ {user: uc, password: pwd} ] 8900 }, 8901 } 8902 `, tdir))) 8903 8904 s, opts := RunServerWithConfig(conf) 8905 defer s.Shutdown() 8906 8907 if !s.JetStreamEnabled() { 8908 t.Fatalf("Expected JetStream to be enabled") 8909 } 8910 8911 nca := clientConnectToServerWithUP(t, opts, "ua", "pwd") 8912 defer nca.Close() 8913 8914 ncb := clientConnectToServerWithUP(t, opts, "ub", "pwd") 8915 defer ncb.Close() 8916 8917 resp, err := ncb.Request(JSApiAccountInfo, nil, time.Second) 8918 require_NoError(t, err) 8919 var info JSApiAccountInfoResponse 8920 if err := json.Unmarshal(resp.Data, &info); err != nil { 8921 
t.Fatalf("Unexpected error: %v", err) 8922 } 8923 limits := info.Limits 8924 if limits.MaxStreams != 10 { 8925 t.Fatalf("Expected 10 for MaxStreams, got %d", limits.MaxStreams) 8926 } 8927 if limits.MaxConsumers != 1000 { 8928 t.Fatalf("Expected MaxConsumers of %d, got %d", 1000, limits.MaxConsumers) 8929 } 8930 gb := int64(1024 * 1024 * 1024) 8931 if limits.MaxMemory != gb { 8932 t.Fatalf("Expected MaxMemory to be 1GB, got %d", limits.MaxMemory) 8933 } 8934 if limits.MaxStore != 1024*gb { 8935 t.Fatalf("Expected MaxStore to be 1TB, got %d", limits.MaxStore) 8936 } 8937 8938 ncc := clientConnectToServerWithUP(t, opts, "uc", "pwd") 8939 defer ncc.Close() 8940 8941 expectNotEnabled := func(resp *nats.Msg, err error) { 8942 t.Helper() 8943 if err != nil { 8944 t.Fatalf("Unexpected error requesting enabled status: %v", err) 8945 } 8946 if resp == nil { 8947 t.Fatalf("No response, possible timeout?") 8948 } 8949 var iResp JSApiAccountInfoResponse 8950 if err := json.Unmarshal(resp.Data, &iResp); err != nil { 8951 t.Fatalf("Unexpected error: %v", err) 8952 } 8953 if iResp.Error == nil { 8954 t.Fatalf("Expected an error on not enabled account") 8955 } 8956 } 8957 8958 // Check C is not enabled. We expect a negative response, not a timeout. 8959 expectNotEnabled(ncc.Request(JSApiAccountInfo, nil, 250*time.Millisecond)) 8960 8961 // Now do simple reload and check that we do the right thing. Testing enable and disable and also change in limits 8962 newConf := []byte(fmt.Sprintf(` 8963 listen: 127.0.0.1:-1 8964 jetstream: {max_mem_store: 64GB, max_file_store: 10TB, store_dir: %q} 8965 accounts: { 8966 A: { 8967 jetstream: disabled 8968 users: [ {user: ua, password: pwd} ] 8969 }, 8970 B: { 8971 jetstream: {max_mem: 32GB, max_store: 512GB, max_streams: 100, max_consumers: 4k} 8972 users: [ {user: ub, password: pwd} ] 8973 }, 8974 C: { 8975 jetstream: {max_mem: 1GB, max_store: 1TB, max_streams: 10, max_consumers: 1k} 8976 users: [ {user: uc, password: pwd} ] 8977 }, 8978 } 8979 `, tdir)) 8980 if err := os.WriteFile(conf, newConf, 0600); err != nil { 8981 t.Fatalf("Error rewriting server's config file: %v", err) 8982 } 8983 if err := s.Reload(); err != nil { 8984 t.Fatalf("Error on server reload: %v", err) 8985 } 8986 expectNotEnabled(nca.Request(JSApiAccountInfo, nil, 250*time.Millisecond)) 8987 8988 resp, _ = ncb.Request(JSApiAccountInfo, nil, 250*time.Millisecond) 8989 if err := json.Unmarshal(resp.Data, &info); err != nil { 8990 t.Fatalf("Unexpected error: %v", err) 8991 } 8992 if info.Error != nil { 8993 t.Fatalf("Expected JetStream to be enabled, got %+v", info.Error) 8994 } 8995 8996 resp, _ = ncc.Request(JSApiAccountInfo, nil, 250*time.Millisecond) 8997 if err := json.Unmarshal(resp.Data, &info); err != nil { 8998 t.Fatalf("Unexpected error: %v", err) 8999 } 9000 if info.Error != nil { 9001 t.Fatalf("Expected JetStream to be enabled, got %+v", info.Error) 9002 } 9003 9004 // Now check that limits have been updated. 
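// The per-account jetstream config block maps directly onto the reported
// limits: max_mem -> MaxMemory, max_store -> MaxStore, max_streams ->
// MaxStreams, max_consumers -> MaxConsumers (size suffixes like 32GB, 512GB
// and 4k are expanded).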
9005 // Account B 9006 resp, err = ncb.Request(JSApiAccountInfo, nil, time.Second) 9007 require_NoError(t, err) 9008 if err := json.Unmarshal(resp.Data, &info); err != nil { 9009 t.Fatalf("Unexpected error: %v", err) 9010 } 9011 limits = info.Limits 9012 if limits.MaxStreams != 100 { 9013 t.Fatalf("Expected 100 for MaxStreams, got %d", limits.MaxStreams) 9014 } 9015 if limits.MaxConsumers != 4000 { 9016 t.Fatalf("Expected MaxConsumers of %d, got %d", 4000, limits.MaxConsumers) 9017 } 9018 if limits.MaxMemory != 32*gb { 9019 t.Fatalf("Expected MaxMemory to be 32GB, got %d", limits.MaxMemory) 9020 } 9021 if limits.MaxStore != 512*gb { 9022 t.Fatalf("Expected MaxStore to be 512GB, got %d", limits.MaxStore) 9023 } 9024 9025 // Account C 9026 resp, err = ncc.Request(JSApiAccountInfo, nil, time.Second) 9027 require_NoError(t, err) 9028 if err := json.Unmarshal(resp.Data, &info); err != nil { 9029 t.Fatalf("Unexpected error: %v", err) 9030 } 9031 limits = info.Limits 9032 if limits.MaxStreams != 10 { 9033 t.Fatalf("Expected 10 for MaxStreams, got %d", limits.MaxStreams) 9034 } 9035 if limits.MaxConsumers != 1000 { 9036 t.Fatalf("Expected MaxConsumers of %d, got %d", 1000, limits.MaxConsumers) 9037 } 9038 if limits.MaxMemory != gb { 9039 t.Fatalf("Expected MaxMemory to be 1GB, got %d", limits.MaxMemory) 9040 } 9041 if limits.MaxStore != 1024*gb { 9042 t.Fatalf("Expected MaxStore to be 1TB, got %d", limits.MaxStore) 9043 } 9044 } 9045 9046 func TestJetStreamServerResourcesConfig(t *testing.T) { 9047 conf := createConfFile(t, []byte(fmt.Sprintf(` 9048 listen: 127.0.0.1:-1 9049 jetstream: {max_mem_store: 2GB, max_file_store: 1TB, store_dir: %q} 9050 `, t.TempDir()))) 9051 9052 s, _ := RunServerWithConfig(conf) 9053 defer s.Shutdown() 9054 9055 if !s.JetStreamEnabled() { 9056 t.Fatalf("Expected JetStream to be enabled") 9057 } 9058 9059 gb := int64(1024 * 1024 * 1024) 9060 jsc := s.JetStreamConfig() 9061 if jsc.MaxMemory != 2*gb { 9062 t.Fatalf("Expected MaxMemory to be %d, got %d", 2*gb, jsc.MaxMemory) 9063 } 9064 if jsc.MaxStore != 1024*gb { 9065 t.Fatalf("Expected MaxStore to be %d, got %d", 1024*gb, jsc.MaxStore) 9066 } 9067 } 9068 9069 // From 2.2.2 to 2.2.3 we fixed a bug that would not consistently place a jetstream directory 9070 // under the store directory configured. However there were some cases where the directory was 9071 // created that way and therefore 2.2.3 would start and not recognize the existing accounts, 9072 // streams and consumers. 9073 func TestJetStreamStoreDirectoryFix(t *testing.T) { 9074 sd := filepath.Join(os.TempDir(), "sd_test") 9075 defer removeDir(t, sd) 9076 9077 conf := createConfFile(t, []byte(fmt.Sprintf("listen: 127.0.0.1:-1\njetstream: {store_dir: %q}\n", sd))) 9078 9079 s, _ := RunServerWithConfig(conf) 9080 defer s.Shutdown() 9081 9082 nc, js := jsClientConnect(t, s) 9083 defer nc.Close() 9084 9085 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil { 9086 t.Fatalf("Unexpected error: %v", err) 9087 } 9088 if _, err := js.Publish("TEST", []byte("TSS")); err != nil { 9089 t.Fatalf("Unexpected publish error: %v", err) 9090 } 9091 // Push based. 9092 sub, err := js.SubscribeSync("TEST", nats.Durable("dlc")) 9093 require_NoError(t, err) 9094 defer sub.Unsubscribe() 9095 9096 // Now shutdown the server. 9097 nc.Close() 9098 s.Shutdown() 9099 9100 // Now move stuff up from the jetstream directory etc. 9101 jssd := filepath.Join(sd, JetStreamStoreDir) 9102 fis, _ := os.ReadDir(jssd) 9103 // This will be accounts, move them up one directory. 
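// I.e. recreate the pre-2.2.3 layout where account directories sat directly
// under the configured store_dir instead of under store_dir/jetstream.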
9104 for _, fi := range fis { 9105 os.Rename(filepath.Join(jssd, fi.Name()), filepath.Join(sd, fi.Name())) 9106 } 9107 removeDir(t, jssd) 9108 9109 // Restart our server. Make sure our assets got moved. 9110 s, _ = RunServerWithConfig(conf) 9111 defer s.Shutdown() 9112 9113 nc, js = jsClientConnect(t, s) 9114 defer nc.Close() 9115 9116 var names []string 9117 for name := range js.StreamNames() { 9118 names = append(names, name) 9119 } 9120 if len(names) != 1 { 9121 t.Fatalf("Expected only 1 stream but got %d", len(names)) 9122 } 9123 names = names[:0] 9124 for name := range js.ConsumerNames("TEST") { 9125 names = append(names, name) 9126 } 9127 if len(names) != 1 { 9128 t.Fatalf("Expected only 1 consumer but got %d", len(names)) 9129 } 9130 } 9131 9132 func TestJetStreamPushConsumersPullError(t *testing.T) { 9133 s := RunBasicJetStreamServer(t) 9134 defer s.Shutdown() 9135 9136 nc, js := jsClientConnect(t, s) 9137 defer nc.Close() 9138 9139 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil { 9140 t.Fatalf("Unexpected error: %v", err) 9141 } 9142 if _, err := js.Publish("TEST", []byte("TSS")); err != nil { 9143 t.Fatalf("Unexpected publish error: %v", err) 9144 } 9145 // Push based. 9146 sub, err := js.SubscribeSync("TEST") 9147 require_NoError(t, err) 9148 defer sub.Unsubscribe() 9149 ci, err := sub.ConsumerInfo() 9150 require_NoError(t, err) 9151 9152 // Now do a pull. Make sure we get an error. 9153 m, err := nc.Request(fmt.Sprintf(JSApiRequestNextT, "TEST", ci.Name), nil, time.Second) 9154 require_NoError(t, err) 9155 if m.Header.Get("Status") != "409" { 9156 t.Fatalf("Expected a 409 status code, got %q", m.Header.Get("Status")) 9157 } 9158 } 9159 9160 func TestJetStreamPullConsumerMaxWaitingOfOne(t *testing.T) { 9161 s := RunBasicJetStreamServer(t) 9162 defer s.Shutdown() 9163 9164 nc, js := jsClientConnect(t, s) 9165 defer nc.Close() 9166 9167 _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"TEST.A"}}) 9168 require_NoError(t, err) 9169 9170 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 9171 Durable: "dur", 9172 MaxWaiting: 1, 9173 AckPolicy: nats.AckExplicitPolicy, 9174 }) 9175 require_NoError(t, err) 9176 9177 // First check that a request can timeout (we had an issue where this was 9178 // not the case for MaxWaiting of 1). 9179 req := JSApiConsumerGetNextRequest{Batch: 1, Expires: 250 * time.Millisecond} 9180 reqb, _ := json.Marshal(req) 9181 msg, err := nc.Request("$JS.API.CONSUMER.MSG.NEXT.TEST.dur", reqb, 13000*time.Millisecond) 9182 require_NoError(t, err) 9183 if v := msg.Header.Get("Status"); v != "408" { 9184 t.Fatalf("Expected 408, got: %s", v) 9185 } 9186 9187 // Now have a request waiting... 9188 req = JSApiConsumerGetNextRequest{Batch: 1} 9189 reqb, _ = json.Marshal(req) 9190 // Send the request, but do not block since we want then to send an extra 9191 // request that should be rejected. 9192 sub := natsSubSync(t, nc, nats.NewInbox()) 9193 err = nc.PublishRequest("$JS.API.CONSUMER.MSG.NEXT.TEST.dur", sub.Subject, reqb) 9194 require_NoError(t, err) 9195 9196 // Send a new request, this should be rejected as a 409. 
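// One pull request is already parked on the consumer and MaxWaiting is 1, so
// this extra request should be rejected immediately with a 409 status and an
// "Exceeded MaxWaiting" description rather than being queued.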
9197 req = JSApiConsumerGetNextRequest{Batch: 1, Expires: 250 * time.Millisecond} 9198 reqb, _ = json.Marshal(req) 9199 msg, err = nc.Request("$JS.API.CONSUMER.MSG.NEXT.TEST.dur", reqb, 300*time.Millisecond) 9200 require_NoError(t, err) 9201 if v := msg.Header.Get("Status"); v != "409" { 9202 t.Fatalf("Expected 409, got: %s", v) 9203 } 9204 if v := msg.Header.Get("Description"); v != "Exceeded MaxWaiting" { 9205 t.Fatalf("Expected error about exceeded max waiting, got: %s", v) 9206 } 9207 } 9208 9209 func TestJetStreamPullConsumerMaxWaiting(t *testing.T) { 9210 s := RunBasicJetStreamServer(t) 9211 defer s.Shutdown() 9212 9213 nc, js := jsClientConnect(t, s) 9214 defer nc.Close() 9215 9216 _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"test.*"}}) 9217 require_NoError(t, err) 9218 9219 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 9220 Durable: "dur", 9221 AckPolicy: nats.AckExplicitPolicy, 9222 MaxWaiting: 10, 9223 }) 9224 require_NoError(t, err) 9225 9226 // Cannot be updated. 9227 _, err = js.UpdateConsumer("TEST", &nats.ConsumerConfig{ 9228 Durable: "dur", 9229 AckPolicy: nats.AckExplicitPolicy, 9230 MaxWaiting: 1, 9231 }) 9232 if !strings.Contains(err.Error(), "can not be updated") { 9233 t.Fatalf(`expected "cannot be updated" error, got %s`, err) 9234 } 9235 } 9236 9237 //////////////////////////////////////// 9238 // Benchmark placeholders 9239 // TODO(dlc) - move 9240 //////////////////////////////////////// 9241 9242 func TestJetStreamPubPerf(t *testing.T) { 9243 // Comment out to run, holding place for now. 9244 t.SkipNow() 9245 9246 s := RunBasicJetStreamServer(t) 9247 defer s.Shutdown() 9248 9249 acc := s.GlobalAccount() 9250 9251 msetConfig := StreamConfig{ 9252 Name: "sr22", 9253 Storage: FileStorage, 9254 Subjects: []string{"foo"}, 9255 } 9256 9257 if _, err := acc.addStream(&msetConfig); err != nil { 9258 t.Fatalf("Unexpected error adding stream: %v", err) 9259 } 9260 9261 nc := clientConnectToServer(t, s) 9262 defer nc.Close() 9263 9264 toSend := 5_000_000 9265 numProducers := 5 9266 9267 payload := []byte("Hello World") 9268 9269 startCh := make(chan bool) 9270 var wg sync.WaitGroup 9271 9272 for n := 0; n < numProducers; n++ { 9273 wg.Add(1) 9274 go func() { 9275 defer wg.Done() 9276 <-startCh 9277 for i := 0; i < int(toSend)/numProducers; i++ { 9278 nc.Publish("foo", payload) 9279 } 9280 nc.Flush() 9281 }() 9282 } 9283 9284 // Wait for Go routines. 9285 time.Sleep(20 * time.Millisecond) 9286 start := time.Now() 9287 close(startCh) 9288 wg.Wait() 9289 9290 tt := time.Since(start) 9291 fmt.Printf("time is %v\n", tt) 9292 fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds()) 9293 9294 // Stop current 9295 sd := s.JetStreamConfig().StoreDir 9296 s.Shutdown() 9297 // Restart. 9298 start = time.Now() 9299 s = RunJetStreamServerOnPort(-1, sd) 9300 defer s.Shutdown() 9301 fmt.Printf("Took %v to restart!\n", time.Since(start)) 9302 } 9303 9304 func TestJetStreamPubWithAsyncResponsePerf(t *testing.T) { 9305 // Comment out to run, holding place for now. 
9306 t.SkipNow() 9307 9308 s := RunBasicJetStreamServer(t) 9309 defer s.Shutdown() 9310 9311 acc := s.GlobalAccount() 9312 9313 msetConfig := StreamConfig{ 9314 Name: "sr33", 9315 Storage: FileStorage, 9316 Subjects: []string{"foo"}, 9317 } 9318 9319 if _, err := acc.addStream(&msetConfig); err != nil { 9320 t.Fatalf("Unexpected error adding stream: %v", err) 9321 } 9322 9323 nc := clientConnectToServer(t, s) 9324 defer nc.Close() 9325 9326 toSend := 1_000_000 9327 payload := []byte("Hello World") 9328 9329 start := time.Now() 9330 for i := 0; i < toSend; i++ { 9331 nc.PublishRequest("foo", "bar", payload) 9332 } 9333 nc.Flush() 9334 9335 tt := time.Since(start) 9336 fmt.Printf("time is %v\n", tt) 9337 fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds()) 9338 } 9339 9340 func TestJetStreamPubWithSyncPerf(t *testing.T) { 9341 // Comment out to run, holding place for now. 9342 t.SkipNow() 9343 9344 s := RunBasicJetStreamServer(t) 9345 defer s.Shutdown() 9346 9347 nc, js := jsClientConnect(t, s) 9348 defer nc.Close() 9349 9350 _, err := js.AddStream(&nats.StreamConfig{Name: "foo"}) 9351 require_NoError(t, err) 9352 9353 toSend := 1_000_000 9354 payload := []byte("Hello World") 9355 9356 start := time.Now() 9357 for i := 0; i < toSend; i++ { 9358 js.Publish("foo", payload) 9359 } 9360 9361 tt := time.Since(start) 9362 fmt.Printf("time is %v\n", tt) 9363 fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds()) 9364 } 9365 9366 func TestJetStreamConsumerPerf(t *testing.T) { 9367 // Comment out to run, holding place for now. 9368 t.SkipNow() 9369 9370 s := RunBasicJetStreamServer(t) 9371 defer s.Shutdown() 9372 9373 acc := s.GlobalAccount() 9374 9375 msetConfig := StreamConfig{ 9376 Name: "sr22", 9377 Storage: MemoryStorage, 9378 Subjects: []string{"foo"}, 9379 } 9380 9381 mset, err := acc.addStream(&msetConfig) 9382 if err != nil { 9383 t.Fatalf("Unexpected error adding stream: %v", err) 9384 } 9385 9386 nc := clientConnectToServer(t, s) 9387 defer nc.Close() 9388 9389 payload := []byte("Hello World") 9390 9391 toStore := 2000000 9392 for i := 0; i < toStore; i++ { 9393 nc.Publish("foo", payload) 9394 } 9395 nc.Flush() 9396 9397 _, err = mset.addConsumer(&ConsumerConfig{ 9398 Durable: "d", 9399 DeliverSubject: "d", 9400 AckPolicy: AckNone, 9401 }) 9402 if err != nil { 9403 t.Fatalf("Error creating consumer: %v", err) 9404 } 9405 9406 var received int 9407 done := make(chan bool) 9408 9409 nc.Subscribe("d", func(m *nats.Msg) { 9410 received++ 9411 if received >= toStore { 9412 done <- true 9413 } 9414 }) 9415 start := time.Now() 9416 nc.Flush() 9417 9418 <-done 9419 tt := time.Since(start) 9420 fmt.Printf("time is %v\n", tt) 9421 fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds()) 9422 } 9423 9424 func TestJetStreamConsumerAckFileStorePerf(t *testing.T) { 9425 // Comment out to run, holding place for now. 
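	// NOTE (editor, illustrative): the two publish perf tests above differ mainly in
	// whether the publisher waits for the stream's PubAck. As a hedged sketch of the
	// same two styles through the nats.go JetStream context (not what the raw core
	// publishes above use):
	//
	//	_, _ = js.Publish("foo", payload)  // synchronous, blocks for the PubAck
	//
	//	js.PublishAsync("foo", payload)    // pipelined publishes with async acks
	//	<-js.PublishAsyncComplete()        // wait for all outstanding acks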
9426 t.SkipNow() 9427 9428 s := RunBasicJetStreamServer(t) 9429 defer s.Shutdown() 9430 9431 acc := s.GlobalAccount() 9432 9433 msetConfig := StreamConfig{ 9434 Name: "sr22", 9435 Storage: FileStorage, 9436 Subjects: []string{"foo"}, 9437 } 9438 9439 mset, err := acc.addStream(&msetConfig) 9440 if err != nil { 9441 t.Fatalf("Unexpected error adding stream: %v", err) 9442 } 9443 9444 nc := clientConnectToServer(t, s) 9445 defer nc.Close() 9446 9447 payload := []byte("Hello World") 9448 9449 toStore := uint64(200000) 9450 for i := uint64(0); i < toStore; i++ { 9451 nc.Publish("foo", payload) 9452 } 9453 nc.Flush() 9454 9455 if msgs := mset.state().Msgs; msgs != uint64(toStore) { 9456 t.Fatalf("Expected %d messages, got %d", toStore, msgs) 9457 } 9458 9459 o, err := mset.addConsumer(&ConsumerConfig{ 9460 Durable: "d", 9461 DeliverSubject: "d", 9462 AckPolicy: AckExplicit, 9463 AckWait: 10 * time.Minute, 9464 }) 9465 if err != nil { 9466 t.Fatalf("Error creating consumer: %v", err) 9467 } 9468 defer o.stop() 9469 9470 var received uint64 9471 done := make(chan bool) 9472 9473 sub, _ := nc.Subscribe("d", func(m *nats.Msg) { 9474 m.Respond(nil) // Ack 9475 received++ 9476 if received >= toStore { 9477 done <- true 9478 } 9479 }) 9480 sub.SetPendingLimits(-1, -1) 9481 9482 start := time.Now() 9483 nc.Flush() 9484 9485 <-done 9486 tt := time.Since(start) 9487 fmt.Printf("time is %v\n", tt) 9488 fmt.Printf("%.0f msgs/sec\n", float64(toStore)/tt.Seconds()) 9489 } 9490 9491 func TestJetStreamPubSubPerf(t *testing.T) { 9492 // Comment out to run, holding place for now. 9493 t.SkipNow() 9494 9495 s := RunBasicJetStreamServer(t) 9496 defer s.Shutdown() 9497 9498 acc := s.GlobalAccount() 9499 9500 msetConfig := StreamConfig{ 9501 Name: "MSET22", 9502 Storage: FileStorage, 9503 Subjects: []string{"foo"}, 9504 } 9505 9506 mset, err := acc.addStream(&msetConfig) 9507 if err != nil { 9508 t.Fatalf("Unexpected error adding stream: %v", err) 9509 } 9510 9511 nc := clientConnectToServer(t, s) 9512 defer nc.Close() 9513 9514 var toSend = 1_000_000 9515 var received int 9516 done := make(chan bool) 9517 9518 delivery := "d" 9519 9520 nc.Subscribe(delivery, func(m *nats.Msg) { 9521 received++ 9522 if received >= toSend { 9523 done <- true 9524 } 9525 }) 9526 nc.Flush() 9527 9528 _, err = mset.addConsumer(&ConsumerConfig{ 9529 DeliverSubject: delivery, 9530 AckPolicy: AckNone, 9531 }) 9532 if err != nil { 9533 t.Fatalf("Error creating consumer: %v", err) 9534 } 9535 9536 payload := []byte("Hello World") 9537 9538 start := time.Now() 9539 9540 for i := 0; i < toSend; i++ { 9541 nc.Publish("foo", payload) 9542 } 9543 9544 <-done 9545 tt := time.Since(start) 9546 fmt.Printf("time is %v\n", tt) 9547 fmt.Printf("%.0f msgs/sec\n", float64(toSend)/tt.Seconds()) 9548 } 9549 9550 func TestJetStreamAckExplicitMsgRemoval(t *testing.T) { 9551 cases := []struct { 9552 name string 9553 mconfig *StreamConfig 9554 }{ 9555 {"MemoryStore", &StreamConfig{ 9556 Name: "MY_STREAM", 9557 Storage: MemoryStorage, 9558 Subjects: []string{"foo.*"}, 9559 Retention: InterestPolicy, 9560 }}, 9561 {"FileStore", &StreamConfig{ 9562 Name: "MY_STREAM", 9563 Storage: FileStorage, 9564 Subjects: []string{"foo.*"}, 9565 Retention: InterestPolicy, 9566 }}, 9567 } 9568 for _, c := range cases { 9569 t.Run(c.name, func(t *testing.T) { 9570 s := RunBasicJetStreamServer(t) 9571 defer s.Shutdown() 9572 9573 mset, err := s.GlobalAccount().addStream(c.mconfig) 9574 if err != nil { 9575 t.Fatalf("Unexpected error adding stream: %v", err) 9576 } 9577 defer 
mset.delete() 9578 9579 nc1 := clientConnectToServer(t, s) 9580 defer nc1.Close() 9581 9582 nc2 := clientConnectToServer(t, s) 9583 defer nc2.Close() 9584 9585 // Create two durable consumers on the same subject 9586 sub1, _ := nc1.SubscribeSync(nats.NewInbox()) 9587 defer sub1.Unsubscribe() 9588 nc1.Flush() 9589 9590 o1, err := mset.addConsumer(&ConsumerConfig{ 9591 Durable: "dur1", 9592 DeliverSubject: sub1.Subject, 9593 FilterSubject: "foo.bar", 9594 AckPolicy: AckExplicit, 9595 }) 9596 if err != nil { 9597 t.Fatalf("Unexpected error adding consumer: %v", err) 9598 } 9599 defer o1.delete() 9600 9601 sub2, _ := nc2.SubscribeSync(nats.NewInbox()) 9602 defer sub2.Unsubscribe() 9603 nc2.Flush() 9604 9605 o2, err := mset.addConsumer(&ConsumerConfig{ 9606 Durable: "dur2", 9607 DeliverSubject: sub2.Subject, 9608 FilterSubject: "foo.bar", 9609 AckPolicy: AckExplicit, 9610 AckWait: 100 * time.Millisecond, 9611 }) 9612 if err != nil { 9613 t.Fatalf("Unexpected error adding consumer: %v", err) 9614 } 9615 defer o2.delete() 9616 9617 // Send 2 messages 9618 toSend := 2 9619 for i := 0; i < toSend; i++ { 9620 sendStreamMsg(t, nc1, "foo.bar", fmt.Sprintf("msg%v", i+1)) 9621 } 9622 state := mset.state() 9623 if state.Msgs != uint64(toSend) { 9624 t.Fatalf("Expected %v messages, got %d", toSend, state.Msgs) 9625 } 9626 9627 // Receive the messages and ack them. 9628 subs := []*nats.Subscription{sub1, sub2} 9629 for _, sub := range subs { 9630 for i := 0; i < toSend; i++ { 9631 m, err := sub.NextMsg(time.Second) 9632 if err != nil { 9633 t.Fatalf("Error acking message: %v", err) 9634 } 9635 m.Respond(nil) 9636 } 9637 } 9638 // To make sure acks are processed for checking state after sending new ones. 9639 checkFor(t, time.Second, 25*time.Millisecond, func() error { 9640 if state = mset.state(); state.Msgs != 0 { 9641 return fmt.Errorf("Stream still has messages") 9642 } 9643 return nil 9644 }) 9645 9646 // Now close the 2nd subscription... 9647 sub2.Unsubscribe() 9648 nc2.Flush() 9649 9650 // Send 2 more new messages 9651 for i := 0; i < toSend; i++ { 9652 sendStreamMsg(t, nc1, "foo.bar", fmt.Sprintf("msg%v", 2+i+1)) 9653 } 9654 state = mset.state() 9655 if state.Msgs != uint64(toSend) { 9656 t.Fatalf("Expected %v messages, got %d", toSend, state.Msgs) 9657 } 9658 9659 // first subscription should get it and will ack it. 9660 for i := 0; i < toSend; i++ { 9661 m, err := sub1.NextMsg(time.Second) 9662 if err != nil { 9663 t.Fatalf("Error getting message to ack: %v", err) 9664 } 9665 m.Respond(nil) 9666 } 9667 // For acks from m.Respond above 9668 nc1.Flush() 9669 9670 // Now recreate the subscription for the 2nd JS consumer 9671 sub2, _ = nc2.SubscribeSync(nats.NewInbox()) 9672 defer sub2.Unsubscribe() 9673 9674 o2, err = mset.addConsumer(&ConsumerConfig{ 9675 Durable: "dur2", 9676 DeliverSubject: sub2.Subject, 9677 FilterSubject: "foo.bar", 9678 AckPolicy: AckExplicit, 9679 AckWait: 100 * time.Millisecond, 9680 }) 9681 if err != nil { 9682 t.Fatalf("Unexpected error adding consumer: %v", err) 9683 } 9684 defer o2.delete() 9685 9686 // Those messages should be redelivered to the 2nd consumer 9687 for i := 1; i <= toSend; i++ { 9688 m, err := sub2.NextMsg(time.Second) 9689 if err != nil { 9690 t.Fatalf("Error receiving message %d: %v", i, err) 9691 } 9692 m.Respond(nil) 9693 9694 sseq := o2.streamSeqFromReply(m.Reply) 9695 // Depending on timing from above we could receive stream sequences out of order but 9696 // we know we want 3 & 4. 
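	// NOTE (editor, illustrative): this behavior follows from Retention: InterestPolicy
	// on the stream: messages are removed only once every consumer with interest has
	// acked them, which is why the stream drained to zero earlier and why sequences
	// 3 and 4 are still available for the re-created "dur2". A minimal client-side
	// sketch of such a stream (hypothetical name):
	//
	//	_, _ = js.AddStream(&nats.StreamConfig{
	//		Name:      "EVENTS",
	//		Subjects:  []string{"events.>"},
	//		Retention: nats.InterestPolicy,
	//	})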
9697 if sseq != 3 && sseq != 4 { 9698 t.Fatalf("Expected stream sequence of 3 or 4 but got %d", sseq) 9699 } 9700 } 9701 }) 9702 } 9703 } 9704 9705 // This test is in support fo clients that want to match on subject, they 9706 // can set the filter subject always. We always store the subject so that 9707 // should the stream later be edited to expand into more subjects the consumer 9708 // still gets what was actually requested 9709 func TestJetStreamConsumerFilterSubject(t *testing.T) { 9710 s := RunBasicJetStreamServer(t) 9711 defer s.Shutdown() 9712 9713 sc := &StreamConfig{Name: "MY_STREAM", Subjects: []string{"foo"}} 9714 mset, err := s.GlobalAccount().addStream(sc) 9715 if err != nil { 9716 t.Fatalf("Unexpected error adding stream: %v", err) 9717 } 9718 defer mset.delete() 9719 9720 cfg := &ConsumerConfig{ 9721 Durable: "d", 9722 DeliverSubject: "A", 9723 AckPolicy: AckExplicit, 9724 FilterSubject: "foo", 9725 } 9726 9727 o, err := mset.addConsumer(cfg) 9728 if err != nil { 9729 t.Fatalf("Unexpected error adding consumer: %v", err) 9730 } 9731 defer o.delete() 9732 9733 if o.info().Config.FilterSubject != "foo" { 9734 t.Fatalf("Expected the filter to be stored") 9735 } 9736 9737 // Now use the original cfg with updated delivery subject and make sure that works ok. 9738 cfg = &ConsumerConfig{ 9739 Durable: "d", 9740 DeliverSubject: "B", 9741 AckPolicy: AckExplicit, 9742 FilterSubject: "foo", 9743 } 9744 9745 o, err = mset.addConsumer(cfg) 9746 if err != nil { 9747 t.Fatalf("Unexpected error adding consumer: %v", err) 9748 } 9749 defer o.delete() 9750 } 9751 9752 func TestJetStreamStoredMsgsDontDisappearAfterCacheExpiration(t *testing.T) { 9753 sc := &StreamConfig{ 9754 Name: "MY_STREAM", 9755 Storage: FileStorage, 9756 Subjects: []string{"foo.>"}, 9757 Retention: InterestPolicy, 9758 } 9759 9760 s := RunBasicJetStreamServer(t) 9761 defer s.Shutdown() 9762 9763 mset, err := s.GlobalAccount().addStreamWithStore(sc, &FileStoreConfig{BlockSize: 128, CacheExpire: 15 * time.Millisecond}) 9764 if err != nil { 9765 t.Fatalf("Unexpected error adding stream: %v", err) 9766 } 9767 defer mset.delete() 9768 9769 nc1 := clientConnectWithOldRequest(t, s) 9770 defer nc1.Close() 9771 9772 // Create a durable consumers 9773 sub, _ := nc1.SubscribeSync(nats.NewInbox()) 9774 defer sub.Unsubscribe() 9775 nc1.Flush() 9776 9777 o, err := mset.addConsumer(&ConsumerConfig{ 9778 Durable: "dur", 9779 DeliverSubject: sub.Subject, 9780 FilterSubject: "foo.bar", 9781 DeliverPolicy: DeliverNew, 9782 AckPolicy: AckExplicit, 9783 }) 9784 if err != nil { 9785 t.Fatalf("Unexpected error adding consumer: %v", err) 9786 } 9787 defer o.delete() 9788 9789 nc2 := clientConnectWithOldRequest(t, s) 9790 defer nc2.Close() 9791 9792 sendStreamMsg(t, nc2, "foo.bar", "msg1") 9793 9794 msg, err := sub.NextMsg(time.Second) 9795 if err != nil { 9796 t.Fatalf("Did not get message: %v", err) 9797 } 9798 if string(msg.Data) != "msg1" { 9799 t.Fatalf("Unexpected message: %q", msg.Data) 9800 } 9801 9802 nc1.Close() 9803 9804 // Get the message from the stream 9805 getMsgSeq := func(seq uint64) { 9806 t.Helper() 9807 mreq := &JSApiMsgGetRequest{Seq: seq} 9808 req, err := json.Marshal(mreq) 9809 if err != nil { 9810 t.Fatalf("Unexpected error: %v", err) 9811 } 9812 smsgj, err := nc2.Request(fmt.Sprintf(JSApiMsgGetT, sc.Name), req, time.Second) 9813 if err != nil { 9814 t.Fatalf("Could not retrieve stream message: %v", err) 9815 } 9816 if strings.Contains(string(smsgj.Data), "code") { 9817 t.Fatalf("Error: %q", smsgj.Data) 9818 } 9819 } 
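	// NOTE (editor, illustrative): getMsgSeq above issues the raw $JS.API.STREAM.MSG.GET
	// request by hand; the nats.go manager helper is the equivalent, sketched here only
	// for context (this test deliberately connects with the old request style instead):
	//
	//	rm, _ := js.GetMsg("MY_STREAM", 1) // *nats.RawStreamMsg for sequence 1
	//	_ = rm.Data                        // still readable after the cache expires
	//
	// The point below is that these lookups keep working after the very small
	// CacheExpire window configured for the file store has elapsed.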
9820 9821 getMsgSeq(1) 9822 9823 time.Sleep(time.Second) 9824 9825 sendStreamMsg(t, nc2, "foo.bar", "msg2") 9826 sendStreamMsg(t, nc2, "foo.bar", "msg3") 9827 9828 getMsgSeq(1) 9829 getMsgSeq(2) 9830 getMsgSeq(3) 9831 } 9832 9833 func TestJetStreamConsumerUpdateRedelivery(t *testing.T) { 9834 cases := []struct { 9835 name string 9836 mconfig *StreamConfig 9837 }{ 9838 {"MemoryStore", &StreamConfig{ 9839 Name: "MY_STREAM", 9840 Storage: MemoryStorage, 9841 Subjects: []string{"foo.>"}, 9842 Retention: InterestPolicy, 9843 }}, 9844 {"FileStore", &StreamConfig{ 9845 Name: "MY_STREAM", 9846 Storage: FileStorage, 9847 Subjects: []string{"foo.>"}, 9848 Retention: InterestPolicy, 9849 }}, 9850 } 9851 for _, c := range cases { 9852 t.Run(c.name, func(t *testing.T) { 9853 s := RunBasicJetStreamServer(t) 9854 defer s.Shutdown() 9855 9856 mset, err := s.GlobalAccount().addStream(c.mconfig) 9857 if err != nil { 9858 t.Fatalf("Unexpected error adding stream: %v", err) 9859 } 9860 defer mset.delete() 9861 9862 nc := clientConnectToServer(t, s) 9863 defer nc.Close() 9864 9865 // Create a durable consumer. 9866 sub, _ := nc.SubscribeSync(nats.NewInbox()) 9867 defer sub.Unsubscribe() 9868 9869 o, err := mset.addConsumer(&ConsumerConfig{ 9870 Durable: "dur22", 9871 DeliverSubject: sub.Subject, 9872 FilterSubject: "foo.bar", 9873 AckPolicy: AckExplicit, 9874 AckWait: 100 * time.Millisecond, 9875 MaxDeliver: 3, 9876 }) 9877 if err != nil { 9878 t.Fatalf("Unexpected error adding consumer: %v", err) 9879 } 9880 defer o.delete() 9881 9882 // Send 20 messages 9883 toSend := 20 9884 for i := 1; i <= toSend; i++ { 9885 sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("msg-%v", i)) 9886 } 9887 state := mset.state() 9888 if state.Msgs != uint64(toSend) { 9889 t.Fatalf("Expected %v messages, got %d", toSend, state.Msgs) 9890 } 9891 9892 // Receive the messages and ack only every 4th 9893 for i := 0; i < toSend; i++ { 9894 m, err := sub.NextMsg(time.Second) 9895 if err != nil { 9896 t.Fatalf("Error getting message: %v", err) 9897 } 9898 seq, _, _, _, _ := replyInfo(m.Reply) 9899 // 4, 8, 12, 16, 20 9900 if seq%4 == 0 { 9901 m.Respond(nil) 9902 } 9903 } 9904 9905 // Now close the sub and open a new one and update the consumer. 9906 sub.Unsubscribe() 9907 9908 // Wait for it to become inactive 9909 checkFor(t, 200*time.Millisecond, 10*time.Millisecond, func() error { 9910 if o.isActive() { 9911 return fmt.Errorf("Consumer still active") 9912 } 9913 return nil 9914 }) 9915 9916 // Send 20 more messages. 9917 for i := toSend; i < toSend*2; i++ { 9918 sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("msg-%v", i)) 9919 } 9920 9921 // Create new subscription. 
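	// NOTE (editor, illustrative): the redelivery accounting checked below is driven by
	// two knobs set on the consumer above: AckWait (how long the server waits for an
	// ack before redelivering) and MaxDeliver (the cap on delivery attempts). The same
	// settings through the public nats.go API would look roughly like:
	//
	//	_, _ = js.AddConsumer("MY_STREAM", &nats.ConsumerConfig{
	//		Durable:    "dur22",
	//		AckPolicy:  nats.AckExplicitPolicy,
	//		AckWait:    100 * time.Millisecond,
	//		MaxDeliver: 3,
	//	})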
9922 sub, _ = nc.SubscribeSync(nats.NewInbox()) 9923 defer sub.Unsubscribe() 9924 nc.Flush() 9925 9926 o, err = mset.addConsumer(&ConsumerConfig{ 9927 Durable: "dur22", 9928 DeliverSubject: sub.Subject, 9929 FilterSubject: "foo.bar", 9930 AckPolicy: AckExplicit, 9931 AckWait: 100 * time.Millisecond, 9932 MaxDeliver: 3, 9933 }) 9934 if err != nil { 9935 t.Fatalf("Unexpected error adding consumer: %v", err) 9936 } 9937 defer o.delete() 9938 9939 expect := toSend + toSend - 5 // mod 4 acks 9940 checkFor(t, time.Second, 5*time.Millisecond, func() error { 9941 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != expect { 9942 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, expect) 9943 } 9944 return nil 9945 }) 9946 9947 for i, eseq := 0, uint64(1); i < expect; i++ { 9948 m, err := sub.NextMsg(time.Second) 9949 if err != nil { 9950 t.Fatalf("Error getting message: %v", err) 9951 } 9952 // Skip the ones we ack'd from above. We should not get them back here. 9953 if eseq <= uint64(toSend) && eseq%4 == 0 { 9954 eseq++ 9955 } 9956 seq, _, dc, _, _ := replyInfo(m.Reply) 9957 if seq != eseq { 9958 t.Fatalf("Expected stream sequence of %d, got %d", eseq, seq) 9959 } 9960 if seq <= uint64(toSend) && dc != 2 { 9961 t.Fatalf("Expected delivery count of 2 for sequence of %d, got %d", seq, dc) 9962 } 9963 if seq > uint64(toSend) && dc != 1 { 9964 t.Fatalf("Expected delivery count of 1 for sequence of %d, got %d", seq, dc) 9965 } 9966 if seq > uint64(toSend) { 9967 m.Respond(nil) // Ack 9968 } 9969 eseq++ 9970 } 9971 9972 // We should get the second half back since we did not ack those from above. 9973 expect = toSend - 5 9974 checkFor(t, time.Second, 5*time.Millisecond, func() error { 9975 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != expect { 9976 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, expect) 9977 } 9978 return nil 9979 }) 9980 9981 for i, eseq := 0, uint64(1); i < expect; i++ { 9982 m, err := sub.NextMsg(time.Second) 9983 if err != nil { 9984 t.Fatalf("Error getting message: %v", err) 9985 } 9986 // Skip the ones we ack'd from above. We should not get them back here. 9987 if eseq <= uint64(toSend) && eseq%4 == 0 { 9988 eseq++ 9989 } 9990 seq, _, dc, _, _ := replyInfo(m.Reply) 9991 if seq != eseq { 9992 t.Fatalf("Expected stream sequence of %d, got %d", eseq, seq) 9993 } 9994 if dc != 3 { 9995 t.Fatalf("Expected delivery count of 3 for sequence of %d, got %d", seq, dc) 9996 } 9997 eseq++ 9998 } 9999 }) 10000 } 10001 } 10002 10003 func TestJetStreamConsumerMaxAckPending(t *testing.T) { 10004 cases := []struct { 10005 name string 10006 mconfig *StreamConfig 10007 }{ 10008 {"MemoryStore", &StreamConfig{ 10009 Name: "MY_STREAM", 10010 Storage: MemoryStorage, 10011 Subjects: []string{"foo.*"}, 10012 }}, 10013 {"FileStore", &StreamConfig{ 10014 Name: "MY_STREAM", 10015 Storage: FileStorage, 10016 Subjects: []string{"foo.*"}, 10017 }}, 10018 } 10019 for _, c := range cases { 10020 t.Run(c.name, func(t *testing.T) { 10021 s := RunBasicJetStreamServer(t) 10022 defer s.Shutdown() 10023 10024 mset, err := s.GlobalAccount().addStream(c.mconfig) 10025 if err != nil { 10026 t.Fatalf("Unexpected error adding stream: %v", err) 10027 } 10028 defer mset.delete() 10029 10030 nc := clientConnectToServer(t, s) 10031 defer nc.Close() 10032 10033 // Do error scenarios. 
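	// NOTE (editor, illustrative): MaxAckPending caps how many delivered-but-unacked
	// messages a consumer may have outstanding, so it is only meaningful with an ack
	// policy other than AckNone; that invalid combination is the first error case
	// exercised below. A rough nats.go sketch of a valid configuration:
	//
	//	_, _ = js.AddConsumer("MY_STREAM", &nats.ConsumerConfig{
	//		Durable:       "d22",
	//		AckPolicy:     nats.AckExplicitPolicy,
	//		MaxAckPending: 33,
	//	})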
10034 _, err = mset.addConsumer(&ConsumerConfig{ 10035 Durable: "d22", 10036 DeliverSubject: nats.NewInbox(), 10037 AckPolicy: AckNone, 10038 MaxAckPending: 1, 10039 }) 10040 if err == nil { 10041 t.Fatalf("Expected error, MaxAckPending only applicable to ack != AckNone") 10042 } 10043 10044 // Queue up 100 messages. 10045 toSend := 100 10046 for i := 0; i < toSend; i++ { 10047 sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("MSG: %d", i+1)) 10048 } 10049 10050 // Limit to 33 10051 maxAckPending := 33 10052 10053 o, err := mset.addConsumer(&ConsumerConfig{ 10054 Durable: "d22", 10055 DeliverSubject: nats.NewInbox(), 10056 AckPolicy: AckExplicit, 10057 MaxAckPending: maxAckPending, 10058 }) 10059 require_NoError(t, err) 10060 10061 defer o.delete() 10062 10063 sub, _ := nc.SubscribeSync(o.info().Config.DeliverSubject) 10064 defer sub.Unsubscribe() 10065 10066 checkSubPending := func(numExpected int) { 10067 t.Helper() 10068 checkFor(t, time.Second, 20*time.Millisecond, func() error { 10069 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != numExpected { 10070 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected) 10071 } 10072 return nil 10073 }) 10074 } 10075 10076 checkSubPending(maxAckPending) 10077 // We hit the limit, double check we stayed there. 10078 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != maxAckPending { 10079 t.Fatalf("Too many messages received: %d vs %d", nmsgs, maxAckPending) 10080 } 10081 10082 // Now ack them all. 10083 for i := 0; i < maxAckPending; i++ { 10084 m, err := sub.NextMsg(time.Second) 10085 if err != nil { 10086 t.Fatalf("Error receiving message %d: %v", i, err) 10087 } 10088 m.Respond(nil) 10089 } 10090 checkSubPending(maxAckPending) 10091 10092 o.stop() 10093 mset.purge(nil) 10094 10095 // Now test a consumer that is live while we publish messages to the stream. 10096 o, err = mset.addConsumer(&ConsumerConfig{ 10097 Durable: "d33", 10098 DeliverSubject: nats.NewInbox(), 10099 AckPolicy: AckExplicit, 10100 MaxAckPending: maxAckPending, 10101 }) 10102 require_NoError(t, err) 10103 10104 defer o.delete() 10105 10106 sub, _ = nc.SubscribeSync(o.info().Config.DeliverSubject) 10107 defer sub.Unsubscribe() 10108 nc.Flush() 10109 10110 checkSubPending(0) 10111 10112 // Now stream more then maxAckPending. 10113 for i := 0; i < toSend; i++ { 10114 sendStreamMsg(t, nc, "foo.baz", fmt.Sprintf("MSG: %d", i+1)) 10115 } 10116 checkSubPending(maxAckPending) 10117 // We hit the limit, double check we stayed there. 
10118 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != maxAckPending { 10119 t.Fatalf("Too many messages received: %d vs %d", nmsgs, maxAckPending) 10120 } 10121 }) 10122 } 10123 } 10124 10125 func TestJetStreamPullConsumerMaxAckPending(t *testing.T) { 10126 cases := []struct { 10127 name string 10128 mconfig *StreamConfig 10129 }{ 10130 {"MemoryStore", &StreamConfig{ 10131 Name: "MY_STREAM", 10132 Storage: MemoryStorage, 10133 Subjects: []string{"foo.*"}, 10134 }}, 10135 {"FileStore", &StreamConfig{ 10136 Name: "MY_STREAM", 10137 Storage: FileStorage, 10138 Subjects: []string{"foo.*"}, 10139 }}, 10140 } 10141 for _, c := range cases { 10142 t.Run(c.name, func(t *testing.T) { 10143 s := RunBasicJetStreamServer(t) 10144 defer s.Shutdown() 10145 10146 mset, err := s.GlobalAccount().addStream(c.mconfig) 10147 if err != nil { 10148 t.Fatalf("Unexpected error adding stream: %v", err) 10149 } 10150 defer mset.delete() 10151 10152 nc := clientConnectToServer(t, s) 10153 defer nc.Close() 10154 10155 // Queue up 100 messages. 10156 toSend := 100 10157 for i := 0; i < toSend; i++ { 10158 sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("MSG: %d", i+1)) 10159 } 10160 10161 // Limit to 33 10162 maxAckPending := 33 10163 10164 o, err := mset.addConsumer(&ConsumerConfig{ 10165 Durable: "d22", 10166 AckPolicy: AckExplicit, 10167 MaxAckPending: maxAckPending, 10168 }) 10169 require_NoError(t, err) 10170 10171 defer o.delete() 10172 10173 getSubj := o.requestNextMsgSubject() 10174 10175 var toAck []*nats.Msg 10176 10177 for i := 0; i < maxAckPending; i++ { 10178 if m, err := nc.Request(getSubj, nil, time.Second); err != nil { 10179 t.Fatalf("Unexpected error: %v", err) 10180 } else { 10181 toAck = append(toAck, m) 10182 } 10183 } 10184 10185 // Now ack them all. 10186 for _, m := range toAck { 10187 m.Respond(nil) 10188 } 10189 10190 // Now do batch above the max. 10191 sub, _ := nc.SubscribeSync(nats.NewInbox()) 10192 defer sub.Unsubscribe() 10193 10194 checkSubPending := func(numExpected int) { 10195 t.Helper() 10196 checkFor(t, time.Second, 10*time.Millisecond, func() error { 10197 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != numExpected { 10198 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, numExpected) 10199 } 10200 return nil 10201 }) 10202 } 10203 10204 req := &JSApiConsumerGetNextRequest{Batch: maxAckPending} 10205 jreq, _ := json.Marshal(req) 10206 nc.PublishRequest(getSubj, sub.Subject, jreq) 10207 10208 checkSubPending(maxAckPending) 10209 // We hit the limit, double check we stayed there. 
10210 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != maxAckPending { 10211 t.Fatalf("Too many messages received: %d vs %d", nmsgs, maxAckPending) 10212 } 10213 }) 10214 } 10215 } 10216 10217 func TestJetStreamPullConsumerMaxAckPendingRedeliveries(t *testing.T) { 10218 cases := []struct { 10219 name string 10220 mconfig *StreamConfig 10221 }{ 10222 {"MemoryStore", &StreamConfig{ 10223 Name: "MY_STREAM", 10224 Storage: MemoryStorage, 10225 Subjects: []string{"foo.*"}, 10226 }}, 10227 {"FileStore", &StreamConfig{ 10228 Name: "MY_STREAM", 10229 Storage: FileStorage, 10230 Subjects: []string{"foo.*"}, 10231 }}, 10232 } 10233 for _, c := range cases { 10234 t.Run(c.name, func(t *testing.T) { 10235 s := RunBasicJetStreamServer(t) 10236 defer s.Shutdown() 10237 10238 mset, err := s.GlobalAccount().addStream(c.mconfig) 10239 if err != nil { 10240 t.Fatalf("Unexpected error adding stream: %v", err) 10241 } 10242 defer mset.delete() 10243 10244 nc := clientConnectToServer(t, s) 10245 defer nc.Close() 10246 10247 // Queue up 10 messages. 10248 toSend := 10 10249 for i := 0; i < toSend; i++ { 10250 sendStreamMsg(t, nc, "foo.bar", fmt.Sprintf("MSG: %d", i+1)) 10251 } 10252 10253 // Limit to 1 10254 maxAckPending := 1 10255 ackWait := 20 * time.Millisecond 10256 expSeq := uint64(4) 10257 10258 o, err := mset.addConsumer(&ConsumerConfig{ 10259 Durable: "d22", 10260 DeliverPolicy: DeliverByStartSequence, 10261 OptStartSeq: expSeq, 10262 AckPolicy: AckExplicit, 10263 AckWait: ackWait, 10264 MaxAckPending: maxAckPending, 10265 }) 10266 require_NoError(t, err) 10267 10268 defer o.delete() 10269 10270 getSubj := o.requestNextMsgSubject() 10271 delivery := uint64(1) 10272 10273 getNext := func() { 10274 t.Helper() 10275 m, err := nc.Request(getSubj, nil, time.Second) 10276 if err != nil { 10277 t.Fatalf("Unexpected error: %v", err) 10278 } 10279 sseq, dseq, dcount, _, pending := replyInfo(m.Reply) 10280 if sseq != expSeq { 10281 t.Fatalf("Expected stream sequence of %d, got %d", expSeq, sseq) 10282 } 10283 if dseq != delivery { 10284 t.Fatalf("Expected consumer sequence of %d, got %d", delivery, dseq) 10285 } 10286 if dcount != delivery { 10287 t.Fatalf("Expected delivery count of %d, got %d", delivery, dcount) 10288 } 10289 if pending != uint64(toSend)-expSeq { 10290 t.Fatalf("Expected pending to be %d, got %d", uint64(toSend)-expSeq, pending) 10291 } 10292 delivery++ 10293 } 10294 10295 getNext() 10296 getNext() 10297 getNext() 10298 getNext() 10299 getNext() 10300 }) 10301 } 10302 } 10303 10304 func TestJetStreamDeliveryAfterServerRestart(t *testing.T) { 10305 opts := DefaultTestOptions 10306 opts.Port = -1 10307 opts.JetStream = true 10308 opts.StoreDir = t.TempDir() 10309 s := RunServer(&opts) 10310 defer s.Shutdown() 10311 10312 mset, err := s.GlobalAccount().addStream(&StreamConfig{ 10313 Name: "MY_STREAM", 10314 Storage: FileStorage, 10315 Subjects: []string{"foo.>"}, 10316 Retention: InterestPolicy, 10317 }) 10318 if err != nil { 10319 t.Fatalf("Unexpected error adding stream: %v", err) 10320 } 10321 defer mset.delete() 10322 10323 nc := clientConnectToServer(t, s) 10324 defer nc.Close() 10325 10326 inbox := nats.NewInbox() 10327 o, err := mset.addConsumer(&ConsumerConfig{ 10328 Durable: "dur", 10329 DeliverSubject: inbox, 10330 DeliverPolicy: DeliverNew, 10331 AckPolicy: AckExplicit, 10332 }) 10333 if err != nil { 10334 t.Fatalf("Expected no error, got %v", err) 10335 } 10336 defer o.delete() 10337 10338 sub, err := nc.SubscribeSync(inbox) 10339 if err != nil { 10340 t.Fatalf("Error on 
subscribe: %v", err) 10341 } 10342 nc.Flush() 10343 10344 // Send 1 message 10345 sendStreamMsg(t, nc, "foo.bar", "msg1") 10346 10347 // Make sure we receive it and ack it. 10348 msg, err := sub.NextMsg(250 * time.Millisecond) 10349 if err != nil { 10350 t.Fatalf("Did not get message: %v", err) 10351 } 10352 // Ack it! 10353 msg.Respond(nil) 10354 nc.Flush() 10355 10356 // Shutdown client and server 10357 nc.Close() 10358 10359 dir := strings.TrimSuffix(s.JetStreamConfig().StoreDir, JetStreamStoreDir) 10360 s.Shutdown() 10361 10362 opts.Port = -1 10363 opts.StoreDir = dir 10364 s = RunServer(&opts) 10365 defer s.Shutdown() 10366 10367 // Lookup stream. 10368 mset, err = s.GlobalAccount().lookupStream("MY_STREAM") 10369 if err != nil { 10370 t.Fatalf("Error looking up stream: %v", err) 10371 } 10372 10373 // Update consumer's deliver subject with new inbox 10374 inbox = nats.NewInbox() 10375 o, err = mset.addConsumer(&ConsumerConfig{ 10376 Durable: "dur", 10377 DeliverSubject: inbox, 10378 DeliverPolicy: DeliverNew, 10379 AckPolicy: AckExplicit, 10380 }) 10381 if err != nil { 10382 t.Fatalf("Expected no error, got %v", err) 10383 } 10384 defer o.delete() 10385 10386 nc = clientConnectToServer(t, s) 10387 defer nc.Close() 10388 10389 // Send 2nd message 10390 sendStreamMsg(t, nc, "foo.bar", "msg2") 10391 10392 // Start sub on new inbox 10393 sub, err = nc.SubscribeSync(inbox) 10394 if err != nil { 10395 t.Fatalf("Error on subscribe: %v", err) 10396 } 10397 nc.Flush() 10398 10399 // Should receive message 2. 10400 if _, err := sub.NextMsg(500 * time.Millisecond); err != nil { 10401 t.Fatalf("Did not get message: %v", err) 10402 } 10403 } 10404 10405 // This is for the basics of importing the ability to send to a stream and consume 10406 // from a consumer that is pull based on push based on a well known delivery subject. 10407 func TestJetStreamAccountImportBasics(t *testing.T) { 10408 conf := createConfFile(t, []byte(fmt.Sprintf(` 10409 listen: 127.0.0.1:-1 10410 no_auth_user: rip 10411 jetstream: {max_mem_store: 64GB, max_file_store: 10TB, store_dir: %q} 10412 accounts: { 10413 JS: { 10414 jetstream: enabled 10415 users: [ {user: dlc, password: foo} ] 10416 exports [ 10417 # This is for sending into a stream from other accounts. 10418 { service: "ORDERS.*" } 10419 # This is for accessing a pull based consumer. 10420 { service: "$JS.API.CONSUMER.MSG.NEXT.*.*" } 10421 # This is streaming to a delivery subject for a push based consumer. 10422 { stream: "deliver.ORDERS" } 10423 # This is to ack received messages. This is a service to ack acks.. 
10424 { service: "$JS.ACK.ORDERS.*.>" } 10425 ] 10426 }, 10427 IU: { 10428 users: [ {user: rip, password: bar} ] 10429 imports [ 10430 { service: { subject: "ORDERS.*", account: JS }, to: "my.orders.$1" } 10431 { service: { subject: "$JS.API.CONSUMER.MSG.NEXT.ORDERS.d", account: JS }, to: "nxt.msg" } 10432 { stream: { subject: "deliver.ORDERS", account: JS }, to: "d" } 10433 { service: { subject: "$JS.ACK.ORDERS.*.>", account: JS } } 10434 ] 10435 }, 10436 } 10437 `, t.TempDir()))) 10438 10439 s, _ := RunServerWithConfig(conf) 10440 defer s.Shutdown() 10441 10442 acc, err := s.LookupAccount("JS") 10443 if err != nil { 10444 t.Fatalf("Unexpected error looking up account: %v", err) 10445 } 10446 10447 mset, err := acc.addStream(&StreamConfig{Name: "ORDERS", Subjects: []string{"ORDERS.*"}}) 10448 if err != nil { 10449 t.Fatalf("Unexpected error adding stream: %v", err) 10450 } 10451 defer mset.delete() 10452 10453 // This should be the rip user, the one that imports some JS. 10454 nc := clientConnectToServer(t, s) 10455 defer nc.Close() 10456 10457 // Simple publish to a stream. 10458 pubAck := sendStreamMsg(t, nc, "my.orders.foo", "ORDERS-1") 10459 if pubAck.Stream != "ORDERS" || pubAck.Sequence != 1 { 10460 t.Fatalf("Bad pubAck received: %+v", pubAck) 10461 } 10462 if msgs := mset.state().Msgs; msgs != 1 { 10463 t.Fatalf("Expected 1 message, got %d", msgs) 10464 } 10465 10466 total := 2 10467 for i := 2; i <= total; i++ { 10468 sendStreamMsg(t, nc, "my.orders.bar", fmt.Sprintf("ORDERS-%d", i)) 10469 } 10470 if msgs := mset.state().Msgs; msgs != uint64(total) { 10471 t.Fatalf("Expected %d messages, got %d", total, msgs) 10472 } 10473 10474 // Now test access to a pull based consumer, e.g. workqueue. 10475 o, err := mset.addConsumer(&ConsumerConfig{ 10476 Durable: "d", 10477 AckPolicy: AckExplicit, 10478 }) 10479 if err != nil { 10480 t.Fatalf("Expected no error, got %v", err) 10481 } 10482 defer o.delete() 10483 10484 // We mapped the next message request, "$JS.API.CONSUMER.MSG.NEXT.ORDERS.d" -> "nxt.msg" 10485 m, err := nc.Request("nxt.msg", nil, time.Second) 10486 require_NoError(t, err) 10487 if string(m.Data) != "ORDERS-1" { 10488 t.Fatalf("Expected to receive %q, got %q", "ORDERS-1", m.Data) 10489 } 10490 10491 // Now test access to a push based consumer 10492 o, err = mset.addConsumer(&ConsumerConfig{ 10493 Durable: "p", 10494 DeliverSubject: "deliver.ORDERS", 10495 AckPolicy: AckExplicit, 10496 }) 10497 if err != nil { 10498 t.Fatalf("Expected no error, got %v", err) 10499 } 10500 defer o.delete() 10501 10502 // We remapped from above, deliver.ORDERS -> d 10503 sub, _ := nc.SubscribeSync("d") 10504 defer sub.Unsubscribe() 10505 10506 checkFor(t, 250*time.Millisecond, 10*time.Millisecond, func() error { 10507 if nmsgs, _, _ := sub.Pending(); err != nil || nmsgs != total { 10508 return fmt.Errorf("Did not receive correct number of messages: %d vs %d", nmsgs, total) 10509 } 10510 return nil 10511 }) 10512 10513 m, _ = sub.NextMsg(time.Second) 10514 // Make sure we remapped subject correctly across the account boundary. 10515 if m.Subject != "ORDERS.foo" { 10516 t.Fatalf("Expected subject of %q, got %q", "ORDERS.foo", m.Subject) 10517 } 10518 // Now make sure we can ack messages correctly. 10519 m.Respond(AckAck) 10520 nc.Flush() 10521 10522 if info := o.info(); info.AckFloor.Consumer != 1 { 10523 t.Fatalf("Did not receive the ack properly") 10524 } 10525 10526 // Grab second one now. 
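	// NOTE (editor, illustrative): the block below acks by *requesting* on the reply
	// subject so the ack itself gets a confirmation back, which is exactly what the
	// "$JS.ACK.ORDERS.*.>" service export above enables across the account boundary.
	// With the nats.go client the same round trip is, as a sketch:
	//
	//	msg, _ := sub.NextMsg(time.Second)
	//	_ = msg.AckSync() // ack and wait for the server's confirmation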
10527 m, _ = sub.NextMsg(time.Second) 10528 // Make sure we remapped subject correctly across the account boundary. 10529 if m.Subject != "ORDERS.bar" { 10530 t.Fatalf("Expected subject of %q, got %q", "ORDERS.bar", m.Subject) 10531 } 10532 // Now make sure we can ack messages and get back an ack as well. 10533 resp, _ := nc.Request(m.Reply, nil, 100*time.Millisecond) 10534 if resp == nil { 10535 t.Fatalf("No response, possible timeout?") 10536 } 10537 if info := o.info(); info.AckFloor.Consumer != 2 { 10538 t.Fatalf("Did not receive the ack properly") 10539 } 10540 } 10541 10542 // This tests whether we are able to aggregate all JetStream advisory events 10543 // from all accounts into a single account. Config for this test uses 10544 // service imports and exports as that allows for gathering all events 10545 // without having to know the account name and without separate entries 10546 // for each account in aggregate account config. 10547 // This test fails as it is not receiving the api audit event ($JS.EVENT.ADVISORY.API). 10548 func TestJetStreamAccountImportJSAdvisoriesAsService(t *testing.T) { 10549 conf := createConfFile(t, []byte(fmt.Sprintf(` 10550 listen=127.0.0.1:-1 10551 no_auth_user: pp 10552 jetstream: {max_mem_store: 64GB, max_file_store: 10TB, store_dir: %q} 10553 accounts { 10554 JS { 10555 jetstream: enabled 10556 users: [ {user: pp, password: foo} ] 10557 imports [ 10558 { service: { account: AGG, subject: '$JS.EVENT.ADVISORY.ACC.JS.>' }, to: '$JS.EVENT.ADVISORY.>' } 10559 ] 10560 } 10561 AGG { 10562 users: [ {user: agg, password: foo} ] 10563 exports: [ 10564 { service: '$JS.EVENT.ADVISORY.ACC.*.>', response: Singleton, account_token_position: 5 } 10565 ] 10566 } 10567 } 10568 `, t.TempDir()))) 10569 10570 s, _ := RunServerWithConfig(conf) 10571 defer s.Shutdown() 10572 10573 // This should be the pp user, one which manages JetStream assets 10574 ncJS, err := nats.Connect(s.ClientURL()) 10575 if err != nil { 10576 t.Fatalf("Unexpected error during connect: %v", err) 10577 } 10578 defer ncJS.Close() 10579 10580 // This is the agg user, which should aggregate all JS advisory events. 
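	// NOTE (editor, illustrative): JetStream emits advisories such as stream-created
	// and API-audit events on $JS.EVENT.ADVISORY.>; the AGG account above re-exports
	// them as a service with account_token_position so the account name is woven into
	// the subject. Listening for them inside a single account is simply, as a sketch:
	//
	//	sub, _ := nc.SubscribeSync("$JS.EVENT.ADVISORY.>")
	//	msg, _ := sub.NextMsg(2 * time.Second) // e.g. $JS.EVENT.ADVISORY.API
	//	_ = msg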
10581 ncAgg, err := nats.Connect(s.ClientURL(), nats.UserInfo("agg", "foo")) 10582 if err != nil { 10583 t.Fatalf("Unexpected error during connect: %v", err) 10584 } 10585 defer ncAgg.Close() 10586 10587 js, err := ncJS.JetStream() 10588 if err != nil { 10589 t.Fatalf("Unexpected error: %v", err) 10590 } 10591 10592 // user from JS account should receive events on $JS.EVENT.ADVISORY.> subject 10593 subJS, err := ncJS.SubscribeSync("$JS.EVENT.ADVISORY.>") 10594 if err != nil { 10595 t.Fatalf("Unexpected error: %v", err) 10596 } 10597 defer subJS.Unsubscribe() 10598 10599 // user from AGG account should receive events on mapped $JS.EVENT.ADVISORY.ACC.JS.> subject (with account name) 10600 subAgg, err := ncAgg.SubscribeSync("$JS.EVENT.ADVISORY.ACC.JS.>") 10601 if err != nil { 10602 t.Fatalf("Unexpected error: %v", err) 10603 } 10604 10605 // add stream using JS account 10606 // this should trigger 2 events: 10607 // - an action event on $JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS 10608 // - an api audit event on $JS.EVENT.ADVISORY.API 10609 _, err = js.AddStream(&nats.StreamConfig{Name: "ORDERS", Subjects: []string{"ORDERS.*"}}) 10610 if err != nil { 10611 t.Fatalf("Unexpected error adding stream: %v", err) 10612 } 10613 10614 gotEvents := map[string]int{} 10615 for i := 0; i < 2; i++ { 10616 msg, err := subJS.NextMsg(time.Second * 2) 10617 if err != nil { 10618 t.Fatalf("Unexpected error: %v", err) 10619 } 10620 gotEvents[msg.Subject]++ 10621 } 10622 if c := gotEvents["$JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS"]; c != 1 { 10623 t.Fatalf("Should have received one advisory from $JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS but got %d", c) 10624 } 10625 if c := gotEvents["$JS.EVENT.ADVISORY.API"]; c != 1 { 10626 t.Fatalf("Should have received one advisory from $JS.EVENT.ADVISORY.API but got %d", c) 10627 } 10628 10629 // same set of events should be received by AGG account 10630 // on subjects containing account name (ACC.JS) 10631 gotEvents = map[string]int{} 10632 for i := 0; i < 2; i++ { 10633 msg, err := subAgg.NextMsg(time.Second * 2) 10634 require_NoError(t, err) 10635 var adv JSAPIAudit 10636 require_NoError(t, json.Unmarshal(msg.Data, &adv)) 10637 // Make sure we have full fidelity info via implicit share. 10638 if adv.Client != nil { 10639 require_True(t, adv.Client.Host != _EMPTY_) 10640 require_True(t, adv.Client.User != _EMPTY_) 10641 require_True(t, adv.Client.Lang != _EMPTY_) 10642 } 10643 gotEvents[msg.Subject]++ 10644 } 10645 if c := gotEvents["$JS.EVENT.ADVISORY.ACC.JS.STREAM.CREATED.ORDERS"]; c != 1 { 10646 t.Fatalf("Should have received one advisory from $JS.EVENT.ADVISORY.ACC.JS.STREAM.CREATED.ORDERS but got %d", c) 10647 } 10648 if c := gotEvents["$JS.EVENT.ADVISORY.ACC.JS.API"]; c != 1 { 10649 t.Fatalf("Should have received one advisory from $JS.EVENT.ADVISORY.ACC.JS.API but got %d", c) 10650 } 10651 } 10652 10653 // This tests whether we are able to aggregate all JetStream advisory events 10654 // from all accounts into a single account. Config for this test uses 10655 // stream imports and exports as that allows for gathering all events 10656 // as long as there is a separate stream import entry for each account 10657 // in aggregate account config. 
10658 func TestJetStreamAccountImportJSAdvisoriesAsStream(t *testing.T) { 10659 conf := createConfFile(t, []byte(fmt.Sprintf(` 10660 listen=127.0.0.1:-1 10661 no_auth_user: pp 10662 jetstream: {max_mem_store: 64GB, max_file_store: 10TB, store_dir: %q} 10663 accounts { 10664 JS { 10665 jetstream: enabled 10666 users: [ {user: pp, password: foo} ] 10667 exports [ 10668 { stream: '$JS.EVENT.ADVISORY.>' } 10669 ] 10670 } 10671 AGG { 10672 users: [ {user: agg, password: foo} ] 10673 imports: [ 10674 { stream: { account: JS, subject: '$JS.EVENT.ADVISORY.>' }, to: '$JS.EVENT.ADVISORY.ACC.JS.>' } 10675 ] 10676 } 10677 } 10678 `, t.TempDir()))) 10679 10680 s, _ := RunServerWithConfig(conf) 10681 defer s.Shutdown() 10682 10683 // This should be the pp user, one which manages JetStream assets 10684 ncJS, err := nats.Connect(s.ClientURL()) 10685 if err != nil { 10686 t.Fatalf("Unexpected error during connect: %v", err) 10687 } 10688 defer ncJS.Close() 10689 10690 // This is the agg user, which should aggregate all JS advisory events. 10691 ncAgg, err := nats.Connect(s.ClientURL(), nats.UserInfo("agg", "foo")) 10692 if err != nil { 10693 t.Fatalf("Unexpected error during connect: %v", err) 10694 } 10695 defer ncAgg.Close() 10696 10697 js, err := ncJS.JetStream() 10698 if err != nil { 10699 t.Fatalf("Unexpected error: %v", err) 10700 } 10701 10702 // user from JS account should receive events on $JS.EVENT.ADVISORY.> subject 10703 subJS, err := ncJS.SubscribeSync("$JS.EVENT.ADVISORY.>") 10704 if err != nil { 10705 t.Fatalf("Unexpected error: %v", err) 10706 } 10707 defer subJS.Unsubscribe() 10708 10709 // user from AGG account should receive events on mapped $JS.EVENT.ADVISORY.ACC.JS.> subject (with account name) 10710 subAgg, err := ncAgg.SubscribeSync("$JS.EVENT.ADVISORY.ACC.JS.>") 10711 if err != nil { 10712 t.Fatalf("Unexpected error: %v", err) 10713 } 10714 10715 // add stream using JS account 10716 // this should trigger 2 events: 10717 // - an action event on $JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS 10718 // - an api audit event on $JS.EVENT.ADVISORY.API 10719 _, err = js.AddStream(&nats.StreamConfig{Name: "ORDERS", Subjects: []string{"ORDERS.*"}}) 10720 if err != nil { 10721 t.Fatalf("Unexpected error adding stream: %v", err) 10722 } 10723 10724 var gotAPIAdvisory, gotCreateAdvisory bool 10725 for i := 0; i < 2; i++ { 10726 msg, err := subJS.NextMsg(time.Second * 2) 10727 if err != nil { 10728 t.Fatalf("Unexpected error on JS account: %v", err) 10729 } 10730 switch msg.Subject { 10731 case "$JS.EVENT.ADVISORY.STREAM.CREATED.ORDERS": 10732 gotCreateAdvisory = true 10733 case "$JS.EVENT.ADVISORY.API": 10734 gotAPIAdvisory = true 10735 default: 10736 t.Fatalf("Unexpected subject: %q", msg.Subject) 10737 } 10738 } 10739 if !gotAPIAdvisory || !gotCreateAdvisory { 10740 t.Fatalf("Expected to have received both advisories on JS account (API advisory %v, create advisory %v)", gotAPIAdvisory, gotCreateAdvisory) 10741 } 10742 10743 // same set of events should be received by AGG account 10744 // on subjects containing account name (ACC.JS) 10745 gotAPIAdvisory, gotCreateAdvisory = false, false 10746 for i := 0; i < 2; i++ { 10747 msg, err := subAgg.NextMsg(time.Second * 2) 10748 if err != nil { 10749 t.Fatalf("Unexpected error on AGG account: %v", err) 10750 } 10751 switch msg.Subject { 10752 case "$JS.EVENT.ADVISORY.ACC.JS.STREAM.CREATED.ORDERS": 10753 gotCreateAdvisory = true 10754 case "$JS.EVENT.ADVISORY.ACC.JS.API": 10755 gotAPIAdvisory = true 10756 default: 10757 t.Fatalf("Unexpected subject: %q", 
msg.Subject) 10758 } 10759 } 10760 if !gotAPIAdvisory || !gotCreateAdvisory { 10761 t.Fatalf("Expected to have received both advisories on AGG account (API advisory %v, create advisory %v)", gotAPIAdvisory, gotCreateAdvisory) 10762 } 10763 } 10764 10765 // This is for importing all of JetStream into another account for admin purposes. 10766 func TestJetStreamAccountImportAll(t *testing.T) { 10767 conf := createConfFile(t, []byte(fmt.Sprintf(` 10768 listen: 127.0.0.1:-1 10769 no_auth_user: rip 10770 jetstream: {max_mem_store: 64GB, max_file_store: 10TB, store_dir: %q} 10771 accounts: { 10772 JS: { 10773 jetstream: enabled 10774 users: [ {user: dlc, password: foo} ] 10775 exports [ { service: "$JS.API.>" } ] 10776 }, 10777 IU: { 10778 users: [ {user: rip, password: bar} ] 10779 imports [ { service: { subject: "$JS.API.>", account: JS }, to: "jsapi.>"} ] 10780 }, 10781 } 10782 `, t.TempDir()))) 10783 10784 s, _ := RunServerWithConfig(conf) 10785 defer s.Shutdown() 10786 10787 acc, err := s.LookupAccount("JS") 10788 if err != nil { 10789 t.Fatalf("Unexpected error looking up account: %v", err) 10790 } 10791 10792 mset, err := acc.addStream(&StreamConfig{Name: "ORDERS", Subjects: []string{"ORDERS.*"}}) 10793 if err != nil { 10794 t.Fatalf("Unexpected error adding stream: %v", err) 10795 } 10796 defer mset.delete() 10797 10798 // This should be the rip user, the one that imports all of JS. 10799 nc := clientConnectToServer(t, s) 10800 defer nc.Close() 10801 10802 mapSubj := func(subject string) string { 10803 return strings.Replace(subject, "$JS.API.", "jsapi.", 1) 10804 } 10805 10806 // This will get the current information about usage and limits for this account. 10807 resp, err := nc.Request(mapSubj(JSApiAccountInfo), nil, time.Second) 10808 require_NoError(t, err) 10809 var info JSApiAccountInfoResponse 10810 if err := json.Unmarshal(resp.Data, &info); err != nil { 10811 t.Fatalf("Unexpected error: %v", err) 10812 } 10813 if info.Error != nil { 10814 t.Fatalf("Unexpected error: %+v", info.Error) 10815 } 10816 // Lookup streams. 10817 resp, err = nc.Request(mapSubj(JSApiStreams), nil, time.Second) 10818 require_NoError(t, err) 10819 var namesResponse JSApiStreamNamesResponse 10820 if err = json.Unmarshal(resp.Data, &namesResponse); err != nil { 10821 t.Fatalf("Unexpected error: %v", err) 10822 } 10823 if namesResponse.Error != nil { 10824 t.Fatalf("Unexpected error: %+v", namesResponse.Error) 10825 } 10826 } 10827 10828 // https://github.com/nats-io/nats-server/issues/1736 10829 func TestJetStreamServerReload(t *testing.T) { 10830 conf := createConfFile(t, []byte(fmt.Sprintf(` 10831 listen: 127.0.0.1:-1 10832 jetstream: {max_mem_store: 64GB, max_file_store: 10TB, store_dir: %q } 10833 accounts: { 10834 A: { users: [ {user: ua, password: pwd} ] }, 10835 B: { 10836 jetstream: {max_mem: 1GB, max_store: 1TB, max_streams: 10, max_consumers: 1k} 10837 users: [ {user: ub, password: pwd} ] 10838 }, 10839 SYS: { users: [ {user: uc, password: pwd} ] }, 10840 } 10841 no_auth_user: ub 10842 system_account: SYS 10843 `, t.TempDir()))) 10844 10845 s, _ := RunServerWithConfig(conf) 10846 defer s.Shutdown() 10847 10848 if !s.JetStreamEnabled() { 10849 t.Fatalf("Expected JetStream to be enabled") 10850 } 10851 10852 // Client for API requests. 
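	// NOTE (editor, illustrative): checkJSAccount below queries account limits and
	// usage with a raw $JS.API.INFO request; the nats.go shorthand is js.AccountInfo().
	// When the JS API is imported under a prefix, as in the previous test which maps
	// "$JS.API.>" to "jsapi.>", the client can be pointed at the import with a prefix
	// option, sketched as:
	//
	//	js, _ := nc.JetStream(nats.APIPrefix("jsapi."))
	//	info, _ := js.AccountInfo()
	//	_ = info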
10853 nc := clientConnectToServer(t, s) 10854 defer nc.Close() 10855 10856 checkJSAccount := func() { 10857 t.Helper() 10858 resp, err := nc.Request(JSApiAccountInfo, nil, time.Second) 10859 if err != nil { 10860 t.Fatalf("Unexpected error: %v", err) 10861 } 10862 var info JSApiAccountInfoResponse 10863 if err := json.Unmarshal(resp.Data, &info); err != nil { 10864 t.Fatalf("Unexpected error: %v", err) 10865 } 10866 } 10867 10868 checkJSAccount() 10869 10870 acc, err := s.LookupAccount("B") 10871 if err != nil { 10872 t.Fatalf("Unexpected error looking up account: %v", err) 10873 } 10874 mset, err := acc.addStream(&StreamConfig{Name: "22"}) 10875 require_NoError(t, err) 10876 10877 toSend := 10 10878 for i := 0; i < toSend; i++ { 10879 sendStreamMsg(t, nc, "22", fmt.Sprintf("MSG: %d", i+1)) 10880 } 10881 if msgs := mset.state().Msgs; msgs != uint64(toSend) { 10882 t.Fatalf("Expected %d messages, got %d", toSend, msgs) 10883 } 10884 10885 if err := s.Reload(); err != nil { 10886 t.Fatalf("Error on server reload: %v", err) 10887 } 10888 10889 // Wait to get reconnected. 10890 checkFor(t, 5*time.Second, 10*time.Millisecond, func() error { 10891 if !nc.IsConnected() { 10892 return fmt.Errorf("Not connected") 10893 } 10894 return nil 10895 }) 10896 10897 checkJSAccount() 10898 sendStreamMsg(t, nc, "22", "MSG: 22") 10899 } 10900 10901 func TestJetStreamConfigReloadWithGlobalAccount(t *testing.T) { 10902 tdir := t.TempDir() 10903 template := ` 10904 listen: 127.0.0.1:-1 10905 authorization { 10906 users [ 10907 {user: anonymous} 10908 {user: user1, password: %s} 10909 ] 10910 } 10911 no_auth_user: anonymous 10912 jetstream { 10913 store_dir = %q 10914 } 10915 ` 10916 conf := createConfFile(t, []byte(fmt.Sprintf(template, "pwd", tdir))) 10917 10918 s, _ := RunServerWithConfig(conf) 10919 defer s.Shutdown() 10920 10921 // Client for API requests. 10922 nc, js := jsClientConnect(t, s) 10923 defer nc.Close() 10924 10925 checkJSAccount := func() { 10926 t.Helper() 10927 if _, err := js.AccountInfo(); err != nil { 10928 t.Fatalf("Unexpected error: %v", err) 10929 } 10930 } 10931 10932 checkJSAccount() 10933 10934 if _, err := js.AddStream(&nats.StreamConfig{Name: "foo"}); err != nil { 10935 t.Fatalf("Unexpected error: %v", err) 10936 } 10937 10938 toSend := 10 10939 for i := 0; i < toSend; i++ { 10940 if _, err := js.Publish("foo", []byte(fmt.Sprintf("MSG: %d", i+1))); err != nil { 10941 t.Fatalf("Unexpected publish error: %v", err) 10942 } 10943 } 10944 si, err := js.StreamInfo("foo") 10945 require_NoError(t, err) 10946 if si.State.Msgs != uint64(toSend) { 10947 t.Fatalf("Expected %d msgs after restart, got %d", toSend, si.State.Msgs) 10948 } 10949 10950 if err := os.WriteFile(conf, []byte(fmt.Sprintf(template, "pwd2", tdir)), 0666); err != nil { 10951 t.Fatalf("Error writing config: %v", err) 10952 } 10953 10954 if err := s.Reload(); err != nil { 10955 t.Fatalf("Error during config reload: %v", err) 10956 } 10957 10958 nc, js = jsClientConnect(t, s) 10959 defer nc.Close() 10960 10961 // Try to add a new stream to the global account 10962 if _, err := js.AddStream(&nats.StreamConfig{Name: "bar"}); err != nil { 10963 t.Fatalf("Unexpected error: %v", err) 10964 } 10965 10966 checkJSAccount() 10967 } 10968 10969 // Test that we properly enforce per subject msg limits. 
10970 func TestJetStreamMaxMsgsPerSubject(t *testing.T) { 10971 const maxPer = 5 10972 msc := StreamConfig{ 10973 Name: "TEST", 10974 Subjects: []string{"foo", "bar", "baz.*"}, 10975 Storage: MemoryStorage, 10976 MaxMsgsPer: maxPer, 10977 } 10978 fsc := msc 10979 fsc.Storage = FileStorage 10980 10981 cases := []struct { 10982 name string 10983 mconfig *StreamConfig 10984 }{ 10985 {"MemoryStore", &msc}, 10986 {"FileStore", &fsc}, 10987 } 10988 10989 for _, c := range cases { 10990 t.Run(c.name, func(t *testing.T) { 10991 s := RunBasicJetStreamServer(t) 10992 defer s.Shutdown() 10993 10994 mset, err := s.GlobalAccount().addStream(c.mconfig) 10995 if err != nil { 10996 t.Fatalf("Unexpected error adding stream: %v", err) 10997 } 10998 defer mset.delete() 10999 11000 // Client for API requests. 11001 nc, js := jsClientConnect(t, s) 11002 defer nc.Close() 11003 11004 pubAndCheck := func(subj string, num int, expectedNumMsgs uint64) { 11005 t.Helper() 11006 for i := 0; i < num; i++ { 11007 if _, err = js.Publish(subj, []byte("TSLA")); err != nil { 11008 t.Fatalf("Unexpected publish error: %v", err) 11009 } 11010 } 11011 si, err := js.StreamInfo("TEST") 11012 if err != nil { 11013 t.Fatalf("Unexpected error: %v", err) 11014 } 11015 if si.State.Msgs != expectedNumMsgs { 11016 t.Fatalf("Expected %d msgs, got %d", expectedNumMsgs, si.State.Msgs) 11017 } 11018 } 11019 11020 pubAndCheck("foo", 1, 1) 11021 pubAndCheck("foo", 4, 5) 11022 // Now make sure our per subject limits kick in.. 11023 pubAndCheck("foo", 2, 5) 11024 pubAndCheck("baz.22", 5, 10) 11025 pubAndCheck("baz.33", 5, 15) 11026 // We are maxed so totals should be same no matter what we add here. 11027 pubAndCheck("baz.22", 5, 15) 11028 pubAndCheck("baz.33", 5, 15) 11029 11030 // Now purge and make sure all is still good. 11031 mset.purge(nil) 11032 pubAndCheck("foo", 1, 1) 11033 pubAndCheck("foo", 4, 5) 11034 pubAndCheck("baz.22", 5, 10) 11035 pubAndCheck("baz.33", 5, 15) 11036 }) 11037 } 11038 } 11039 11040 func TestJetStreamGetLastMsgBySubject(t *testing.T) { 11041 for _, st := range []StorageType{FileStorage, MemoryStorage} { 11042 t.Run(st.String(), func(t *testing.T) { 11043 c := createJetStreamClusterExplicit(t, "JSC", 3) 11044 defer c.shutdown() 11045 11046 nc, js := jsClientConnect(t, c.randomServer()) 11047 defer nc.Close() 11048 11049 cfg := StreamConfig{ 11050 Name: "KV", 11051 Subjects: []string{"kv.>"}, 11052 Storage: st, 11053 Replicas: 2, 11054 MaxMsgsPer: 20, 11055 } 11056 11057 req, err := json.Marshal(cfg) 11058 if err != nil { 11059 t.Fatalf("Unexpected error: %v", err) 11060 } 11061 // Do manually for now. 11062 nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) 11063 si, err := js.StreamInfo("KV") 11064 if err != nil { 11065 t.Fatalf("Unexpected error: %v", err) 11066 } 11067 if si == nil || si.Config.Name != "KV" { 11068 t.Fatalf("StreamInfo is not correct %+v", si) 11069 } 11070 11071 for i := 0; i < 1000; i++ { 11072 msg := []byte(fmt.Sprintf("VAL-%d", i+1)) 11073 js.PublishAsync("kv.foo", msg) 11074 js.PublishAsync("kv.bar", msg) 11075 js.PublishAsync("kv.baz", msg) 11076 } 11077 select { 11078 case <-js.PublishAsyncComplete(): 11079 case <-time.After(5 * time.Second): 11080 t.Fatalf("Did not receive completion signal") 11081 } 11082 11083 // Check that if both set that errors. 
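	// NOTE (editor, illustrative): MaxMsgsPer (MaxMsgsPerSubject in the nats.go
	// config) is the per-subject retention limit that this "KV" stream and the
	// previous test rely on; with a limit of 1 it keeps only the latest value per
	// key, the building block for key/value semantics (this test uses 20 to keep
	// some history). A minimal client-side sketch:
	//
	//	_, _ = js.AddStream(&nats.StreamConfig{
	//		Name:              "KV",
	//		Subjects:          []string{"kv.>"},
	//		MaxMsgsPerSubject: 1,
	//	})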
11084 b, _ := json.Marshal(JSApiMsgGetRequest{LastFor: "kv.foo", Seq: 950}) 11085 rmsg, err := nc.Request(fmt.Sprintf(JSApiMsgGetT, "KV"), b, time.Second) 11086 if err != nil { 11087 t.Fatalf("Unexpected error: %v", err) 11088 } 11089 var resp JSApiMsgGetResponse 11090 err = json.Unmarshal(rmsg.Data, &resp) 11091 if err != nil { 11092 t.Fatalf("Could not parse stream message: %v", err) 11093 } 11094 if resp.Error == nil { 11095 t.Fatalf("Expected an error when both are set, got %+v", resp.Error) 11096 } 11097 11098 // Need to do stream GetMsg by hand for now until Go client support lands. 11099 getLast := func(subject string) *StoredMsg { 11100 t.Helper() 11101 req := &JSApiMsgGetRequest{LastFor: subject} 11102 b, _ := json.Marshal(req) 11103 rmsg, err := nc.Request(fmt.Sprintf(JSApiMsgGetT, "KV"), b, time.Second) 11104 if err != nil { 11105 t.Fatalf("Unexpected error: %v", err) 11106 } 11107 var resp JSApiMsgGetResponse 11108 err = json.Unmarshal(rmsg.Data, &resp) 11109 if err != nil { 11110 t.Fatalf("Could not parse stream message: %v", err) 11111 } 11112 if resp.Message == nil || resp.Error != nil { 11113 t.Fatalf("Did not receive correct response: %+v", resp.Error) 11114 } 11115 return resp.Message 11116 } 11117 // Do basic checks. 11118 basicCheck := func(subject string, expectedSeq uint64) { 11119 sm := getLast(subject) 11120 if sm == nil { 11121 t.Fatalf("Expected a message but got none") 11122 } else if string(sm.Data) != "VAL-1000" { 11123 t.Fatalf("Wrong message payload, wanted %q but got %q", "VAL-1000", sm.Data) 11124 } else if sm.Sequence != expectedSeq { 11125 t.Fatalf("Wrong message sequence, wanted %d but got %d", expectedSeq, sm.Sequence) 11126 } else if !subjectIsSubsetMatch(sm.Subject, subject) { 11127 t.Fatalf("Wrong subject, wanted %q but got %q", subject, sm.Subject) 11128 } 11129 } 11130 11131 basicCheck("kv.foo", 2998) 11132 basicCheck("kv.bar", 2999) 11133 basicCheck("kv.baz", 3000) 11134 basicCheck("kv.*", 3000) 11135 basicCheck(">", 3000) 11136 }) 11137 } 11138 } 11139 11140 // https://github.com/nats-io/nats-server/issues/2329 11141 func TestJetStreamGetLastMsgBySubjectAfterUpdate(t *testing.T) { 11142 c := createJetStreamClusterExplicit(t, "JSC", 3) 11143 defer c.shutdown() 11144 11145 nc, js := jsClientConnect(t, c.randomServer()) 11146 defer nc.Close() 11147 11148 sc := &nats.StreamConfig{ 11149 Name: "TEST", 11150 Subjects: []string{"foo"}, 11151 Replicas: 2, 11152 } 11153 if _, err := js.AddStream(sc); err != nil { 11154 t.Fatalf("Unexpected error: %v", err) 11155 } 11156 // Now Update and add in other subjects. 11157 sc.Subjects = append(sc.Subjects, "bar", "baz") 11158 if _, err := js.UpdateStream(sc); err != nil { 11159 t.Fatalf("Unexpected error: %v", err) 11160 } 11161 11162 js.Publish("foo", []byte("OK1")) // 1 11163 js.Publish("bar", []byte("OK1")) // 2 11164 js.Publish("foo", []byte("OK2")) // 3 11165 js.Publish("bar", []byte("OK2")) // 4 11166 11167 // Need to do stream GetMsg by hand for now until Go client support lands. 
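	// NOTE (editor, illustrative): the hand-rolled request below predates client
	// support; in later nats.go versions the same lookup is available directly, e.g.:
	//
	//	rm, _ := js.GetLastMsg("TEST", "bar") // last stored message on "bar"
	//	_ = rm.Sequence                       // would be 4 after the publishes above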
11168 getLast := func(subject string) *StoredMsg { 11169 t.Helper() 11170 req := &JSApiMsgGetRequest{LastFor: subject} 11171 b, _ := json.Marshal(req) 11172 rmsg, err := nc.Request(fmt.Sprintf(JSApiMsgGetT, "TEST"), b, time.Second) 11173 if err != nil { 11174 t.Fatalf("Unexpected error: %v", err) 11175 } 11176 var resp JSApiMsgGetResponse 11177 err = json.Unmarshal(rmsg.Data, &resp) 11178 if err != nil { 11179 t.Fatalf("Could not parse stream message: %v", err) 11180 } 11181 if resp.Message == nil || resp.Error != nil { 11182 t.Fatalf("Did not receive correct response: %+v", resp.Error) 11183 } 11184 return resp.Message 11185 } 11186 // Do basic checks. 11187 basicCheck := func(subject string, expectedSeq uint64) { 11188 sm := getLast(subject) 11189 if sm == nil { 11190 t.Fatalf("Expected a message but got none") 11191 } else if sm.Sequence != expectedSeq { 11192 t.Fatalf("Wrong message sequence, wanted %d but got %d", expectedSeq, sm.Sequence) 11193 } else if !subjectIsSubsetMatch(sm.Subject, subject) { 11194 t.Fatalf("Wrong subject, wanted %q but got %q", subject, sm.Subject) 11195 } 11196 } 11197 11198 basicCheck("foo", 3) 11199 basicCheck("bar", 4) 11200 } 11201 11202 func TestJetStreamLastSequenceBySubject(t *testing.T) { 11203 for _, st := range []StorageType{FileStorage, MemoryStorage} { 11204 t.Run(st.String(), func(t *testing.T) { 11205 c := createJetStreamClusterExplicit(t, "JSC", 3) 11206 defer c.shutdown() 11207 11208 nc, js := jsClientConnect(t, c.randomServer()) 11209 defer nc.Close() 11210 11211 cfg := StreamConfig{ 11212 Name: "KV", 11213 Subjects: []string{"kv.>"}, 11214 Storage: st, 11215 Replicas: 3, 11216 MaxMsgsPer: 1, 11217 } 11218 11219 req, err := json.Marshal(cfg) 11220 if err != nil { 11221 t.Fatalf("Unexpected error: %v", err) 11222 } 11223 // Do manually for now. 11224 m, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) 11225 require_NoError(t, err) 11226 si, err := js.StreamInfo("KV") 11227 if err != nil { 11228 t.Fatalf("Unexpected error: %v, respmsg: %q", err, string(m.Data)) 11229 } 11230 if si == nil || si.Config.Name != "KV" { 11231 t.Fatalf("StreamInfo is not correct %+v", si) 11232 } 11233 11234 js.PublishAsync("kv.foo", []byte("1")) 11235 js.PublishAsync("kv.bar", []byte("2")) 11236 js.PublishAsync("kv.baz", []byte("3")) 11237 11238 select { 11239 case <-js.PublishAsyncComplete(): 11240 case <-time.After(time.Second): 11241 t.Fatalf("Did not receive completion signal") 11242 } 11243 11244 // Now make sure we get an error if the last sequence is not correct per subject. 11245 pubAndCheck := func(subj, seq string, ok bool) { 11246 t.Helper() 11247 m := nats.NewMsg(subj) 11248 m.Data = []byte("HELLO") 11249 m.Header.Set(JSExpectedLastSubjSeq, seq) 11250 _, err := js.PublishMsg(m) 11251 if ok && err != nil { 11252 t.Fatalf("Unexpected error: %v", err) 11253 } 11254 if !ok && err == nil { 11255 t.Fatalf("Expected to get an error and got none") 11256 } 11257 } 11258 11259 pubAndCheck("kv.foo", "1", true) // So last is now 4. 11260 pubAndCheck("kv.foo", "1", false) // This should fail. 11261 pubAndCheck("kv.bar", "2", true) 11262 pubAndCheck("kv.bar", "5", true) 11263 pubAndCheck("kv.xxx", "5", false) 11264 }) 11265 } 11266 } 11267 11268 func TestJetStreamFilteredConsumersWithWiderFilter(t *testing.T) { 11269 s := RunBasicJetStreamServer(t) 11270 defer s.Shutdown() 11271 11272 // Client for API requests. 
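	// NOTE (editor, illustrative): the JSExpectedLastSubjSeq header exercised in the
	// previous test is the optimistic-concurrency check KV-style writers use: the
	// publish is rejected unless the last sequence stored for that subject matches.
	// In nats.go the header can be set via a publish option, sketched as:
	//
	//	_, err := js.Publish("kv.foo", []byte("v2"), nats.ExpectLastSequencePerSubject(4))
	//	_ = err // non-nil if someone else wrote kv.foo since sequence 4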
11273 nc, js := jsClientConnect(t, s) 11274 defer nc.Close() 11275 11276 // Origin 11277 _, err := js.AddStream(&nats.StreamConfig{ 11278 Name: "TEST", 11279 Subjects: []string{"foo", "bar", "baz", "N.*"}, 11280 }) 11281 require_NoError(t, err) 11282 11283 // Add in some messages. 11284 js.Publish("foo", []byte("OK")) 11285 js.Publish("bar", []byte("OK")) 11286 js.Publish("baz", []byte("OK")) 11287 for i := 0; i < 12; i++ { 11288 js.Publish(fmt.Sprintf("N.%d", i+1), []byte("OK")) 11289 } 11290 11291 checkFor(t, 5*time.Second, 250*time.Millisecond, func() error { 11292 si, err := js.StreamInfo("TEST") 11293 require_NoError(t, err) 11294 11295 if si.State.Msgs != 15 { 11296 return fmt.Errorf("Expected 15 msgs, got state: %+v", si.State) 11297 } 11298 return nil 11299 }) 11300 11301 checkWider := func(subj string, numExpected int) { 11302 sub, err := js.SubscribeSync(subj) 11303 require_NoError(t, err) 11304 11305 defer sub.Unsubscribe() 11306 checkSubsPending(t, sub, numExpected) 11307 } 11308 11309 checkWider("*", 3) 11310 checkWider("N.*", 12) 11311 checkWider("*.*", 12) 11312 checkWider("N.>", 12) 11313 checkWider(">", 15) 11314 } 11315 11316 func TestJetStreamMirrorAndSourcesFilteredConsumers(t *testing.T) { 11317 s := RunBasicJetStreamServer(t) 11318 defer s.Shutdown() 11319 11320 // Client for API requests. 11321 nc, js := jsClientConnect(t, s) 11322 defer nc.Close() 11323 11324 // Origin 11325 _, err := js.AddStream(&nats.StreamConfig{ 11326 Name: "TEST", 11327 Subjects: []string{"foo", "bar", "baz.*"}, 11328 }) 11329 require_NoError(t, err) 11330 11331 // Create Mirror now. 11332 _, err = js.AddStream(&nats.StreamConfig{ 11333 Name: "M", 11334 Mirror: &nats.StreamSource{Name: "TEST"}, 11335 }) 11336 require_NoError(t, err) 11337 11338 dsubj := nats.NewInbox() 11339 nc.SubscribeSync(dsubj) 11340 nc.Flush() 11341 11342 createConsumer := func(sn, fs string) { 11343 t.Helper() 11344 _, err = js.AddConsumer(sn, &nats.ConsumerConfig{DeliverSubject: dsubj, FilterSubject: fs}) 11345 require_NoError(t, err) 11346 11347 } 11348 11349 createConsumer("M", "foo") 11350 createConsumer("M", "bar") 11351 createConsumer("M", "baz.foo") 11352 11353 // Now do some sources. 11354 if _, err := js.AddStream(&nats.StreamConfig{Name: "O1", Subjects: []string{"foo.*"}}); err != nil { 11355 t.Fatalf("Unexpected error: %v", err) 11356 } 11357 if _, err := js.AddStream(&nats.StreamConfig{Name: "O2", Subjects: []string{"bar.*"}}); err != nil { 11358 t.Fatalf("Unexpected error: %v", err) 11359 } 11360 11361 // Create Mirror now. 11362 _, err = js.AddStream(&nats.StreamConfig{ 11363 Name: "S", 11364 Sources: []*nats.StreamSource{{Name: "O1"}, {Name: "O2"}}, 11365 }) 11366 require_NoError(t, err) 11367 11368 createConsumer("S", "foo.1") 11369 createConsumer("S", "bar.1") 11370 11371 // Chaining 11372 // Create Mirror now. 11373 _, err = js.AddStream(&nats.StreamConfig{ 11374 Name: "M2", 11375 Mirror: &nats.StreamSource{Name: "M"}, 11376 }) 11377 require_NoError(t, err) 11378 11379 createConsumer("M2", "foo") 11380 createConsumer("M2", "bar") 11381 createConsumer("M2", "baz.foo") 11382 } 11383 11384 func TestJetStreamMirrorBasics(t *testing.T) { 11385 s := RunBasicJetStreamServer(t) 11386 defer s.Shutdown() 11387 11388 // Client for API requests. 
11389 nc, js := jsClientConnect(t, s) 11390 defer nc.Close() 11391 11392 createStream := func(cfg *nats.StreamConfig) (*nats.StreamInfo, error) { 11393 return js.AddStream(cfg) 11394 } 11395 11396 createStreamOk := func(cfg *nats.StreamConfig) { 11397 t.Helper() 11398 if _, err := createStream(cfg); err != nil { 11399 t.Fatalf("Expected no error, got %+v", err) 11400 } 11401 } 11402 11403 // Test we get right config errors etc. 11404 cfg := &nats.StreamConfig{ 11405 Name: "M1", 11406 Subjects: []string{"foo", "bar", "baz"}, 11407 Mirror: &nats.StreamSource{Name: "S1"}, 11408 } 11409 _, err := createStream(cfg) 11410 if err == nil || !strings.Contains(err.Error(), "stream mirrors can not") { 11411 t.Fatalf("Expected error, got %+v", err) 11412 } 11413 11414 // Clear subjects. 11415 cfg.Subjects = nil 11416 11417 // Mirrored 11418 scfg := &nats.StreamConfig{ 11419 Name: "S1", 11420 Subjects: []string{"foo", "bar", "baz"}, 11421 } 11422 11423 // Create mirrored stream 11424 createStreamOk(scfg) 11425 11426 // Now create our mirror stream. 11427 createStreamOk(cfg) 11428 11429 // For now wait for the consumer state to register. 11430 time.Sleep(250 * time.Millisecond) 11431 11432 // Send 100 messages. 11433 for i := 0; i < 100; i++ { 11434 if _, err := js.Publish("foo", []byte("OK")); err != nil { 11435 t.Fatalf("Unexpected publish error: %v", err) 11436 } 11437 } 11438 11439 // Faster timeout since we loop below checking for condition. 11440 js2, err := nc.JetStream(nats.MaxWait(500 * time.Millisecond)) 11441 require_NoError(t, err) 11442 11443 checkFor(t, 5*time.Second, 250*time.Millisecond, func() error { 11444 si, err := js2.StreamInfo("M1") 11445 require_NoError(t, err) 11446 11447 if si.State.Msgs != 100 { 11448 return fmt.Errorf("Expected 100 msgs, got state: %+v", si.State) 11449 } 11450 return nil 11451 }) 11452 11453 // Purge the mirrored stream. 11454 if err := js.PurgeStream("S1"); err != nil { 11455 t.Fatalf("Unexpected purge error: %v", err) 11456 } 11457 // Send 50 more msgs now. 11458 for i := 0; i < 50; i++ { 11459 if _, err := js.Publish("bar", []byte("OK")); err != nil { 11460 t.Fatalf("Unexpected publish error: %v", err) 11461 } 11462 } 11463 11464 cfg = &nats.StreamConfig{ 11465 Name: "M2", 11466 Storage: nats.FileStorage, 11467 Mirror: &nats.StreamSource{Name: "S1"}, 11468 } 11469 11470 // Now create our second mirror stream. 11471 createStreamOk(cfg) 11472 11473 checkFor(t, 5*time.Second, 250*time.Millisecond, func() error { 11474 si, err := js2.StreamInfo("M2") 11475 require_NoError(t, err) 11476 11477 if si.State.Msgs != 50 { 11478 return fmt.Errorf("Expected 50 msgs, got state: %+v", si.State) 11479 } 11480 if si.State.FirstSeq != 101 { 11481 return fmt.Errorf("Expected start seq of 101, got state: %+v", si.State) 11482 } 11483 return nil 11484 }) 11485 11486 // Send 100 more msgs now. Should be 150 total, 101 first. 
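// Sequence bookkeeping for the checks below: the purge left S1's last sequence at 100, the 50
// "bar" messages occupy sequences 101-150, and the 100 "baz" messages published next take
// 151-250. So M3, mirroring from OptStartSeq 150, should report 101 messages starting at
// sequence 150, and M4, mirroring from a start time two hours in the past, should report all
// 150 remaining messages starting at sequence 101.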
11487 for i := 0; i < 100; i++ { 11488 if _, err := js.Publish("baz", []byte("OK")); err != nil { 11489 t.Fatalf("Unexpected publish error: %v", err) 11490 } 11491 } 11492 11493 cfg = &nats.StreamConfig{ 11494 Name: "M3", 11495 Mirror: &nats.StreamSource{Name: "S1", OptStartSeq: 150}, 11496 } 11497 11498 createStreamOk(cfg) 11499 11500 checkFor(t, 5*time.Second, 250*time.Millisecond, func() error { 11501 si, err := js2.StreamInfo("M3") 11502 require_NoError(t, err) 11503 11504 if si.State.Msgs != 101 { 11505 return fmt.Errorf("Expected 101 msgs, got state: %+v", si.State) 11506 } 11507 if si.State.FirstSeq != 150 { 11508 return fmt.Errorf("Expected start seq of 150, got state: %+v", si.State) 11509 } 11510 return nil 11511 }) 11512 11513 // Make sure setting time works ok. 11514 start := time.Now().UTC().Add(-2 * time.Hour) 11515 cfg = &nats.StreamConfig{ 11516 Name: "M4", 11517 Mirror: &nats.StreamSource{Name: "S1", OptStartTime: &start}, 11518 } 11519 createStreamOk(cfg) 11520 11521 checkFor(t, 5*time.Second, 250*time.Millisecond, func() error { 11522 si, err := js2.StreamInfo("M4") 11523 require_NoError(t, err) 11524 11525 if si.State.Msgs != 150 { 11526 return fmt.Errorf("Expected 150 msgs, got state: %+v", si.State) 11527 } 11528 if si.State.FirstSeq != 101 { 11529 return fmt.Errorf("Expected start seq of 101, got state: %+v", si.State) 11530 } 11531 return nil 11532 }) 11533 11534 // Test subject filtering and transformation 11535 createStreamServerStreamConfig := func(cfg *StreamConfig, errToCheck uint16) { 11536 t.Helper() 11537 req, err := json.Marshal(cfg) 11538 require_NoError(t, err) 11539 11540 rm, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) 11541 require_NoError(t, err) 11542 11543 var resp JSApiStreamCreateResponse 11544 if err := json.Unmarshal(rm.Data, &resp); err != nil { 11545 t.Fatalf("Unexpected error: %v", err) 11546 } 11547 11548 if errToCheck == 0 { 11549 if resp.Error != nil { 11550 t.Fatalf("Unexpected error: %+v", resp.Error) 11551 } 11552 } else { 11553 if resp.Error.ErrCode != errToCheck { 11554 t.Fatalf("Expected error %+v, got: %+v", errToCheck, resp.Error) 11555 } 11556 } 11557 } 11558 11559 // check for errors 11560 createStreamServerStreamConfig(&StreamConfig{ 11561 Name: "MBAD", 11562 Storage: FileStorage, 11563 Mirror: &StreamSource{Name: "S1", FilterSubject: "foo", SubjectTransforms: []SubjectTransformConfig{{Source: "foo", Destination: "foo3"}}}, 11564 }, ApiErrors[JSMirrorMultipleFiltersNotAllowed].ErrCode) 11565 11566 createStreamServerStreamConfig(&StreamConfig{ 11567 Name: "MBAD", 11568 Storage: FileStorage, 11569 Mirror: &StreamSource{Name: "S1", SubjectTransforms: []SubjectTransformConfig{{Source: ".*.", Destination: "foo3"}}}, 11570 }, ApiErrors[JSMirrorInvalidSubjectFilter].ErrCode) 11571 11572 createStreamServerStreamConfig(&StreamConfig{ 11573 Name: "MBAD", 11574 Storage: FileStorage, 11575 Mirror: &StreamSource{Name: "S1", SubjectTransforms: []SubjectTransformConfig{{Source: "*", Destination: "{{wildcard(2)}}"}}}, 11576 }, ApiErrors[JSStreamCreateErrF].ErrCode) 11577 11578 createStreamServerStreamConfig(&StreamConfig{ 11579 Name: "MBAD", 11580 Storage: FileStorage, 11581 Mirror: &StreamSource{Name: "S1", SubjectTransforms: []SubjectTransformConfig{{Source: "foo", Destination: ""}, {Source: "foo", Destination: "bar"}}}, 11582 }, ApiErrors[JSMirrorOverlappingSubjectFilters].ErrCode) 11583 11584 createStreamServerStreamConfig(&StreamConfig{ 11585 Name: "M5", 11586 Storage: FileStorage, 11587 Mirror: 
&StreamSource{Name: "S1", SubjectTransforms: []SubjectTransformConfig{{Source: "foo", Destination: "foo2"}}}, 11588 }, 0) 11589 11590 createStreamServerStreamConfig(&StreamConfig{ 11591 Name: "M6", 11592 Storage: FileStorage, 11593 Mirror: &StreamSource{Name: "S1", SubjectTransforms: []SubjectTransformConfig{{Source: "bar", Destination: "bar2"}, {Source: "baz", Destination: "baz2"}}}, 11594 }, 0) 11595 11596 // Send 100 messages on foo (there should already be 50 messages on bar and 100 on baz in the stream) 11597 for i := 0; i < 100; i++ { 11598 if _, err := js.Publish("foo", []byte("OK")); err != nil { 11599 t.Fatalf("Unexpected publish error: %v", err) 11600 } 11601 } 11602 11603 var f = func(streamName string, subject string, subjectNumMsgs uint64, streamNumMsg uint64, firstSeq uint64, lastSeq uint64) func() error { 11604 return func() error { 11605 si, err := js2.StreamInfo(streamName, &nats.StreamInfoRequest{SubjectsFilter: subject}) 11606 require_NoError(t, err) 11607 if ss, ok := si.State.Subjects[subject]; !ok { 11608 return fmt.Errorf("expected messages with the transformed subject %s", subject) 11609 } else { 11610 if ss != subjectNumMsgs { 11611 return fmt.Errorf("expected %d messages on the transformed subject %s but got %d", subjectNumMsgs, subject, ss) 11612 } 11613 } 11614 if si.State.Msgs != streamNumMsg { 11615 return fmt.Errorf("expected %d stream messages, got state: %+v", streamNumMsg, si.State) 11616 } 11617 if si.State.FirstSeq != firstSeq || si.State.LastSeq != lastSeq { 11618 return fmt.Errorf("expected first sequence=%d and last sequence=%d, but got state: %+v", firstSeq, lastSeq, si.State) 11619 } 11620 return nil 11621 } 11622 } 11623 11624 checkFor(t, 10*time.Second, 500*time.Millisecond, f("M5", "foo2", 100, 100, 251, 350)) 11625 checkFor(t, 10*time.Second, 500*time.Millisecond, f("M6", "bar2", 50, 150, 101, 250)) 11626 checkFor(t, 10*time.Second, 500*time.Millisecond, f("M6", "baz2", 100, 150, 101, 250)) 11627 11628 } 11629 11630 func TestJetStreamMirrorUpdatePreventsSubjects(t *testing.T) { 11631 s := RunBasicJetStreamServer(t) 11632 defer s.Shutdown() 11633 11634 // Client for API requests. 11635 nc, js := jsClientConnect(t, s) 11636 defer nc.Close() 11637 11638 _, err := js.AddStream(&nats.StreamConfig{Name: "ORIGINAL"}) 11639 require_NoError(t, err) 11640 11641 _, err = js.AddStream(&nats.StreamConfig{Name: "MIRROR", Mirror: &nats.StreamSource{Name: "ORIGINAL"}}) 11642 require_NoError(t, err) 11643 11644 _, err = js.UpdateStream(&nats.StreamConfig{Name: "MIRROR", Mirror: &nats.StreamSource{Name: "ORIGINAL"}, Subjects: []string{"x"}}) 11645 if err == nil || err.Error() != "nats: stream mirrors can not contain subjects" { 11646 t.Fatalf("Expected to not be able to put subjects on a stream, got: %+v", err) 11647 } 11648 } 11649 11650 func TestJetStreamSourceBasics(t *testing.T) { 11651 s := RunBasicJetStreamServer(t) 11652 defer s.Shutdown() 11653 11654 // Client for API requests. 
11655 nc, js := jsClientConnect(t, s) 11656 defer nc.Close() 11657 11658 createStream := func(cfg *StreamConfig) { 11659 t.Helper() 11660 req, err := json.Marshal(cfg) 11661 require_NoError(t, err) 11662 11663 rm, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) 11664 require_NoError(t, err) 11665 11666 var resp JSApiStreamCreateResponse 11667 if err := json.Unmarshal(rm.Data, &resp); err != nil { 11668 t.Fatalf("Unexpected error: %v", err) 11669 } 11670 if resp.Error != nil { 11671 t.Fatalf("Unexpected error: %+v", resp.Error) 11672 } 11673 } 11674 11675 if _, err := js.AddStream(&nats.StreamConfig{Name: "test", Sources: []*nats.StreamSource{{Name: ""}}}); err == nil || !strings.Contains(err.Error(), "source stream name is invalid") { 11676 t.Fatalf("Expected a source stream name is invalid error, got %v", err) 11677 } 11678 11679 for _, sname := range []string{"foo", "bar", "baz"} { 11680 if _, err := js.AddStream(&nats.StreamConfig{Name: sname}); err != nil { 11681 t.Fatalf("Unexpected error: %v", err) 11682 } 11683 } 11684 sendBatch := func(subject string, n int) { 11685 for i := 0; i < n; i++ { 11686 if _, err := js.Publish(subject, []byte("OK")); err != nil { 11687 t.Fatalf("Unexpected publish error: %v", err) 11688 } 11689 } 11690 } 11691 // Populate each one. 11692 sendBatch("foo", 10) 11693 sendBatch("bar", 15) 11694 sendBatch("baz", 25) 11695 11696 cfg := &StreamConfig{ 11697 Name: "MS", 11698 Storage: FileStorage, 11699 Sources: []*StreamSource{ 11700 {Name: "foo", SubjectTransforms: []SubjectTransformConfig{{Source: ">", Destination: "foo2.>"}}}, 11701 {Name: "bar"}, 11702 {Name: "baz"}, 11703 }, 11704 } 11705 11706 createStream(cfg) 11707 11708 // Faster timeout since we loop below checking for condition. 11709 js2, err := nc.JetStream(nats.MaxWait(250 * time.Millisecond)) 11710 require_NoError(t, err) 11711 11712 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 11713 si, err := js2.StreamInfo("MS") 11714 require_NoError(t, err) 11715 11716 if si.State.Msgs != 50 { 11717 return fmt.Errorf("Expected 50 msgs, got state: %+v", si.State) 11718 } 11719 return nil 11720 }) 11721 11722 ss, err := js.SubscribeSync("foo2.foo", nats.BindStream("MS")) 11723 require_NoError(t, err) 11724 // we must have at least one message on the transformed subject name (i.e. no timeout) 11725 _, err = ss.NextMsg(time.Millisecond) 11726 require_NoError(t, err) 11727 ss.Drain() 11728 11729 // Test Source Updates 11730 ncfg := &nats.StreamConfig{ 11731 Name: "MS", 11732 Sources: []*nats.StreamSource{ 11733 // Keep foo, bar, remove baz, add dlc 11734 {Name: "foo"}, 11735 {Name: "bar"}, 11736 {Name: "dlc"}, 11737 }, 11738 } 11739 if _, err := js.UpdateStream(ncfg); err != nil { 11740 t.Fatalf("Unexpected error: %v", err) 11741 } 11742 11743 // Test optional start times, filtered subjects etc. 
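// Layout of TEST for the checks below: sequences 1-20 are "dlc", 21-40 are "rip", 41-50 are
// "dlc" again and 51-60 are "jnm". Hence FMS (OptStartSeq 26) should source sequences 26-60,
// i.e. 35 messages; FMS2 (OptStartSeq 11, transform dlc->dlc2) should source the "dlc" messages
// at 11-20 and 41-50, i.e. 20 messages; and FMS3 (dlc->dlc2 plus a "rip" transform with an empty
// destination, which acts as a plain filter leaving the subject unchanged) should source all 30
// "dlc" and 20 "rip" messages, 50 in total, while leaving "jnm" behind.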
11744 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"dlc", "rip", "jnm"}}); err != nil { 11745 t.Fatalf("Unexpected error: %v", err) 11746 } 11747 sendBatch("dlc", 20) 11748 sendBatch("rip", 20) 11749 sendBatch("dlc", 10) 11750 sendBatch("jnm", 10) 11751 11752 cfg = &StreamConfig{ 11753 Name: "FMS", 11754 Storage: FileStorage, 11755 Sources: []*StreamSource{ 11756 {Name: "TEST", OptStartSeq: 26}, 11757 }, 11758 } 11759 createStream(cfg) 11760 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 11761 si, err := js2.StreamInfo("FMS") 11762 require_NoError(t, err) 11763 if si.State.Msgs != 35 { 11764 return fmt.Errorf("Expected 35 msgs, got state: %+v", si.State) 11765 } 11766 return nil 11767 }) 11768 // Double check first starting. 11769 m, err := js.GetMsg("FMS", 1) 11770 require_NoError(t, err) 11771 if shdr := m.Header.Get(JSStreamSource); shdr == _EMPTY_ { 11772 t.Fatalf("Expected a header, got none") 11773 } else if _, _, sseq := streamAndSeq(shdr); sseq != 26 { 11774 t.Fatalf("Expected header sequence of 26, got %d", sseq) 11775 } 11776 11777 // Test Filters 11778 cfg = &StreamConfig{ 11779 Name: "FMS2", 11780 Storage: FileStorage, 11781 Sources: []*StreamSource{ 11782 {Name: "TEST", OptStartSeq: 11, SubjectTransforms: []SubjectTransformConfig{{Source: "dlc", Destination: "dlc2"}}}, 11783 }, 11784 } 11785 createStream(cfg) 11786 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 11787 si, err := js2.StreamInfo("FMS2") 11788 require_NoError(t, err) 11789 if si.State.Msgs != 20 { 11790 return fmt.Errorf("Expected 20 msgs, got state: %+v", si.State) 11791 } 11792 return nil 11793 }) 11794 11795 // Double check first starting. 11796 if m, err = js.GetMsg("FMS2", 1); err != nil { 11797 t.Fatalf("Unexpected error: %v", err) 11798 } 11799 if shdr := m.Header.Get(JSStreamSource); shdr == _EMPTY_ { 11800 t.Fatalf("Expected a header, got none") 11801 } else if _, _, sseq := streamAndSeq(shdr); sseq != 11 { 11802 t.Fatalf("Expected header sequence of 11, got %d", sseq) 11803 } 11804 if m.Subject != "dlc2" { 11805 t.Fatalf("Expected transformed subject dlc2, but got %s instead", m.Subject) 11806 } 11807 11808 // Test Filters 11809 cfg = &StreamConfig{ 11810 Name: "FMS3", 11811 Storage: FileStorage, 11812 Sources: []*StreamSource{ 11813 {Name: "TEST", SubjectTransforms: []SubjectTransformConfig{{Source: "dlc", Destination: "dlc2"}, {Source: "rip", Destination: ""}}}, 11814 }, 11815 } 11816 createStream(cfg) 11817 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 11818 si, err := js2.StreamInfo("FMS3") 11819 require_NoError(t, err) 11820 if si.State.Msgs != 50 { 11821 return fmt.Errorf("Expected 50 msgs, got state: %+v", si.State) 11822 } 11823 return nil 11824 }) 11825 11826 // Double check first message 11827 if m, err = js.GetMsg("FMS3", 1); err != nil { 11828 t.Fatalf("Unexpected error: %v", err) 11829 } 11830 if shdr := m.Header.Get(JSStreamSource); shdr == _EMPTY_ { 11831 t.Fatalf("Expected a header, got none") 11832 } else if m.Subject != "dlc2" { 11833 t.Fatalf("Expected subject 'dlc2' and got %s", m.Subject) 11834 } 11835 11836 // Double check first message with the other subject 11837 if m, err = js.GetMsg("FMS3", 21); err != nil { 11838 t.Fatalf("Unexpected error: %v", err) 11839 } 11840 if shdr := m.Header.Get(JSStreamSource); shdr == _EMPTY_ { 11841 t.Fatalf("Expected a header, got none") 11842 } else if m.Subject != "rip" { 11843 t.Fatalf("Expected subject 'rip' and got %s", m.Subject) 11844 } 11845 11846 
checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 11847 si, err := js2.StreamInfo("FMS3") 11848 require_NoError(t, err) 11849 if si.State.Subjects["jnm"] != 0 { 11850 return fmt.Errorf("Unexpected messages from the source found") 11851 } 11852 return nil 11853 }) 11854 11855 // pre 2.10 backwards compatibility 11856 transformConfig := nats.SubjectTransformConfig{Source: "B.*", Destination: "A.{{Wildcard(1)}}"} 11857 aConfig := nats.StreamConfig{Name: "A", Subjects: []string{"B.*"}, SubjectTransform: &transformConfig} 11858 if _, err := js.AddStream(&aConfig); err != nil { 11859 t.Fatalf("Unexpected error: %v", err) 11860 } 11861 11862 sendBatch("B.A", 1) 11863 sendBatch("B.B", 1) 11864 bConfig := nats.StreamConfig{Name: "B", Subjects: []string{"A.*"}} 11865 11866 if _, err := js.AddStream(&bConfig); err != nil { 11867 t.Fatalf("Unexpected error: %v", err) 11868 } 11869 11870 // fake a message that would have been sourced with pre 2.10 11871 msg := nats.NewMsg("A.A") 11872 // pre 2.10 header format just stream name and sequence number 11873 msg.Header.Set(JSStreamSource, "A 1") 11874 msg.Data = []byte("OK") 11875 11876 if _, err := js.PublishMsg(msg); err != nil { 11877 t.Fatalf("Unexpected publish error: %v", err) 11878 } 11879 11880 bConfig.Sources = []*nats.StreamSource{{Name: "A"}} 11881 if _, err := js.UpdateStream(&bConfig); err != nil { 11882 t.Fatalf("Unexpected error: %v", err) 11883 } 11884 11885 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 11886 si, err := js2.StreamInfo("B") 11887 require_NoError(t, err) 11888 if si.State.Msgs != 2 { 11889 return fmt.Errorf("Expected 2 msgs, got state: %+v", si.State) 11890 } 11891 return nil 11892 }) 11893 } 11894 11895 func TestJetStreamInputTransform(t *testing.T) { 11896 s := RunBasicJetStreamServer(t) 11897 defer s.Shutdown() 11898 11899 // Client for API requests. 
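// A stream-level SubjectTransform rewrites the subject as the message is stored, so the publish
// to "foo" below is persisted, and later retrieved via GetMsg, under "transformed.foo".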
11900 nc, js := jsClientConnect(t, s) 11901 defer nc.Close() 11902 11903 createStream := func(cfg *StreamConfig) { 11904 t.Helper() 11905 req, err := json.Marshal(cfg) 11906 require_NoError(t, err) 11907 11908 rm, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) 11909 require_NoError(t, err) 11910 11911 var resp JSApiStreamCreateResponse 11912 if err := json.Unmarshal(rm.Data, &resp); err != nil { 11913 t.Fatalf("Unexpected error: %v", err) 11914 } 11915 if resp.Error != nil { 11916 t.Fatalf("Unexpected error: %+v", resp.Error) 11917 } 11918 } 11919 11920 createStream(&StreamConfig{Name: "T1", Subjects: []string{"foo"}, SubjectTransform: &SubjectTransformConfig{Source: ">", Destination: "transformed.>"}, Storage: MemoryStorage}) 11921 11922 // publish a message 11923 if _, err := js.Publish("foo", []byte("OK")); err != nil { 11924 t.Fatalf("Unexpected publish error: %v", err) 11925 } 11926 11927 m, err := js.GetMsg("T1", 1) 11928 require_NoError(t, err) 11929 11930 if m.Subject != "transformed.foo" { 11931 t.Fatalf("Expected message subject transformed.foo, got %s", m.Subject) 11932 } 11933 } 11934 11935 func TestJetStreamOperatorAccounts(t *testing.T) { 11936 s, _ := RunServerWithConfig("./configs/js-op.conf") 11937 if config := s.JetStreamConfig(); config != nil { 11938 defer removeDir(t, config.StoreDir) 11939 } 11940 defer s.Shutdown() 11941 11942 nc, js := jsClientConnect(t, s, nats.UserCredentials("./configs/one.creds")) 11943 defer nc.Close() 11944 11945 if _, err := js.AddStream(&nats.StreamConfig{Name: "TEST"}); err != nil { 11946 t.Fatalf("Unexpected error: %v", err) 11947 } 11948 11949 toSend := 100 11950 for i := 0; i < toSend; i++ { 11951 if _, err := js.Publish("TEST", []byte("OK")); err != nil { 11952 t.Fatalf("Unexpected publish error: %v", err) 11953 } 11954 } 11955 11956 // Close our user for account one. 11957 nc.Close() 11958 11959 // Restart the server. 11960 s.Shutdown() 11961 s, _ = RunServerWithConfig("./configs/js-op.conf") 11962 defer s.Shutdown() 11963 11964 jsz, err := s.Jsz(nil) 11965 require_NoError(t, err) 11966 11967 if jsz.Streams != 1 { 11968 t.Fatalf("Expected jsz to report our stream on restart") 11969 } 11970 if jsz.Messages != uint64(toSend) { 11971 t.Fatalf("Expected jsz to report our %d messages on restart, got %d", toSend, jsz.Messages) 11972 } 11973 } 11974 11975 func TestJetStreamServerDomainBadConfig(t *testing.T) { 11976 shouldFail := func(domain string) { 11977 t.Helper() 11978 opts := DefaultTestOptions 11979 opts.JetStreamDomain = domain 11980 if err := validateOptions(&opts); err == nil || !strings.Contains(err.Error(), "invalid domain name") { 11981 t.Fatalf("Expected bad domain error, got %v", err) 11982 } 11983 } 11984 11985 shouldFail("HU..B") 11986 shouldFail("HU B") 11987 shouldFail(" ") 11988 shouldFail("\t") 11989 shouldFail("CORE.") 11990 shouldFail(".CORE") 11991 shouldFail("C.*.O. 
RE") 11992 shouldFail("C.ORE") 11993 } 11994 11995 func TestJetStreamServerDomainConfig(t *testing.T) { 11996 conf := createConfFile(t, []byte(fmt.Sprintf(` 11997 listen: 127.0.0.1:-1 11998 jetstream: {domain: "HUB", store_dir: %q} 11999 `, t.TempDir()))) 12000 12001 s, _ := RunServerWithConfig(conf) 12002 defer s.Shutdown() 12003 12004 if !s.JetStreamEnabled() { 12005 t.Fatalf("Expected JetStream to be enabled") 12006 } 12007 12008 config := s.JetStreamConfig() 12009 if config.Domain != "HUB" { 12010 t.Fatalf("Expected %q as domain name, got %q", "HUB", config.Domain) 12011 } 12012 } 12013 12014 func TestJetStreamServerDomainConfigButDisabled(t *testing.T) { 12015 conf := createConfFile(t, []byte(` 12016 listen: 127.0.0.1:-1 12017 jetstream: {domain: "HUB", enabled: false} 12018 `)) 12019 12020 s, _ := RunServerWithConfig(conf) 12021 defer s.Shutdown() 12022 12023 if s.JetStreamEnabled() { 12024 t.Fatalf("Expected JetStream NOT to be enabled") 12025 } 12026 12027 opts := s.getOpts() 12028 if opts.JetStreamDomain != "HUB" { 12029 t.Fatalf("Expected %q as opts domain name, got %q", "HUB", opts.JetStreamDomain) 12030 } 12031 } 12032 12033 func TestJetStreamDomainInPubAck(t *testing.T) { 12034 conf := createConfFile(t, []byte(fmt.Sprintf(` 12035 listen: 127.0.0.1:-1 12036 jetstream: {domain: "HUB", store_dir: %q} 12037 `, t.TempDir()))) 12038 12039 s, _ := RunServerWithConfig(conf) 12040 defer s.Shutdown() 12041 12042 nc, js := jsClientConnect(t, s) 12043 defer nc.Close() 12044 12045 cfg := &nats.StreamConfig{ 12046 Name: "TEST", 12047 Storage: nats.MemoryStorage, 12048 Subjects: []string{"foo"}, 12049 } 12050 if _, err := js.AddStream(cfg); err != nil { 12051 t.Fatalf("Unexpected error: %v", err) 12052 } 12053 12054 // Check by hand for now til it makes its way into Go client. 12055 am, err := nc.Request("foo", nil, time.Second) 12056 require_NoError(t, err) 12057 var pa PubAck 12058 json.Unmarshal(am.Data, &pa) 12059 if pa.Domain != "HUB" { 12060 t.Fatalf("Expected PubAck to have domain of %q, got %q", "HUB", pa.Domain) 12061 } 12062 } 12063 12064 // Issue #2213 12065 func TestJetStreamDirectConsumersBeingReported(t *testing.T) { 12066 s := RunBasicJetStreamServer(t) 12067 defer s.Shutdown() 12068 12069 // Client for API requests. 12070 nc, js := jsClientConnect(t, s) 12071 defer nc.Close() 12072 12073 _, err := js.AddStream(&nats.StreamConfig{ 12074 Name: "TEST", 12075 Subjects: []string{"foo"}, 12076 }) 12077 require_NoError(t, err) 12078 12079 _, err = js.AddStream(&nats.StreamConfig{ 12080 Name: "S", 12081 Sources: []*nats.StreamSource{{ 12082 Name: "TEST", 12083 }}, 12084 }) 12085 require_NoError(t, err) 12086 12087 if _, err = js.Publish("foo", nil); err != nil { 12088 t.Fatalf("Unexpected publish error: %v", err) 12089 } 12090 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 12091 si, err := js.StreamInfo("S") 12092 if err != nil { 12093 return fmt.Errorf("Could not get stream info: %v", err) 12094 } 12095 if si.State.Msgs != 1 { 12096 return fmt.Errorf("Expected 1 msg, got state: %+v", si.State) 12097 } 12098 return nil 12099 }) 12100 12101 si, err := js.StreamInfo("TEST") 12102 require_NoError(t, err) 12103 12104 // Direct consumers should not be reported 12105 if si.State.Consumers != 0 { 12106 t.Fatalf("Did not expect any consumers, got %d", si.State.Consumers) 12107 } 12108 12109 // Now check for consumer in consumer names list. 
12110 var names []string 12111 for name := range js.ConsumerNames("TEST") { 12112 names = append(names, name) 12113 } 12114 if len(names) != 0 { 12115 t.Fatalf("Expected no consumers but got %+v", names) 12116 } 12117 12118 // Now check detailed list. 12119 var cis []*nats.ConsumerInfo 12120 for ci := range js.ConsumersInfo("TEST") { 12121 cis = append(cis, ci) 12122 } 12123 if len(cis) != 0 { 12124 t.Fatalf("Expected no consumers but got %+v", cis) 12125 } 12126 } 12127 12128 // https://github.com/nats-io/nats-server/issues/2290 12129 func TestJetStreamTemplatedErrorsBug(t *testing.T) { 12130 s := RunBasicJetStreamServer(t) 12131 defer s.Shutdown() 12132 12133 // Client for API requests. 12134 nc, js := jsClientConnect(t, s) 12135 defer nc.Close() 12136 12137 _, err := js.AddStream(&nats.StreamConfig{ 12138 Name: "TEST", 12139 Subjects: []string{"foo"}, 12140 }) 12141 require_NoError(t, err) 12142 12143 _, err = js.PullSubscribe("foo", "") 12144 if err != nil && strings.Contains(err.Error(), "{err}") { 12145 t.Fatalf("Error is not filled in: %v", err) 12146 } 12147 } 12148 12149 func TestJetStreamServerEncryption(t *testing.T) { 12150 cases := []struct { 12151 name string 12152 cstr string 12153 cipher StoreCipher 12154 }{ 12155 {"Default", _EMPTY_, ChaCha}, 12156 {"ChaCha", ", cipher: chacha", ChaCha}, 12157 {"AES", ", cipher: aes", AES}, 12158 } 12159 12160 for _, c := range cases { 12161 t.Run(c.name, func(t *testing.T) { 12162 tmpl := ` 12163 server_name: S22 12164 listen: 127.0.0.1:-1 12165 jetstream: {key: $JS_KEY, store_dir: '%s' %s} 12166 ` 12167 storeDir := t.TempDir() 12168 12169 conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, storeDir, c.cstr))) 12170 12171 os.Setenv("JS_KEY", "s3cr3t!!") 12172 defer os.Unsetenv("JS_KEY") 12173 12174 s, _ := RunServerWithConfig(conf) 12175 defer s.Shutdown() 12176 12177 config := s.JetStreamConfig() 12178 if config == nil { 12179 t.Fatalf("Expected config but got none") 12180 } 12181 defer removeDir(t, config.StoreDir) 12182 12183 // Client based API 12184 nc, js := jsClientConnect(t, s) 12185 defer nc.Close() 12186 12187 cfg := &nats.StreamConfig{ 12188 Name: "TEST", 12189 Subjects: []string{"foo", "bar", "baz"}, 12190 } 12191 if _, err := js.AddStream(cfg); err != nil { 12192 t.Fatalf("Unexpected error: %v", err) 12193 } 12194 12195 msg := []byte("ENCRYPTED PAYLOAD!!") 12196 sendMsg := func(subj string) { 12197 t.Helper() 12198 if _, err := js.Publish(subj, msg); err != nil { 12199 t.Fatalf("Unexpected publish error: %v", err) 12200 } 12201 } 12202 // Send 10 msgs 12203 for i := 0; i < 10; i++ { 12204 sendMsg("foo") 12205 } 12206 12207 // Now create a consumer. 12208 sub, err := js.PullSubscribe("foo", "dlc") 12209 if err != nil { 12210 t.Fatalf("Unexpected error: %v", err) 12211 } 12212 for i, m := range fetchMsgs(t, sub, 10, 5*time.Second) { 12213 if i < 5 { 12214 m.AckSync() 12215 } 12216 } 12217 12218 // Grab our state to compare after restart. 12219 si, _ := js.StreamInfo("TEST") 12220 ci, _ := js.ConsumerInfo("TEST", "dlc") 12221 12222 // Quick check to make sure everything not just plaintext still. 12223 sdir := filepath.Join(config.StoreDir, "$G", "streams", "TEST") 12224 // Make sure we can not find any plaintext strings in the target file. 
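// Two small local helpers: checkFor reads a store file and fails if any of the given plaintext
// strings appear in it, and checkKeyFile asserts that the per-store encryption key file exists.
// Note this local checkFor shadows the package-level polling helper of the same name within
// this subtest.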
12225 checkFor := func(fn string, strs ...string) { 12226 t.Helper() 12227 data, err := os.ReadFile(fn) 12228 if err != nil { 12229 t.Fatalf("Unexpected error: %v", err) 12230 } 12231 for _, str := range strs { 12232 if bytes.Contains(data, []byte(str)) { 12233 t.Fatalf("Found %q in body of file contents", str) 12234 } 12235 } 12236 } 12237 checkKeyFile := func(fn string) { 12238 t.Helper() 12239 if _, err := os.Stat(fn); err != nil { 12240 t.Fatalf("Expected a key file at %q", fn) 12241 } 12242 } 12243 12244 // Check stream meta. 12245 checkEncrypted := func() { 12246 t.Helper() 12247 checkKeyFile(filepath.Join(sdir, JetStreamMetaFileKey)) 12248 checkFor(filepath.Join(sdir, JetStreamMetaFile), "TEST", "foo", "bar", "baz", "max_msgs", "max_bytes") 12249 // Check a message block. 12250 checkKeyFile(filepath.Join(sdir, "msgs", "1.key")) 12251 checkFor(filepath.Join(sdir, "msgs", "1.blk"), "ENCRYPTED PAYLOAD!!", "foo", "bar", "baz") 12252 12253 // Check consumer meta and state. 12254 checkKeyFile(filepath.Join(sdir, "obs", "dlc", JetStreamMetaFileKey)) 12255 checkFor(filepath.Join(sdir, "obs", "dlc", JetStreamMetaFile), "TEST", "dlc", "foo", "bar", "baz", "max_msgs", "ack_policy") 12256 // Load and see if we can parse the consumer state. 12257 state, err := os.ReadFile(filepath.Join(sdir, "obs", "dlc", "o.dat")) 12258 require_NoError(t, err) 12259 12260 if _, err := decodeConsumerState(state); err == nil { 12261 t.Fatalf("Expected decoding consumer state to fail") 12262 } 12263 } 12264 12265 // Stop current 12266 s.Shutdown() 12267 12268 checkEncrypted() 12269 12270 // Restart. 12271 s, _ = RunServerWithConfig(conf) 12272 defer s.Shutdown() 12273 12274 // Connect again. 12275 nc, js = jsClientConnect(t, s) 12276 defer nc.Close() 12277 12278 si2, err := js.StreamInfo("TEST") 12279 require_NoError(t, err) 12280 12281 if !reflect.DeepEqual(si, si2) { 12282 t.Fatalf("Stream infos did not match\n%+v\nvs\n%+v", si, si2) 12283 } 12284 12285 ci2, _ := js.ConsumerInfo("TEST", "dlc") 12286 // Consumer create times can be slightly off after restore from disk. 12287 now := time.Now() 12288 ci.Created, ci2.Created = now, now 12289 ci.Delivered.Last, ci2.Delivered.Last = nil, nil 12290 ci.AckFloor.Last, ci2.AckFloor.Last = nil, nil 12291 // Also clusters will be different. 12292 ci.Cluster, ci2.Cluster = nil, nil 12293 if !reflect.DeepEqual(ci, ci2) { 12294 t.Fatalf("Consumer infos did not match\n%+v\nvs\n%+v", ci, ci2) 12295 } 12296 12297 // Send 10 more msgs 12298 for i := 0; i < 10; i++ { 12299 sendMsg("foo") 12300 } 12301 if si, err = js.StreamInfo("TEST"); err != nil { 12302 t.Fatalf("Unexpected error: %v", err) 12303 } 12304 if si.State.Msgs != 20 { 12305 t.Fatalf("Expected 20 msgs total, got %d", si.State.Msgs) 12306 } 12307 12308 // Now test snapshots etc. 12309 acc := s.GlobalAccount() 12310 mset, err := acc.lookupStream("TEST") 12311 require_NoError(t, err) 12312 scfg := mset.config() 12313 sr, err := mset.snapshot(5*time.Second, false, true) 12314 if err != nil { 12315 t.Fatalf("Error getting snapshot: %v", err) 12316 } 12317 snapshot, err := io.ReadAll(sr.Reader) 12318 if err != nil { 12319 t.Fatalf("Error reading snapshot") 12320 } 12321 12322 // Run new server w/o encryption. Make sure we can restore properly (meaning encryption was stripped etc). 
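// Snapshots are written in plaintext regardless of the store cipher, so the snapshot taken above
// should restore cleanly into a server without encryption configured, and restoring it back into
// the encrypted server afterwards should end up re-encrypted on disk (verified by checkEncrypted
// at the end).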
12323 ns := RunBasicJetStreamServer(t) 12324 defer ns.Shutdown() 12325 12326 nacc := ns.GlobalAccount() 12327 r := bytes.NewReader(snapshot) 12328 mset, err = nacc.RestoreStream(&scfg, r) 12329 require_NoError(t, err) 12330 ss := mset.store.State() 12331 if ss.Msgs != si.State.Msgs || ss.FirstSeq != si.State.FirstSeq || ss.LastSeq != si.State.LastSeq { 12332 t.Fatalf("Stream states do not match: %+v vs %+v", ss, si.State) 12333 } 12334 12335 // Now restore to our encrypted server as well. 12336 if err := js.DeleteStream("TEST"); err != nil { 12337 t.Fatalf("Unexpected error: %v", err) 12338 } 12339 12340 acc = s.GlobalAccount() 12341 r.Reset(snapshot) 12342 mset, err = acc.RestoreStream(&scfg, r) 12343 require_NoError(t, err) 12344 ss = mset.store.State() 12345 if ss.Msgs != si.State.Msgs || ss.FirstSeq != si.State.FirstSeq || ss.LastSeq != si.State.LastSeq { 12346 t.Fatalf("Stream states do not match: %+v vs %+v", ss, si.State) 12347 } 12348 12349 // Check that all is encrypted like above since we know we need to convert since snapshots always plaintext. 12350 checkEncrypted() 12351 }) 12352 } 12353 } 12354 12355 // User report of bug. 12356 func TestJetStreamConsumerBadNumPending(t *testing.T) { 12357 s := RunBasicJetStreamServer(t) 12358 defer s.Shutdown() 12359 12360 // Client for API requests. 12361 nc, js := jsClientConnect(t, s) 12362 defer nc.Close() 12363 12364 _, err := js.AddStream(&nats.StreamConfig{ 12365 Name: "ORDERS", 12366 Subjects: []string{"orders.*"}, 12367 }) 12368 require_NoError(t, err) 12369 12370 newOrders := func(n int) { 12371 // Queue up new orders. 12372 for i := 0; i < n; i++ { 12373 js.Publish("orders.created", []byte("NEW")) 12374 } 12375 } 12376 12377 newOrders(10) 12378 12379 // Create to subscribers. 12380 process := func(m *nats.Msg) { 12381 js.Publish("orders.approved", []byte("APPROVED")) 12382 } 12383 12384 op, err := js.Subscribe("orders.created", process) 12385 require_NoError(t, err) 12386 12387 defer op.Unsubscribe() 12388 12389 mon, err := js.SubscribeSync("orders.*") 12390 require_NoError(t, err) 12391 12392 defer mon.Unsubscribe() 12393 12394 waitForMsgs := func(n uint64) { 12395 t.Helper() 12396 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 12397 si, err := js.StreamInfo("ORDERS") 12398 if err != nil { 12399 t.Fatalf("Unexpected error: %v", err) 12400 } 12401 if si.State.Msgs != n { 12402 return fmt.Errorf("Expected %d msgs, got state: %+v", n, si.State) 12403 } 12404 return nil 12405 }) 12406 } 12407 12408 checkForNoPending := func(sub *nats.Subscription) { 12409 t.Helper() 12410 if ci, err := sub.ConsumerInfo(); err != nil || ci == nil || ci.NumPending != 0 { 12411 if ci != nil && ci.NumPending != 0 { 12412 t.Fatalf("Bad consumer NumPending, expected 0 but got %d", ci.NumPending) 12413 } else { 12414 t.Fatalf("Bad consumer info: %+v", ci) 12415 } 12416 } 12417 } 12418 12419 waitForMsgs(20) 12420 checkForNoPending(op) 12421 checkForNoPending(mon) 12422 12423 newOrders(10) 12424 12425 waitForMsgs(40) 12426 checkForNoPending(op) 12427 checkForNoPending(mon) 12428 } 12429 12430 func TestJetStreamDeliverLastPerSubject(t *testing.T) { 12431 for _, st := range []StorageType{FileStorage, MemoryStorage} { 12432 t.Run(st.String(), func(t *testing.T) { 12433 s := RunBasicJetStreamServer(t) 12434 defer s.Shutdown() 12435 12436 // Client for API requests. 
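// With 11 interleaved rounds across six subjects the stream's last sequence ends at 66, and the
// final round occupies sequences 61-66 in publish order. A DeliverLastPerSubject consumer
// filtered on "kv.b1.*" should therefore deliver only sequences 61, 63 and 65 (the last
// "kv.b1.foo", "kv.b1.bar" and "kv.b1.baz"), and after the extra publishes sequence 67 for the
// new "kv.b1.bar".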
12437 nc, js := jsClientConnect(t, s) 12438 defer nc.Close() 12439 12440 cfg := StreamConfig{ 12441 Name: "KV", 12442 Subjects: []string{"kv.>"}, 12443 Storage: st, 12444 MaxMsgsPer: 5, 12445 } 12446 12447 req, err := json.Marshal(cfg) 12448 if err != nil { 12449 t.Fatalf("Unexpected error: %v", err) 12450 } 12451 // Do manually for now. 12452 nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) 12453 si, err := js.StreamInfo("KV") 12454 if err != nil { 12455 t.Fatalf("Unexpected error: %v", err) 12456 } 12457 if si == nil || si.Config.Name != "KV" { 12458 t.Fatalf("StreamInfo is not correct %+v", si) 12459 } 12460 12461 // Interleave them on purpose. 12462 for i := 1; i <= 11; i++ { 12463 msg := []byte(fmt.Sprintf("%d", i)) 12464 js.PublishAsync("kv.b1.foo", msg) 12465 js.PublishAsync("kv.b2.foo", msg) 12466 12467 js.PublishAsync("kv.b1.bar", msg) 12468 js.PublishAsync("kv.b2.bar", msg) 12469 12470 js.PublishAsync("kv.b1.baz", msg) 12471 js.PublishAsync("kv.b2.baz", msg) 12472 } 12473 12474 select { 12475 case <-js.PublishAsyncComplete(): 12476 case <-time.After(2 * time.Second): 12477 t.Fatalf("Did not receive completion signal") 12478 } 12479 12480 // Do quick check that config needs FilteredSubjects otherwise bad config. 12481 badReq := CreateConsumerRequest{ 12482 Stream: "KV", 12483 Config: ConsumerConfig{ 12484 DeliverSubject: "b", 12485 DeliverPolicy: DeliverLastPerSubject, 12486 }, 12487 } 12488 req, err = json.Marshal(badReq) 12489 if err != nil { 12490 t.Fatalf("Unexpected error: %v", err) 12491 } 12492 resp, err := nc.Request(fmt.Sprintf(JSApiConsumerCreateT, "KV"), req, time.Second) 12493 if err != nil { 12494 t.Fatalf("Unexpected error: %v", err) 12495 } 12496 var ccResp JSApiConsumerCreateResponse 12497 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 12498 t.Fatalf("Unexpected error: %v", err) 12499 } 12500 if ccResp.Error == nil || !strings.Contains(ccResp.Error.Description, "filter subject is not set") { 12501 t.Fatalf("Expected an error, got none") 12502 } 12503 12504 // Now let's consume these via last per subject. 12505 obsReq := CreateConsumerRequest{ 12506 Stream: "KV", 12507 Config: ConsumerConfig{ 12508 DeliverSubject: "d", 12509 DeliverPolicy: DeliverLastPerSubject, 12510 FilterSubject: "kv.b1.*", 12511 }, 12512 } 12513 req, err = json.Marshal(obsReq) 12514 if err != nil { 12515 t.Fatalf("Unexpected error: %v", err) 12516 } 12517 resp, err = nc.Request(fmt.Sprintf(JSApiConsumerCreateT, "KV"), req, time.Second) 12518 if err != nil { 12519 t.Fatalf("Unexpected error: %v", err) 12520 } 12521 ccResp.Error = nil 12522 if err = json.Unmarshal(resp.Data, &ccResp); err != nil { 12523 t.Fatalf("Unexpected error: %v", err) 12524 } 12525 12526 sub, _ := nc.SubscribeSync("d") 12527 defer sub.Unsubscribe() 12528 12529 // Helper to check messages are correct. 
12530 checkNext := func(subject string, sseq uint64, v string) { 12531 t.Helper() 12532 m, err := sub.NextMsg(time.Second) 12533 if err != nil { 12534 t.Fatalf("Error receiving message: %v", err) 12535 } 12536 if m.Subject != subject { 12537 t.Fatalf("Expected subject %q but got %q", subject, m.Subject) 12538 } 12539 meta, err := m.Metadata() 12540 if err != nil { 12541 t.Fatalf("didn't get metadata: %s", err) 12542 } 12543 if meta.Sequence.Stream != sseq { 12544 t.Fatalf("Expected stream seq %d but got %d", sseq, meta.Sequence.Stream) 12545 } 12546 if string(m.Data) != v { 12547 t.Fatalf("Expected data of %q but got %q", v, m.Data) 12548 } 12549 } 12550 12551 checkSubsPending(t, sub, 3) 12552 12553 // Now make sure they are what we expect. 12554 checkNext("kv.b1.foo", 61, "11") 12555 checkNext("kv.b1.bar", 63, "11") 12556 checkNext("kv.b1.baz", 65, "11") 12557 12558 msg := []byte(fmt.Sprintf("%d", 22)) 12559 js.Publish("kv.b1.bar", msg) 12560 js.Publish("kv.b2.foo", msg) // Not filtered through.. 12561 12562 checkSubsPending(t, sub, 1) 12563 checkNext("kv.b1.bar", 67, "22") 12564 }) 12565 } 12566 } 12567 12568 func TestJetStreamDeliverLastPerSubjectNumPending(t *testing.T) { 12569 s := RunBasicJetStreamServer(t) 12570 defer s.Shutdown() 12571 12572 // Client for API requests. 12573 nc, js := jsClientConnect(t, s) 12574 defer nc.Close() 12575 12576 if _, err := js.AddStream(&nats.StreamConfig{ 12577 Name: "KV", 12578 Subjects: []string{"KV.>"}, 12579 MaxMsgsPerSubject: 5, 12580 Replicas: 1, 12581 }); err != nil { 12582 t.Fatalf("Error adding stream: %v", err) 12583 } 12584 12585 for i := 0; i < 5; i++ { 12586 msg := []byte(fmt.Sprintf("msg%d", i)) 12587 js.Publish("KV.foo", msg) 12588 js.Publish("KV.bar", msg) 12589 js.Publish("KV.baz", msg) 12590 js.Publish("KV.bat", msg) 12591 } 12592 12593 // Delete some messages 12594 js.DeleteMsg("KV", 2) 12595 js.DeleteMsg("KV", 5) 12596 12597 ci, err := js.AddConsumer("KV", &nats.ConsumerConfig{ 12598 DeliverSubject: nats.NewInbox(), 12599 AckPolicy: nats.AckExplicitPolicy, 12600 DeliverPolicy: nats.DeliverLastPerSubjectPolicy, 12601 FilterSubject: "KV.>", 12602 }) 12603 if err != nil { 12604 t.Fatalf("Error adding consumer: %v", err) 12605 } 12606 if ci.NumPending != 4 { 12607 t.Fatalf("Expected 4 pending msgs, got %v", ci.NumPending) 12608 } 12609 } 12610 12611 // We had a report of a consumer delete crashing the server when in interest retention mode. 12612 // This I believe is only really possible in clustered mode, but we will force the issue here. 12613 func TestJetStreamConsumerCleanupWithRetentionPolicy(t *testing.T) { 12614 s := RunBasicJetStreamServer(t) 12615 defer s.Shutdown() 12616 12617 // Client for API requests. 
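// After draining and acking everything, the test forces the consumer's ack floor stream
// sequence (o.asflr) past the stream's last sequence before unsubscribing, reproducing the
// boundary condition from the reported crash. Success here is simply that the server stays
// responsive: the StreamInfo call at the end must still work after the consumer is removed.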
12618 nc, js := jsClientConnect(t, s) 12619 defer nc.Close() 12620 12621 _, err := js.AddStream(&nats.StreamConfig{ 12622 Name: "ORDERS", 12623 Subjects: []string{"orders.*"}, 12624 Retention: nats.InterestPolicy, 12625 }) 12626 require_NoError(t, err) 12627 12628 sub, err := js.SubscribeSync("orders.*") 12629 require_NoError(t, err) 12630 12631 payload := []byte("Hello World") 12632 for i := 0; i < 10; i++ { 12633 subj := fmt.Sprintf("orders.%d", i+1) 12634 js.Publish(subj, payload) 12635 } 12636 12637 checkSubsPending(t, sub, 10) 12638 12639 for i := 0; i < 10; i++ { 12640 m, err := sub.NextMsg(time.Second) 12641 if err != nil { 12642 t.Fatalf("Unexpected error: %v", err) 12643 } 12644 m.AckSync() 12645 } 12646 12647 ci, err := sub.ConsumerInfo() 12648 if err != nil { 12649 t.Fatalf("Unexpected error getting consumer info: %v", err) 12650 } 12651 12652 acc := s.GlobalAccount() 12653 mset, err := acc.lookupStream("ORDERS") 12654 require_NoError(t, err) 12655 12656 o := mset.lookupConsumer(ci.Name) 12657 if o == nil { 12658 t.Fatalf("Error looking up consumer %q", ci.Name) 12659 } 12660 lseq := mset.lastSeq() 12661 o.mu.Lock() 12662 // Force boundary condition here. 12663 o.asflr = lseq + 2 12664 o.mu.Unlock() 12665 sub.Unsubscribe() 12666 12667 // Make sure server still available. 12668 if _, err := js.StreamInfo("ORDERS"); err != nil { 12669 t.Fatalf("Unexpected error: %v", err) 12670 } 12671 } 12672 12673 // Issue #2392 12674 func TestJetStreamPurgeEffectsConsumerDelivery(t *testing.T) { 12675 s := RunBasicJetStreamServer(t) 12676 defer s.Shutdown() 12677 12678 // Client for API requests. 12679 nc, js := jsClientConnect(t, s) 12680 defer nc.Close() 12681 12682 _, err := js.AddStream(&nats.StreamConfig{ 12683 Name: "TEST", 12684 Subjects: []string{"foo.*"}, 12685 }) 12686 require_NoError(t, err) 12687 12688 js.Publish("foo.a", []byte("show once")) 12689 12690 sub, err := js.SubscribeSync("foo.*", nats.AckWait(250*time.Millisecond), nats.DeliverAll(), nats.AckExplicit()) 12691 require_NoError(t, err) 12692 12693 defer sub.Unsubscribe() 12694 12695 checkSubsPending(t, sub, 1) 12696 12697 // Do not ack. 12698 if _, err := sub.NextMsg(time.Second); err != nil { 12699 t.Fatalf("Error receiving message: %v", err) 12700 } 12701 12702 // Now purge stream. 12703 if err := js.PurgeStream("TEST"); err != nil { 12704 t.Fatalf("Unexpected purge error: %v", err) 12705 } 12706 12707 js.Publish("foo.b", []byte("show twice?")) 12708 // Do not ack again, should show back up. 12709 if _, err := sub.NextMsg(time.Second); err != nil { 12710 t.Fatalf("Error receiving message: %v", err) 12711 } 12712 // Make sure we get it back. 12713 if _, err := sub.NextMsg(time.Second); err != nil { 12714 t.Fatalf("Error receiving message: %v", err) 12715 } 12716 } 12717 12718 // Issue #2403 12719 func TestJetStreamExpireCausesDeadlock(t *testing.T) { 12720 s := RunBasicJetStreamServer(t) 12721 defer s.Shutdown() 12722 12723 // Client for API requests. 12724 nc, js := jsClientConnect(t, s) 12725 defer nc.Close() 12726 12727 _, err := js.AddStream(&nats.StreamConfig{ 12728 Name: "TEST", 12729 Subjects: []string{"foo.*"}, 12730 Storage: nats.MemoryStorage, 12731 MaxMsgs: 10, 12732 Retention: nats.InterestPolicy, 12733 }) 12734 require_NoError(t, err) 12735 12736 sub, err := js.SubscribeSync("foo.bar") 12737 require_NoError(t, err) 12738 12739 defer sub.Unsubscribe() 12740 12741 // Publish from two connections to get the write lock request wedged in between 12742 // having the RLock and wanting it again deeper in the stack. 
12743 nc2, js2 := jsClientConnect(t, s) 12744 defer nc2.Close() 12745 12746 for i := 0; i < 1000; i++ { 12747 js.PublishAsync("foo.bar", []byte("HELLO")) 12748 js2.PublishAsync("foo.bar", []byte("HELLO")) 12749 } 12750 select { 12751 case <-js.PublishAsyncComplete(): 12752 case <-time.After(5 * time.Second): 12753 t.Fatalf("Did not receive completion signal") 12754 } 12755 12756 // If we deadlocked then we will not be able to get stream info. 12757 if _, err := js.StreamInfo("TEST"); err != nil { 12758 t.Fatalf("Unexpected error: %v", err) 12759 } 12760 } 12761 12762 func TestJetStreamConsumerPendingBugWithKV(t *testing.T) { 12763 msc := StreamConfig{ 12764 Name: "KV", 12765 Subjects: []string{"kv.>"}, 12766 Storage: MemoryStorage, 12767 MaxMsgsPer: 1, 12768 } 12769 fsc := msc 12770 fsc.Storage = FileStorage 12771 12772 cases := []struct { 12773 name string 12774 mconfig *StreamConfig 12775 }{ 12776 {"MemoryStore", &msc}, 12777 {"FileStore", &fsc}, 12778 } 12779 for _, c := range cases { 12780 t.Run(c.name, func(t *testing.T) { 12781 12782 s := RunBasicJetStreamServer(t) 12783 defer s.Shutdown() 12784 12785 // Client based API 12786 nc, js := jsClientConnect(t, s) 12787 defer nc.Close() 12788 12789 // Not in Go client under server yet. 12790 mset, err := s.GlobalAccount().addStream(c.mconfig) 12791 if err != nil { 12792 t.Fatalf("Unexpected error: %v", err) 12793 } 12794 12795 js.Publish("kv.1", []byte("1")) 12796 js.Publish("kv.2", []byte("2")) 12797 js.Publish("kv.3", []byte("3")) 12798 js.Publish("kv.1", []byte("4")) 12799 12800 si, err := js.StreamInfo("KV") 12801 if err != nil { 12802 t.Fatalf("Unexpected error: %v", err) 12803 } 12804 if si.State.Msgs != 3 { 12805 t.Fatalf("Expected 3 total msgs, got %d", si.State.Msgs) 12806 } 12807 12808 o, err := mset.addConsumer(&ConsumerConfig{ 12809 Durable: "dlc", 12810 DeliverSubject: "xxx", 12811 DeliverPolicy: DeliverLastPerSubject, 12812 FilterSubject: ">", 12813 }) 12814 if err != nil { 12815 t.Fatalf("Unexpected error: %v", err) 12816 } 12817 if ci := o.info(); ci.NumPending != 3 { 12818 t.Fatalf("Expected pending of 3, got %d", ci.NumPending) 12819 } 12820 }) 12821 } 12822 } 12823 12824 // Issue #2420 12825 func TestJetStreamDefaultMaxMsgsPer(t *testing.T) { 12826 s := RunBasicJetStreamServer(t) 12827 defer s.Shutdown() 12828 12829 // Client for API requests. 12830 nc, js := jsClientConnect(t, s) 12831 defer nc.Close() 12832 12833 si, err := js.AddStream(&nats.StreamConfig{ 12834 Name: "TEST", 12835 Subjects: []string{"foo.*"}, 12836 Storage: nats.MemoryStorage, 12837 MaxMsgs: 10, 12838 }) 12839 require_NoError(t, err) 12840 12841 if si.Config.MaxMsgsPerSubject != -1 { 12842 t.Fatalf("Expected default of -1, got %d", si.Config.MaxMsgsPerSubject) 12843 } 12844 } 12845 12846 // Issue #2423 12847 func TestJetStreamBadConsumerCreateErr(t *testing.T) { 12848 s := RunBasicJetStreamServer(t) 12849 defer s.Shutdown() 12850 12851 // Client for API requests. 12852 nc, js := jsClientConnect(t, s) 12853 defer nc.Close() 12854 12855 _, err := js.AddStream(&nats.StreamConfig{ 12856 Name: "TEST", 12857 Subjects: []string{"foo.*"}, 12858 Storage: nats.MemoryStorage, 12859 }) 12860 require_NoError(t, err) 12861 12862 // When adding a consumer with both deliver subject and max wait (push vs pull), 12863 // we got the wrong err about deliver subject having a wildcard. 
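// MaxWaiting only applies to pull consumers, so combining it with a DeliverSubject (which makes
// this a push consumer) must be rejected, and the error should say so rather than complain
// about the deliver subject containing a wildcard.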
12864 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 12865 Durable: "nowcerr", 12866 DeliverSubject: "X", 12867 MaxWaiting: 100, 12868 }) 12869 if err == nil { 12870 t.Fatalf("Expected an error but got none") 12871 } 12872 if !strings.Contains(err.Error(), "push mode can not set max waiting") { 12873 t.Fatalf("Incorrect error returned: %v", err) 12874 } 12875 } 12876 12877 func TestJetStreamConsumerPushBound(t *testing.T) { 12878 s := RunBasicJetStreamServer(t) 12879 defer s.Shutdown() 12880 12881 nc, js := jsClientConnect(t, s) 12882 defer nc.Close() 12883 12884 cfg := &nats.StreamConfig{ 12885 Name: "TEST", 12886 Storage: nats.MemoryStorage, 12887 Subjects: []string{"foo"}, 12888 } 12889 if _, err := js.AddStream(cfg); err != nil { 12890 t.Fatalf("Unexpected error: %v", err) 12891 } 12892 12893 // We want to test extended consumer info for push based consumers. 12894 // We need to do these by hand for now. 12895 createConsumer := func(name, deliver string) { 12896 t.Helper() 12897 creq := CreateConsumerRequest{ 12898 Stream: "TEST", 12899 Config: ConsumerConfig{ 12900 Durable: name, 12901 DeliverSubject: deliver, 12902 AckPolicy: AckExplicit, 12903 }, 12904 } 12905 req, err := json.Marshal(creq) 12906 if err != nil { 12907 t.Fatalf("Unexpected error: %v", err) 12908 } 12909 resp, err := nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", name), req, time.Second) 12910 if err != nil { 12911 t.Fatalf("Unexpected error: %v", err) 12912 } 12913 var ccResp JSApiConsumerCreateResponse 12914 if err := json.Unmarshal(resp.Data, &ccResp); err != nil { 12915 t.Fatalf("Unexpected error: %v", err) 12916 } 12917 if ccResp.ConsumerInfo == nil || ccResp.Error != nil { 12918 t.Fatalf("Got a bad response %+v", ccResp) 12919 } 12920 } 12921 12922 consumerInfo := func(name string) *ConsumerInfo { 12923 t.Helper() 12924 resp, err := nc.Request(fmt.Sprintf(JSApiConsumerInfoT, "TEST", name), nil, time.Second) 12925 if err != nil { 12926 t.Fatalf("Unexpected error: %v", err) 12927 } 12928 var cinfo JSApiConsumerInfoResponse 12929 if err := json.Unmarshal(resp.Data, &cinfo); err != nil { 12930 t.Fatalf("Unexpected error: %v", err) 12931 } 12932 if cinfo.ConsumerInfo == nil || cinfo.Error != nil { 12933 t.Fatalf("Got a bad response %+v", cinfo) 12934 } 12935 return cinfo.ConsumerInfo 12936 } 12937 12938 // First create a durable push and make sure we show now active status. 12939 createConsumer("dlc", "d.X") 12940 if ci := consumerInfo("dlc"); ci.PushBound { 12941 t.Fatalf("Expected push bound to be false") 12942 } 12943 // Now bind the deliver subject. 12944 sub, _ := nc.SubscribeSync("d.X") 12945 nc.Flush() // Make sure it registers. 12946 // Check that its reported. 12947 if ci := consumerInfo("dlc"); !ci.PushBound { 12948 t.Fatalf("Expected push bound to be set") 12949 } 12950 sub.Unsubscribe() 12951 nc.Flush() // Make sure it registers. 12952 if ci := consumerInfo("dlc"); ci.PushBound { 12953 t.Fatalf("Expected push bound to be false") 12954 } 12955 12956 // Now make sure we have queue groups indictated as needed. 12957 createConsumer("ik", "d.Z") 12958 // Now bind the deliver subject with a queue group. 12959 sub, _ = nc.QueueSubscribeSync("d.Z", "g22") 12960 defer sub.Unsubscribe() 12961 nc.Flush() // Make sure it registers. 12962 // Check that queue group is not reported. 12963 if ci := consumerInfo("ik"); ci.PushBound { 12964 t.Fatalf("Expected push bound to be false") 12965 } 12966 sub.Unsubscribe() 12967 nc.Flush() // Make sure it registers. 
12968 if ci := consumerInfo("ik"); ci.PushBound { 12969 t.Fatalf("Expected push bound to be false") 12970 } 12971 12972 // Make sure pull consumers report PushBound as false by default. 12973 createConsumer("rip", _EMPTY_) 12974 if ci := consumerInfo("rip"); ci.PushBound { 12975 t.Fatalf("Expected push bound to be false") 12976 } 12977 } 12978 12979 // Got a report of memory leaking, tracked it to internal clients for consumers. 12980 func TestJetStreamConsumerInternalClientLeak(t *testing.T) { 12981 s := RunBasicJetStreamServer(t) 12982 defer s.Shutdown() 12983 12984 nc, js := jsClientConnect(t, s) 12985 defer nc.Close() 12986 12987 cfg := &nats.StreamConfig{ 12988 Name: "TEST", 12989 Storage: nats.MemoryStorage, 12990 } 12991 if _, err := js.AddStream(cfg); err != nil { 12992 t.Fatalf("Unexpected error: %v", err) 12993 } 12994 12995 ga, sa := s.GlobalAccount(), s.SystemAccount() 12996 ncb, nscb := ga.NumConnections(), sa.NumConnections() 12997 12998 // Create 10 consumers 12999 for i := 0; i < 10; i++ { 13000 ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{DeliverSubject: "x"}) 13001 if err != nil { 13002 t.Fatalf("Unexpected error: %v", err) 13003 } 13004 // Accelerate ephemeral cleanup. 13005 mset, err := ga.lookupStream("TEST") 13006 if err != nil { 13007 t.Fatalf("Expected to find a stream for %q", "TEST") 13008 } 13009 o := mset.lookupConsumer(ci.Name) 13010 if o == nil { 13011 t.Fatalf("Error looking up consumer %q", ci.Name) 13012 } 13013 o.setInActiveDeleteThreshold(500 * time.Millisecond) 13014 } 13015 13016 // Wait for them to all go away. 13017 checkFor(t, 2*time.Second, 100*time.Millisecond, func() error { 13018 si, err := js.StreamInfo("TEST") 13019 if err != nil { 13020 t.Fatalf("Unexpected error: %v", err) 13021 } 13022 if si.State.Consumers == 0 { 13023 return nil 13024 } 13025 return fmt.Errorf("Consumers still present") 13026 }) 13027 // Make sure we are not leaking clients/connections. 13028 // Server does not see these so need to look at account. 13029 if nca := ga.NumConnections(); nca != ncb { 13030 t.Fatalf("Leaked clients in global account: %d vs %d", ncb, nca) 13031 } 13032 if nsca := sa.NumConnections(); nsca != nscb { 13033 t.Fatalf("Leaked clients in system account: %d vs %d", nscb, nsca) 13034 } 13035 } 13036 13037 func TestJetStreamConsumerEventingRaceOnShutdown(t *testing.T) { 13038 s := RunBasicJetStreamServer(t) 13039 defer s.Shutdown() 13040 13041 nc, js := jsClientConnect(t, s, nats.NoReconnect()) 13042 defer nc.Close() 13043 13044 cfg := &nats.StreamConfig{ 13045 Name: "TEST", 13046 Subjects: []string{"foo"}, 13047 Storage: nats.MemoryStorage, 13048 } 13049 if _, err := js.AddStream(cfg); err != nil { 13050 t.Fatalf("Unexpected error: %v", err) 13051 } 13052 13053 wg := sync.WaitGroup{} 13054 wg.Add(1) 13055 go func() { 13056 defer wg.Done() 13057 13058 for { 13059 if _, err := js.SubscribeSync("foo", nats.BindStream("TEST")); err != nil { 13060 return 13061 } 13062 } 13063 }() 13064 13065 time.Sleep(50 * time.Millisecond) 13066 s.Shutdown() 13067 13068 wg.Wait() 13069 } 13070 13071 // Got a report of streams that expire all messages while the server is down report errors when clients reconnect 13072 // and try to send new messages. 
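// Here the stream's MaxAge lets every message expire while the server is shut down; after the
// restart the stream should simply report zero messages and keep accepting new publishes.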
13073 func TestJetStreamExpireAllWhileServerDown(t *testing.T) { 13074 s := RunBasicJetStreamServer(t) 13075 defer s.Shutdown() 13076 13077 nc, js := jsClientConnect(t, s) 13078 defer nc.Close() 13079 13080 cfg := &nats.StreamConfig{ 13081 Name: "TEST", 13082 MaxAge: 250 * time.Millisecond, 13083 } 13084 if _, err := js.AddStream(cfg); err != nil { 13085 t.Fatalf("Unexpected error: %v", err) 13086 } 13087 toSend := 10_000 13088 for i := 0; i < toSend; i++ { 13089 js.PublishAsync("TEST", []byte("OK")) 13090 } 13091 select { 13092 case <-js.PublishAsyncComplete(): 13093 case <-time.After(time.Second): 13094 t.Fatalf("Did not receive completion signal") 13095 } 13096 13097 sd := s.JetStreamConfig().StoreDir 13098 s.Shutdown() 13099 13100 time.Sleep(300 * time.Millisecond) 13101 13102 // Restart after expire. 13103 s = RunJetStreamServerOnPort(-1, sd) 13104 defer s.Shutdown() 13105 13106 nc, js = jsClientConnect(t, s) 13107 defer nc.Close() 13108 13109 if si, err := js.StreamInfo("TEST"); err != nil || si.State.Msgs != 0 { 13110 t.Fatalf("Unexpected stream info state: %+v", si) 13111 } 13112 13113 for i := 0; i < 10; i++ { 13114 if _, err := js.Publish("TEST", []byte("OK")); err != nil { 13115 t.Fatalf("Unexpected error: %v", err) 13116 } 13117 } 13118 13119 if si, err := js.StreamInfo("TEST"); err != nil || si.State.Msgs != 10 { 13120 t.Fatalf("Unexpected stream info state: %+v", si) 13121 } 13122 } 13123 13124 func TestJetStreamLongStreamNamesAndPubAck(t *testing.T) { 13125 s := RunBasicJetStreamServer(t) 13126 defer s.Shutdown() 13127 13128 nc, js := jsClientConnect(t, s) 13129 defer nc.Close() 13130 13131 cfg := &nats.StreamConfig{ 13132 Name: strings.Repeat("ZABC", 256/4)[:255], 13133 Subjects: []string{"foo"}, 13134 } 13135 if _, err := js.AddStream(cfg); err != nil { 13136 t.Fatalf("Unexpected error: %v", err) 13137 } 13138 js.Publish("foo", []byte("HELLO")) 13139 } 13140 13141 func TestJetStreamPerSubjectPending(t *testing.T) { 13142 for _, st := range []nats.StorageType{nats.FileStorage, nats.MemoryStorage} { 13143 t.Run(st.String(), func(t *testing.T) { 13144 13145 s := RunBasicJetStreamServer(t) 13146 defer s.Shutdown() 13147 13148 nc, js := jsClientConnect(t, s) 13149 defer nc.Close() 13150 13151 _, err := js.AddStream(&nats.StreamConfig{ 13152 Name: "KV_X", 13153 Subjects: []string{"$KV.X.>"}, 13154 MaxMsgsPerSubject: 5, 13155 Storage: st, 13156 }) 13157 if err != nil { 13158 t.Fatalf("add stream failed: %s", err) 13159 } 13160 13161 // the message we will care for 13162 _, err = js.Publish("$KV.X.x.y.z", []byte("hello world")) 13163 if err != nil { 13164 t.Fatalf("publish failed: %s", err) 13165 } 13166 13167 // make sure there's some unrelated message after 13168 _, err = js.Publish("$KV.X.1", []byte("hello world")) 13169 if err != nil { 13170 t.Fatalf("publish failed: %s", err) 13171 } 13172 13173 // we expect the wildcard filter subject to match only the one message and so pending will be 0 13174 sub, err := js.SubscribeSync("$KV.X.x.>", nats.DeliverLastPerSubject()) 13175 if err != nil { 13176 t.Fatalf("subscribe failed: %s", err) 13177 } 13178 13179 msg, err := sub.NextMsg(time.Second) 13180 if err != nil { 13181 t.Fatalf("next failed: %s", err) 13182 } 13183 13184 meta, err := msg.Metadata() 13185 if err != nil { 13186 t.Fatalf("meta failed: %s", err) 13187 } 13188 13189 // with DeliverLastPerSubject set this is never 0, but without setting that its 0 correctly 13190 if meta.NumPending != 0 { 13191 t.Fatalf("expected numpending 0 got %d", meta.NumPending) 13192 } 13193 }) 
13194 } 13195 } 13196 13197 func TestJetStreamPublishExpectNoMsg(t *testing.T) { 13198 s := RunBasicJetStreamServer(t) 13199 defer s.Shutdown() 13200 13201 nc, js := jsClientConnect(t, s) 13202 defer nc.Close() 13203 13204 _, err := js.AddStream(&nats.StreamConfig{ 13205 Name: "KV", 13206 Subjects: []string{"KV.>"}, 13207 MaxMsgsPerSubject: 5, 13208 }) 13209 if err != nil { 13210 t.Fatalf("add stream failed: %s", err) 13211 } 13212 13213 if _, err = js.Publish("KV.22", []byte("hello world")); err != nil { 13214 t.Fatalf("Unexpected error: %v", err) 13215 } 13216 13217 // This should succeed. 13218 m := nats.NewMsg("KV.33") 13219 m.Header.Set(JSExpectedLastSubjSeq, "0") 13220 if _, err := js.PublishMsg(m); err != nil { 13221 t.Fatalf("Unexpected error: %v", err) 13222 } 13223 13224 // This should fail. 13225 m = nats.NewMsg("KV.22") 13226 m.Header.Set(JSExpectedLastSubjSeq, "0") 13227 if _, err := js.PublishMsg(m); err == nil { 13228 t.Fatalf("Expected error: %v", err) 13229 } 13230 13231 if err := js.PurgeStream("KV"); err != nil { 13232 t.Fatalf("Unexpected purge error: %v", err) 13233 } 13234 13235 // This should succeed now. 13236 if _, err := js.PublishMsg(m); err != nil { 13237 t.Fatalf("Unexpected error: %v", err) 13238 } 13239 } 13240 13241 func TestJetStreamPullLargeBatchExpired(t *testing.T) { 13242 s := RunBasicJetStreamServer(t) 13243 defer s.Shutdown() 13244 13245 nc, js := jsClientConnect(t, s) 13246 defer nc.Close() 13247 13248 _, err := js.AddStream(&nats.StreamConfig{ 13249 Name: "TEST", 13250 Subjects: []string{"foo"}, 13251 }) 13252 if err != nil { 13253 t.Fatalf("add stream failed: %s", err) 13254 } 13255 13256 sub, err := js.PullSubscribe("foo", "dlc", nats.PullMaxWaiting(10), nats.MaxAckPending(10*50_000_000)) 13257 if err != nil { 13258 t.Fatalf("Error creating pull subscriber: %v", err) 13259 } 13260 13261 // Queue up 10 batch requests with timeout. 13262 rsubj := fmt.Sprintf(JSApiRequestNextT, "TEST", "dlc") 13263 req := &JSApiConsumerGetNextRequest{Batch: 50_000_000, Expires: 100 * time.Millisecond} 13264 jreq, _ := json.Marshal(req) 13265 for i := 0; i < 10; i++ { 13266 nc.PublishRequest(rsubj, "bar", jreq) 13267 } 13268 nc.Flush() 13269 13270 // Let them all expire. 13271 time.Sleep(150 * time.Millisecond) 13272 13273 // Now do another and measure how long to timeout and shutdown the server. 
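// The queued requests have already expired, so this fetch should only block for its own 100ms MaxWait; taking longer than 200ms would indicate the expired requests were not cleaned up promptly.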
13274 start := time.Now() 13275 sub.Fetch(1, nats.MaxWait(100*time.Millisecond)) 13276 s.Shutdown() 13277 13278 if delta := time.Since(start); delta > 200*time.Millisecond { 13279 t.Fatalf("Took too long to expire: %v", delta) 13280 } 13281 } 13282 13283 func TestJetStreamNegativeDupeWindow(t *testing.T) { 13284 s := RunBasicJetStreamServer(t) 13285 defer s.Shutdown() 13286 13287 nc, js := jsClientConnect(t, s) 13288 defer nc.Close() 13289 13290 // we incorrectly set MaxAge to -1 which then as a side effect sets dupe window to -1 which should fail 13291 _, err := js.AddStream(&nats.StreamConfig{ 13292 Name: "TEST", 13293 Subjects: nil, 13294 Retention: nats.WorkQueuePolicy, 13295 MaxConsumers: 1, 13296 MaxMsgs: -1, 13297 MaxBytes: -1, 13298 Discard: nats.DiscardNew, 13299 MaxAge: -1, 13300 MaxMsgsPerSubject: -1, 13301 MaxMsgSize: -1, 13302 Storage: nats.FileStorage, 13303 Replicas: 1, 13304 NoAck: false, 13305 }) 13306 if err == nil || err.Error() != "nats: duplicates window can not be negative" { 13307 t.Fatalf("Expected dupe window error got: %v", err) 13308 } 13309 } 13310 13311 // Issue #2551 13312 func TestJetStreamMirroredConsumerFailAfterRestart(t *testing.T) { 13313 s := RunBasicJetStreamServer(t) 13314 defer s.Shutdown() 13315 13316 nc, js := jsClientConnect(t, s) 13317 defer nc.Close() 13318 13319 _, err := js.AddStream(&nats.StreamConfig{ 13320 Name: "S1", 13321 Storage: nats.FileStorage, 13322 Subjects: []string{"foo", "bar", "baz"}, 13323 }) 13324 if err != nil { 13325 t.Fatalf("create failed: %s", err) 13326 } 13327 13328 _, err = js.AddStream(&nats.StreamConfig{ 13329 Name: "M1", 13330 Storage: nats.FileStorage, 13331 Mirror: &nats.StreamSource{Name: "S1"}, 13332 }) 13333 if err != nil { 13334 t.Fatalf("create failed: %s", err) 13335 } 13336 13337 _, err = js.AddConsumer("M1", &nats.ConsumerConfig{ 13338 Durable: "C1", 13339 FilterSubject: ">", 13340 AckPolicy: nats.AckExplicitPolicy, 13341 }) 13342 if err != nil { 13343 t.Fatalf("consumer create failed: %s", err) 13344 } 13345 13346 // Stop current 13347 sd := s.JetStreamConfig().StoreDir 13348 s.Shutdown() 13349 s.WaitForShutdown() 13350 13351 // Restart. 
13352 s = RunJetStreamServerOnPort(-1, sd) 13353 defer s.Shutdown() 13354 13355 nc, js = jsClientConnect(t, s) 13356 defer nc.Close() 13357 13358 _, err = js.StreamInfo("M1") 13359 if err != nil { 13360 t.Fatalf("%s did not exist after start: %s", "M1", err) 13361 } 13362 13363 _, err = js.ConsumerInfo("M1", "C1") 13364 if err != nil { 13365 t.Fatalf("C1 did not exist after start: %s", err) 13366 } 13367 } 13368 13369 func TestJetStreamDisabledLimitsEnforcementJWT(t *testing.T) { 13370 updateJwt := func(url string, akp nkeys.KeyPair, pubKey string, jwt string) { 13371 t.Helper() 13372 c := natsConnect(t, url, createUserCreds(t, nil, akp)) 13373 defer c.Close() 13374 if msg, err := c.Request(fmt.Sprintf(accUpdateEventSubjNew, pubKey), []byte(jwt), time.Second); err != nil { 13375 t.Fatal("error not expected in this test", err) 13376 } else { 13377 content := make(map[string]interface{}) 13378 if err := json.Unmarshal(msg.Data, &content); err != nil { 13379 t.Fatalf("%v", err) 13380 } else if _, ok := content["data"]; !ok { 13381 t.Fatalf("did not get an ok response got: %v", content) 13382 } 13383 } 13384 } 13385 // create system account 13386 sysKp, _ := nkeys.CreateAccount() 13387 sysPub, _ := sysKp.PublicKey() 13388 // limits to apply and check 13389 limits1 := jwt.JetStreamLimits{MemoryStorage: 1024, DiskStorage: 0, Streams: 1, Consumer: 2} 13390 akp, _ := nkeys.CreateAccount() 13391 aPub, _ := akp.PublicKey() 13392 claim := jwt.NewAccountClaims(aPub) 13393 claim.Limits.JetStreamLimits = limits1 13394 aJwt1, err := claim.Encode(oKp) 13395 require_NoError(t, err) 13396 dir := t.TempDir() 13397 storeDir1 := t.TempDir() 13398 conf := createConfFile(t, []byte(fmt.Sprintf(` 13399 listen: -1 13400 jetstream: {store_dir: '%s'} 13401 operator: %s 13402 resolver: { 13403 type: full 13404 dir: '%s' 13405 } 13406 system_account: %s 13407 `, storeDir1, ojwt, dir, sysPub))) 13408 s, _ := RunServerWithConfig(conf) 13409 defer s.Shutdown() 13410 updateJwt(s.ClientURL(), sysKp, aPub, aJwt1) 13411 c := natsConnect(t, s.ClientURL(), createUserCreds(t, nil, akp), nats.ReconnectWait(200*time.Millisecond)) 13412 defer c.Close() 13413 // keep using the same connection 13414 js, err := c.JetStream() 13415 require_NoError(t, err) 13416 _, err = js.AddStream(&nats.StreamConfig{ 13417 Name: "disk", 13418 Storage: nats.FileStorage, 13419 Subjects: []string{"disk"}, 13420 }) 13421 require_Error(t, err) 13422 } 13423 13424 func TestJetStreamDisabledLimitsEnforcement(t *testing.T) { 13425 storeDir1 := t.TempDir() 13426 conf1 := createConfFile(t, []byte(fmt.Sprintf(` 13427 listen: 127.0.0.1:-1 13428 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s'} 13429 accounts { 13430 one { 13431 jetstream: { 13432 mem: 1024 13433 disk: 0 13434 streams: 1 13435 consumers: 2 13436 } 13437 users [{user: one, password: password}] 13438 } 13439 } 13440 no_auth_user: one 13441 `, storeDir1))) 13442 s, _ := RunServerWithConfig(conf1) 13443 defer s.Shutdown() 13444 13445 c := natsConnect(t, s.ClientURL()) 13446 defer c.Close() 13447 // keep using the same connection 13448 js, err := c.JetStream() 13449 require_NoError(t, err) 13450 _, err = js.AddStream(&nats.StreamConfig{ 13451 Name: "disk", 13452 Storage: nats.FileStorage, 13453 Subjects: []string{"disk"}, 13454 }) 13455 require_Error(t, err) 13456 } 13457 13458 func TestJetStreamConsumerNoMsgPayload(t *testing.T) { 13459 s := RunBasicJetStreamServer(t) 13460 defer s.Shutdown() 13461 13462 nc, js := jsClientConnect(t, s) 13463 defer nc.Close() 13464 13465 _, err 
:= js.AddStream(&nats.StreamConfig{Name: "S"}) 13466 require_NoError(t, err) 13467 13468 msg := nats.NewMsg("S") 13469 msg.Header.Set("name", "derek") 13470 msg.Data = bytes.Repeat([]byte("A"), 128) 13471 for i := 0; i < 10; i++ { 13472 msg.Reply = _EMPTY_ // Fixed in Go client but not in the embedded one yet. 13473 _, err = js.PublishMsgAsync(msg) 13474 require_NoError(t, err) 13475 } 13476 13477 mset, err := s.GlobalAccount().lookupStream("S") 13478 require_NoError(t, err) 13479 13480 // Now create our consumer with no payload option. 13481 _, err = mset.addConsumer(&ConsumerConfig{DeliverSubject: "_d_", Durable: "d22", HeadersOnly: true}) 13482 require_NoError(t, err) 13483 13484 sub, err := js.SubscribeSync("S", nats.Durable("d22")) 13485 require_NoError(t, err) 13486 13487 for i := 0; i < 10; i++ { 13488 m, err := sub.NextMsg(time.Second) 13489 require_NoError(t, err) 13490 if len(m.Data) > 0 { 13491 t.Fatalf("Expected no payload") 13492 } 13493 if ms := m.Header.Get(JSMsgSize); ms != "128" { 13494 t.Fatalf("Expected a header with msg size, got %q", ms) 13495 } 13496 } 13497 } 13498 13499 // Issue #2607 13500 func TestJetStreamPurgeAndFilteredConsumers(t *testing.T) { 13501 s := RunBasicJetStreamServer(t) 13502 defer s.Shutdown() 13503 13504 nc, js := jsClientConnect(t, s) 13505 defer nc.Close() 13506 13507 _, err := js.AddStream(&nats.StreamConfig{Name: "S", Subjects: []string{"FOO.*"}}) 13508 require_NoError(t, err) 13509 13510 for i := 0; i < 10; i++ { 13511 _, err = js.Publish("FOO.adam", []byte("M")) 13512 require_NoError(t, err) 13513 _, err = js.Publish("FOO.eve", []byte("F")) 13514 require_NoError(t, err) 13515 } 13516 13517 ci, err := js.AddConsumer("S", &nats.ConsumerConfig{ 13518 Durable: "adam", 13519 AckPolicy: nats.AckExplicitPolicy, 13520 FilterSubject: "FOO.adam", 13521 }) 13522 require_NoError(t, err) 13523 if ci.NumPending != 10 { 13524 t.Fatalf("Expected NumPending to be 10, got %d", ci.NumPending) 13525 } 13526 13527 ci, err = js.AddConsumer("S", &nats.ConsumerConfig{ 13528 Durable: "eve", 13529 AckPolicy: nats.AckExplicitPolicy, 13530 FilterSubject: "FOO.eve", 13531 }) 13532 require_NoError(t, err) 13533 if ci.NumPending != 10 { 13534 t.Fatalf("Expected NumPending to be 10, got %d", ci.NumPending) 13535 } 13536 13537 // Also check unfiltered with interleaving messages. 13538 _, err = js.AddConsumer("S", &nats.ConsumerConfig{ 13539 Durable: "all", 13540 AckPolicy: nats.AckExplicitPolicy, 13541 }) 13542 require_NoError(t, err) 13543 13544 // Now purge only adam.
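// A subject-filtered purge through the raw purge API should remove only the FOO.adam messages, leaving the FOO.eve messages and the eve consumer's pending count untouched.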
13545 jr, _ := json.Marshal(&JSApiStreamPurgeRequest{Subject: "FOO.adam"}) 13546 _, err = nc.Request(fmt.Sprintf(JSApiStreamPurgeT, "S"), jr, time.Second) 13547 require_NoError(t, err) 13548 13549 si, err := js.StreamInfo("S") 13550 require_NoError(t, err) 13551 if si.State.Msgs != 10 { 13552 t.Fatalf("Expected 10 messages after purge, got %d", si.State.Msgs) 13553 } 13554 13555 ci, err = js.ConsumerInfo("S", "eve") 13556 require_NoError(t, err) 13557 if ci.NumPending != 10 { 13558 t.Fatalf("Expected NumPending to be 10, got %d", ci.NumPending) 13559 } 13560 13561 ci, err = js.ConsumerInfo("S", "adam") 13562 require_NoError(t, err) 13563 if ci.NumPending != 0 { 13564 t.Fatalf("Expected NumPending to be 0, got %d", ci.NumPending) 13565 } 13566 if ci.AckFloor.Stream != 20 { 13567 t.Fatalf("Expected AckFloor for stream to be 20, got %d", ci.AckFloor.Stream) 13568 } 13569 13570 ci, err = js.ConsumerInfo("S", "all") 13571 require_NoError(t, err) 13572 if ci.NumPending != 10 { 13573 t.Fatalf("Expected NumPending to be 10, got %d", ci.NumPending) 13574 } 13575 } 13576 13577 // Issue #2662 13578 func TestJetStreamLargeExpiresAndServerRestart(t *testing.T) { 13579 s := RunBasicJetStreamServer(t) 13580 defer s.Shutdown() 13581 13582 nc, js := jsClientConnect(t, s) 13583 defer nc.Close() 13584 13585 maxAge := 2 * time.Second 13586 13587 _, err := js.AddStream(&nats.StreamConfig{ 13588 Name: "S", 13589 Subjects: []string{"foo"}, 13590 MaxAge: maxAge, 13591 }) 13592 require_NoError(t, err) 13593 13594 start := time.Now() 13595 _, err = js.Publish("foo", []byte("ok")) 13596 require_NoError(t, err) 13597 13598 // Wait total of maxAge - 1s. 13599 time.Sleep(maxAge - time.Since(start) - time.Second) 13600 13601 // Stop current 13602 sd := s.JetStreamConfig().StoreDir 13603 s.Shutdown() 13604 // Restart. 13605 s = RunJetStreamServerOnPort(-1, sd) 13606 defer s.Shutdown() 13607 13608 nc, js = jsClientConnect(t, s) 13609 defer nc.Close() 13610 13611 checkFor(t, 5*time.Second, 10*time.Millisecond, func() error { 13612 si, err := js.StreamInfo("S") 13613 require_NoError(t, err) 13614 if si.State.Msgs != 0 { 13615 return fmt.Errorf("Expected no messages, got %d", si.State.Msgs) 13616 } 13617 return nil 13618 }) 13619 13620 if waited := time.Since(start); waited > maxAge+time.Second { 13621 t.Fatalf("Waited to long %v vs %v for messages to expire", waited, maxAge) 13622 } 13623 } 13624 13625 // Bug that was reported showing memstore not handling max per subject of 1. 
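// Publish 100 messages to a single subject and verify both storage types retain exactly MaxMsgsPerSubject of them.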
13626 func TestJetStreamMessagePerSubjectKeepBug(t *testing.T) { 13627 test := func(t *testing.T, keep int64, store nats.StorageType) { 13628 s := RunBasicJetStreamServer(t) 13629 defer s.Shutdown() 13630 13631 nc, js := jsClientConnect(t, s) 13632 defer nc.Close() 13633 13634 _, err := js.AddStream(&nats.StreamConfig{ 13635 Name: "TEST", 13636 MaxMsgsPerSubject: keep, 13637 Storage: store, 13638 }) 13639 require_NoError(t, err) 13640 13641 for i := 0; i < 100; i++ { 13642 _, err = js.Publish("TEST", []byte(fmt.Sprintf("test %d", i))) 13643 require_NoError(t, err) 13644 } 13645 13646 nfo, err := js.StreamInfo("TEST") 13647 require_NoError(t, err) 13648 13649 if nfo.State.Msgs != uint64(keep) { 13650 t.Fatalf("Expected %d message got %d", keep, nfo.State.Msgs) 13651 } 13652 } 13653 13654 t.Run("FileStore", func(t *testing.T) { 13655 t.Run("Keep 10", func(t *testing.T) { test(t, 10, nats.FileStorage) }) 13656 t.Run("Keep 1", func(t *testing.T) { test(t, 1, nats.FileStorage) }) 13657 }) 13658 13659 t.Run("MemStore", func(t *testing.T) { 13660 t.Run("Keep 10", func(t *testing.T) { test(t, 10, nats.MemoryStorage) }) 13661 t.Run("Keep 1", func(t *testing.T) { test(t, 1, nats.MemoryStorage) }) 13662 }) 13663 } 13664 13665 func TestJetStreamInvalidDeliverSubject(t *testing.T) { 13666 s := RunBasicJetStreamServer(t) 13667 defer s.Shutdown() 13668 13669 nc, js := jsClientConnect(t, s) 13670 defer nc.Close() 13671 13672 _, err := js.AddStream(&nats.StreamConfig{ 13673 Name: "TEST", 13674 }) 13675 require_NoError(t, err) 13676 13677 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{DeliverSubject: " x"}) 13678 require_Error(t, err, NewJSConsumerInvalidDeliverSubjectError()) 13679 } 13680 13681 func TestJetStreamMemoryCorruption(t *testing.T) { 13682 s := RunBasicJetStreamServer(t) 13683 defer s.Shutdown() 13684 13685 nc, js := jsClientConnect(t, s) 13686 defer nc.Close() 13687 13688 errCh := make(chan error, 10) 13689 nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, e error) { 13690 select { 13691 case errCh <- e: 13692 default: 13693 } 13694 }) 13695 13696 // The storage has to be MemoryStorage to show the issue 13697 kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "bucket", Storage: nats.MemoryStorage}) 13698 require_NoError(t, err) 13699 13700 w1, err := kv.WatchAll() 13701 require_NoError(t, err) 13702 13703 w2, err := kv.WatchAll(nats.MetaOnly()) 13704 require_NoError(t, err) 13705 13706 kv.Put("key1", []byte("aaa")) 13707 kv.Put("key1", []byte("aab")) 13708 kv.Put("key2", []byte("zza")) 13709 kv.Put("key2", []byte("zzb")) 13710 kv.Delete("key1") 13711 kv.Delete("key2") 13712 kv.Put("key1", []byte("aac")) 13713 kv.Put("key2", []byte("zzc")) 13714 kv.Delete("key1") 13715 kv.Delete("key2") 13716 kv.Purge("key1") 13717 kv.Purge("key2") 13718 13719 checkUpdates := func(updates <-chan nats.KeyValueEntry) { 13720 t.Helper() 13721 count := 0 13722 for { 13723 select { 13724 case <-updates: 13725 count++ 13726 if count == 13 { 13727 return 13728 } 13729 case <-time.After(time.Second): 13730 t.Fatal("Did not receive all updates") 13731 } 13732 } 13733 } 13734 checkUpdates(w1.Updates()) 13735 checkUpdates(w2.Updates()) 13736 13737 select { 13738 case e := <-errCh: 13739 t.Fatal(e) 13740 case <-time.After(250 * time.Millisecond): 13741 // OK 13742 } 13743 } 13744 13745 func TestJetStreamRecoverBadStreamSubjects(t *testing.T) { 13746 s := RunBasicJetStreamServer(t) 13747 sd := s.JetStreamConfig().StoreDir 13748 s.Shutdown() 13749 13750 f := filepath.Join(sd, "$G", "streams", "TEST") 
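// Create the stream's file store directly on disk with a subject that contains spaces, then restart the server and confirm all three subjects are still recovered.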
13751 fs, err := newFileStore(FileStoreConfig{StoreDir: f}, StreamConfig{ 13752 Name: "TEST", 13753 Subjects: []string{"foo", "bar", " baz "}, // baz has spaces 13754 Storage: FileStorage, 13755 }) 13756 require_NoError(t, err) 13757 fs.Stop() 13758 13759 s = RunJetStreamServerOnPort(-1, sd) 13760 defer s.Shutdown() 13761 13762 nc, js := jsClientConnect(t, s) 13763 defer nc.Close() 13764 13765 si, err := js.StreamInfo("TEST") 13766 require_NoError(t, err) 13767 13768 if len(si.Config.Subjects) != 3 { 13769 t.Fatalf("Expected to recover all subjects") 13770 } 13771 } 13772 13773 func TestJetStreamRecoverBadMirrorConfigWithSubjects(t *testing.T) { 13774 s := RunBasicJetStreamServer(t) 13775 defer s.Shutdown() 13776 sd := s.JetStreamConfig().StoreDir 13777 13778 // Client for API requests. 13779 nc, js := jsClientConnect(t, s) 13780 defer nc.Close() 13781 13782 // Origin 13783 _, err := js.AddStream(&nats.StreamConfig{ 13784 Name: "S", 13785 Subjects: []string{"foo"}, 13786 }) 13787 require_NoError(t, err) 13788 13789 s.Shutdown() 13790 13791 f := filepath.Join(sd, "$G", "streams", "M") 13792 fs, err := newFileStore(FileStoreConfig{StoreDir: f}, StreamConfig{ 13793 Name: "M", 13794 Subjects: []string{"foo", "bar", "baz"}, // Mirrors should not have spaces. 13795 Mirror: &StreamSource{Name: "S"}, 13796 Storage: FileStorage, 13797 }) 13798 require_NoError(t, err) 13799 fs.Stop() 13800 13801 s = RunJetStreamServerOnPort(-1, sd) 13802 defer s.Shutdown() 13803 13804 nc, js = jsClientConnect(t, s) 13805 defer nc.Close() 13806 13807 si, err := js.StreamInfo("M") 13808 require_NoError(t, err) 13809 13810 if len(si.Config.Subjects) != 0 { 13811 t.Fatalf("Expected to have NO subjects on mirror") 13812 } 13813 } 13814 13815 func TestJetStreamCrossAccountsDeliverSubjectInterest(t *testing.T) { 13816 conf := createConfFile(t, []byte(fmt.Sprintf(` 13817 listen: 127.0.0.1:-1 13818 jetstream: {max_mem_store: 4GB, max_file_store: 1TB, store_dir: %q} 13819 accounts: { 13820 A: { 13821 jetstream: enabled 13822 users: [ {user: a, password: pwd} ] 13823 exports [ 13824 { stream: "_d_" } # For the delivery subject for the consumer 13825 ] 13826 }, 13827 B: { 13828 users: [ {user: b, password: pwd} ] 13829 imports [ 13830 { stream: { account: A, subject: "_d_"}, to: "foo" } 13831 ] 13832 }, 13833 } 13834 `, t.TempDir()))) 13835 13836 s, _ := RunServerWithConfig(conf) 13837 defer s.Shutdown() 13838 13839 nc, js := jsClientConnect(t, s, nats.UserInfo("a", "pwd")) 13840 defer nc.Close() 13841 13842 _, err := js.AddStream(&nats.StreamConfig{ 13843 Name: "TEST", 13844 Subjects: []string{"foo"}, 13845 }) 13846 require_NoError(t, err) 13847 13848 msg, toSend := []byte("OK"), 100 13849 for i := 0; i < toSend; i++ { 13850 if _, err := js.PublishAsync("foo", msg); err != nil { 13851 t.Fatalf("Unexpected publish error: %v", err) 13852 } 13853 } 13854 select { 13855 case <-js.PublishAsyncComplete(): 13856 case <-time.After(5 * time.Second): 13857 t.Fatalf("Did not receive completion signal") 13858 } 13859 13860 // Now create the consumer as well here manually that we will want to reference from Account B. 13861 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dlc", DeliverSubject: "_d_"}) 13862 require_NoError(t, err) 13863 13864 // Wait to see if the stream import signals to deliver messages with no real subscriber interest. 
13865 time.Sleep(200 * time.Millisecond) 13866 13867 ci, err := js.ConsumerInfo("TEST", "dlc") 13868 require_NoError(t, err) 13869 13870 // Make sure we have not delivered any messages based on the import signal alone. 13871 if ci.NumPending != uint64(toSend) || ci.Delivered.Consumer != 0 { 13872 t.Fatalf("Bad consumer info, looks like we started delivering: %+v", ci) 13873 } 13874 13875 // Now create interest in the delivery subject through the import on account B. 13876 nc, _ = jsClientConnect(t, s, nats.UserInfo("b", "pwd")) 13877 defer nc.Close() 13878 sub, err := nc.SubscribeSync("foo") 13879 require_NoError(t, err) 13880 checkSubsPending(t, sub, toSend) 13881 13882 ci, err = js.ConsumerInfo("TEST", "dlc") 13883 require_NoError(t, err) 13884 13885 // Make sure our consumer info reflects we delivered the messages. 13886 if ci.NumPending != 0 || ci.Delivered.Consumer != uint64(toSend) { 13887 t.Fatalf("Bad consumer info, looks like we did not deliver: %+v", ci) 13888 } 13889 } 13890 13891 func TestJetStreamPullConsumerRequestCleanup(t *testing.T) { 13892 s := RunBasicJetStreamServer(t) 13893 defer s.Shutdown() 13894 13895 nc, js := jsClientConnect(t, s) 13896 defer nc.Close() 13897 13898 _, err := js.AddStream(&nats.StreamConfig{Name: "T", Storage: nats.MemoryStorage}) 13899 require_NoError(t, err) 13900 13901 _, err = js.AddConsumer("T", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}) 13902 require_NoError(t, err) 13903 13904 req := &JSApiConsumerGetNextRequest{Batch: 10, Expires: 100 * time.Millisecond} 13905 jreq, err := json.Marshal(req) 13906 require_NoError(t, err) 13907 13908 // Need interest otherwise the requests will be recycled based on that. 13909 _, err = nc.SubscribeSync("xx") 13910 require_NoError(t, err) 13911 13912 // Queue up 100 requests. 13913 rsubj := fmt.Sprintf(JSApiRequestNextT, "T", "dlc") 13914 for i := 0; i < 100; i++ { 13915 err = nc.PublishRequest(rsubj, "xx", jreq) 13916 require_NoError(t, err) 13917 } 13918 // Wait to expire 13919 time.Sleep(200 * time.Millisecond) 13920 13921 ci, err := js.ConsumerInfo("T", "dlc") 13922 require_NoError(t, err) 13923 13924 if ci.NumWaiting != 0 { 13925 t.Fatalf("Expected to see no waiting requests, got %d", ci.NumWaiting) 13926 } 13927 } 13928 13929 func TestJetStreamPullConsumerRequestMaximums(t *testing.T) { 13930 s := RunBasicJetStreamServer(t) 13931 defer s.Shutdown() 13932 13933 nc, _ := jsClientConnect(t, s) 13934 defer nc.Close() 13935 13936 // Need to do this via server for now. 13937 acc := s.GlobalAccount() 13938 mset, err := acc.addStream(&StreamConfig{Name: "TEST", Storage: MemoryStorage}) 13939 require_NoError(t, err) 13940 13941 _, err = mset.addConsumer(&ConsumerConfig{ 13942 Durable: "dlc", 13943 MaxRequestBatch: 10, 13944 MaxRequestMaxBytes: 10_000, 13945 MaxRequestExpires: time.Second, 13946 AckPolicy: AckExplicit, 13947 }) 13948 require_NoError(t, err) 13949 13950 genReq := func(b, mb int, e time.Duration) []byte { 13951 req := &JSApiConsumerGetNextRequest{Batch: b, Expires: e, MaxBytes: mb} 13952 jreq, err := json.Marshal(req) 13953 require_NoError(t, err) 13954 return jreq 13955 } 13956 13957 rsubj := fmt.Sprintf(JSApiRequestNextT, "TEST", "dlc") 13958 13959 // Exceeds max batch size. 13960 resp, err := nc.Request(rsubj, genReq(11, 0, 100*time.Millisecond), time.Second) 13961 require_NoError(t, err) 13962 if status := resp.Header.Get("Status"); status != "409" { 13963 t.Fatalf("Expected a 409 status code, got %q", status) 13964 } 13965 13966 // Exceeds max expires. 
13967 resp, err = nc.Request(rsubj, genReq(1, 0, 10*time.Minute), time.Second) 13968 require_NoError(t, err) 13969 if status := resp.Header.Get("Status"); status != "409" { 13970 t.Fatalf("Expected a 409 status code, got %q", status) 13971 } 13972 13973 // Exceeds max bytes. 13974 resp, err = nc.Request(rsubj, genReq(10, 10_000*2, 10*time.Minute), time.Second) 13975 require_NoError(t, err) 13976 if status := resp.Header.Get("Status"); status != "409" { 13977 t.Fatalf("Expected a 409 status code, got %q", status) 13978 } 13979 } 13980 13981 func TestJetStreamEphemeralPullConsumers(t *testing.T) { 13982 s := RunBasicJetStreamServer(t) 13983 defer s.Shutdown() 13984 13985 nc, js := jsClientConnect(t, s) 13986 defer nc.Close() 13987 13988 _, err := js.AddStream(&nats.StreamConfig{Name: "EC", Storage: nats.MemoryStorage}) 13989 require_NoError(t, err) 13990 13991 ci, err := js.AddConsumer("EC", &nats.ConsumerConfig{AckPolicy: nats.AckExplicitPolicy}) 13992 require_NoError(t, err) 13993 13994 mset, err := s.GlobalAccount().lookupStream("EC") 13995 require_NoError(t, err) 13996 o := mset.lookupConsumer(ci.Name) 13997 if o == nil { 13998 t.Fatalf("Error looking up consumer %q", ci.Name) 13999 } 14000 err = o.setInActiveDeleteThreshold(50 * time.Millisecond) 14001 require_NoError(t, err) 14002 14003 time.Sleep(100 * time.Millisecond) 14004 // Should no longer be around. 14005 if o := mset.lookupConsumer(ci.Name); o != nil { 14006 t.Fatalf("Expected consumer to be closed and removed") 14007 } 14008 14009 // Make sure timer keeps firing etc. and does not delete until interest is gone. 14010 ci, err = js.AddConsumer("EC", &nats.ConsumerConfig{AckPolicy: nats.AckExplicitPolicy}) 14011 require_NoError(t, err) 14012 if o = mset.lookupConsumer(ci.Name); o == nil { 14013 t.Fatalf("Error looking up consumer %q", ci.Name) 14014 } 14015 err = o.setInActiveDeleteThreshold(50 * time.Millisecond) 14016 require_NoError(t, err) 14017 14018 // Need interest otherwise the requests will be recycled based on no real interest. 14019 sub, err := nc.SubscribeSync("xx") 14020 require_NoError(t, err) 14021 14022 req := &JSApiConsumerGetNextRequest{Batch: 10, Expires: 250 * time.Millisecond} 14023 jreq, err := json.Marshal(req) 14024 require_NoError(t, err) 14025 rsubj := fmt.Sprintf(JSApiRequestNextT, "EC", ci.Name) 14026 err = nc.PublishRequest(rsubj, "xx", jreq) 14027 require_NoError(t, err) 14028 nc.Flush() 14029 14030 time.Sleep(100 * time.Millisecond) 14031 // Should still be alive here. 14032 if o := mset.lookupConsumer(ci.Name); o == nil { 14033 t.Fatalf("Expected consumer to still be active") 14034 } 14035 // Remove interest. 14036 sub.Unsubscribe() 14037 // Make sure this EPC goes away now. 
14038 checkFor(t, 5*time.Second, 10*time.Millisecond, func() error { 14039 if o := mset.lookupConsumer(ci.Name); o != nil { 14040 return fmt.Errorf("Consumer still present") 14041 } 14042 return nil 14043 }) 14044 } 14045 14046 func TestJetStreamEphemeralPullConsumersInactiveThresholdAndNoWait(t *testing.T) { 14047 s := RunBasicJetStreamServer(t) 14048 defer s.Shutdown() 14049 14050 nc, js := jsClientConnect(t, s) 14051 defer nc.Close() 14052 14053 _, err := js.AddStream(&nats.StreamConfig{Name: "ECIT", Storage: nats.MemoryStorage}) 14054 require_NoError(t, err) 14055 14056 ci, err := js.AddConsumer("ECIT", &nats.ConsumerConfig{ 14057 AckPolicy: nats.AckExplicitPolicy, 14058 InactiveThreshold: 100 * time.Millisecond, 14059 }) 14060 require_NoError(t, err) 14061 14062 // Send 10 no_wait requests every 25ms and consumer should still be present. 14063 req := &JSApiConsumerGetNextRequest{Batch: 10, NoWait: true} 14064 jreq, err := json.Marshal(req) 14065 require_NoError(t, err) 14066 rsubj := fmt.Sprintf(JSApiRequestNextT, "ECIT", ci.Name) 14067 for i := 0; i < 10; i++ { 14068 err = nc.PublishRequest(rsubj, "xx", jreq) 14069 require_NoError(t, err) 14070 nc.Flush() 14071 time.Sleep(25 * time.Millisecond) 14072 } 14073 14074 _, err = js.ConsumerInfo("ECIT", ci.Name) 14075 require_NoError(t, err) 14076 } 14077 14078 func TestJetStreamPullConsumerCrossAccountExpires(t *testing.T) { 14079 conf := createConfFile(t, []byte(fmt.Sprintf(` 14080 listen: 127.0.0.1:-1 14081 jetstream: {max_mem_store: 4GB, max_file_store: 1TB, store_dir: %q} 14082 accounts: { 14083 JS: { 14084 jetstream: enabled 14085 users: [ {user: dlc, password: foo} ] 14086 exports [ { service: "$JS.API.CONSUMER.MSG.NEXT.>", response: stream } ] 14087 }, 14088 IU: { 14089 users: [ {user: mh, password: bar} ] 14090 imports [ { service: { subject: "$JS.API.CONSUMER.MSG.NEXT.*.*", account: JS } }] 14091 # Re-export for dasiy chain test. 14092 exports [ { service: "$JS.API.CONSUMER.MSG.NEXT.>", response: stream } ] 14093 }, 14094 IU2: { 14095 users: [ {user: ik, password: bar} ] 14096 imports [ { service: { subject: "$JS.API.CONSUMER.MSG.NEXT.*.*", account: IU } } ] 14097 }, 14098 } 14099 `, t.TempDir()))) 14100 14101 s, _ := RunServerWithConfig(conf) 14102 defer s.Shutdown() 14103 14104 // Connect to JS account and create stream, put some messages into it. 14105 nc, js := jsClientConnect(t, s, nats.UserInfo("dlc", "foo")) 14106 defer nc.Close() 14107 14108 _, err := js.AddStream(&nats.StreamConfig{Name: "PC", Subjects: []string{"foo"}}) 14109 require_NoError(t, err) 14110 14111 toSend := 50 14112 for i := 0; i < toSend; i++ { 14113 _, err := js.Publish("foo", []byte("OK")) 14114 require_NoError(t, err) 14115 } 14116 14117 // Now create pull consumer. 14118 _, err = js.AddConsumer("PC", &nats.ConsumerConfig{Durable: "PC", AckPolicy: nats.AckExplicitPolicy}) 14119 require_NoError(t, err) 14120 14121 // Now access from the importing account. 14122 nc2, _ := jsClientConnect(t, s, nats.UserInfo("mh", "bar")) 14123 defer nc2.Close() 14124 14125 // Make sure batch request works properly with stream response. 14126 req := &JSApiConsumerGetNextRequest{Batch: 10} 14127 jreq, err := json.Marshal(req) 14128 require_NoError(t, err) 14129 rsubj := fmt.Sprintf(JSApiRequestNextT, "PC", "PC") 14130 // Make sure we can get a batch correctly etc. 14131 // This requires response stream above in the export definition. 
14132 sub, err := nc2.SubscribeSync("xx") 14133 require_NoError(t, err) 14134 err = nc2.PublishRequest(rsubj, "xx", jreq) 14135 require_NoError(t, err) 14136 checkSubsPending(t, sub, 10) 14137 14138 // Now let's queue up a bunch of requests and then delete interest to make sure the system 14139 // removes those requests. 14140 14141 // Purge stream 14142 err = js.PurgeStream("PC") 14143 require_NoError(t, err) 14144 14145 // Queue up 10 requests 14146 for i := 0; i < 10; i++ { 14147 err = nc2.PublishRequest(rsubj, "xx", jreq) 14148 require_NoError(t, err) 14149 } 14150 // Since using different connection, flush to make sure processed. 14151 nc2.Flush() 14152 14153 ci, err := js.ConsumerInfo("PC", "PC") 14154 require_NoError(t, err) 14155 if ci.NumWaiting != 10 { 14156 t.Fatalf("Expected to see 10 waiting requests, got %d", ci.NumWaiting) 14157 } 14158 14159 // Now remove interest and make sure requests are removed. 14160 sub.Unsubscribe() 14161 checkFor(t, 5*time.Second, 10*time.Millisecond, func() error { 14162 ci, err := js.ConsumerInfo("PC", "PC") 14163 require_NoError(t, err) 14164 if ci.NumWaiting != 0 { 14165 return fmt.Errorf("Requests still present") 14166 } 14167 return nil 14168 }) 14169 14170 // Now let's test that ephemerals will go away as well when interest etc is no longer around. 14171 ci, err = js.AddConsumer("PC", &nats.ConsumerConfig{AckPolicy: nats.AckExplicitPolicy}) 14172 require_NoError(t, err) 14173 14174 // Set the inactivity threshold by hand for now. 14175 jsacc, err := s.LookupAccount("JS") 14176 require_NoError(t, err) 14177 mset, err := jsacc.lookupStream("PC") 14178 require_NoError(t, err) 14179 o := mset.lookupConsumer(ci.Name) 14180 if o == nil { 14181 t.Fatalf("Error looking up consumer %q", ci.Name) 14182 } 14183 err = o.setInActiveDeleteThreshold(50 * time.Millisecond) 14184 require_NoError(t, err) 14185 14186 rsubj = fmt.Sprintf(JSApiRequestNextT, "PC", ci.Name) 14187 sub, err = nc2.SubscribeSync("zz") 14188 require_NoError(t, err) 14189 err = nc2.PublishRequest(rsubj, "zz", jreq) 14190 require_NoError(t, err) 14191 14192 // Wait past inactive threshold. 14193 time.Sleep(100 * time.Millisecond) 14194 // Make sure it is still there.. 14195 ci, err = js.ConsumerInfo("PC", ci.Name) 14196 require_NoError(t, err) 14197 if ci.NumWaiting != 1 { 14198 t.Fatalf("Expected to see 1 waiting request, got %d", ci.NumWaiting) 14199 } 14200 14201 // Now release interest. 14202 sub.Unsubscribe() 14203 checkFor(t, 5*time.Second, 10*time.Millisecond, func() error { 14204 _, err := js.ConsumerInfo("PC", ci.Name) 14205 if err == nil { 14206 return fmt.Errorf("Consumer still present") 14207 } 14208 return nil 14209 }) 14210 14211 // Now test daisy chained. 14212 toSend = 10 14213 for i := 0; i < toSend; i++ { 14214 _, err := js.Publish("foo", []byte("OK")) 14215 require_NoError(t, err) 14216 } 14217 14218 ci, err = js.AddConsumer("PC", &nats.ConsumerConfig{AckPolicy: nats.AckExplicitPolicy}) 14219 require_NoError(t, err) 14220 14221 // Set the inactivity threshold by hand for now. 14222 o = mset.lookupConsumer(ci.Name) 14223 if o == nil { 14224 t.Fatalf("Error looking up consumer %q", ci.Name) 14225 } 14226 // Make this one longer so we test request purge and ephemerals in same test. 14227 err = o.setInActiveDeleteThreshold(500 * time.Millisecond) 14228 require_NoError(t, err) 14229 14230 // Now access from the importing account. 
14231 nc3, _ := jsClientConnect(t, s, nats.UserInfo("ik", "bar")) 14232 defer nc3.Close() 14233 14234 sub, err = nc3.SubscribeSync("yy") 14235 require_NoError(t, err) 14236 14237 rsubj = fmt.Sprintf(JSApiRequestNextT, "PC", ci.Name) 14238 err = nc3.PublishRequest(rsubj, "yy", jreq) 14239 require_NoError(t, err) 14240 checkSubsPending(t, sub, 10) 14241 14242 // Purge stream 14243 err = js.PurgeStream("PC") 14244 require_NoError(t, err) 14245 14246 // Queue up 10 requests 14247 for i := 0; i < 10; i++ { 14248 err = nc3.PublishRequest(rsubj, "yy", jreq) 14249 require_NoError(t, err) 14250 } 14251 // Since using different connection, flush to make sure processed. 14252 nc3.Flush() 14253 14254 ci, err = js.ConsumerInfo("PC", ci.Name) 14255 require_NoError(t, err) 14256 if ci.NumWaiting != 10 { 14257 t.Fatalf("Expected to see 10 waiting requests, got %d", ci.NumWaiting) 14258 } 14259 14260 // Now remove interest and make sure requests are removed. 14261 sub.Unsubscribe() 14262 checkFor(t, 5*time.Second, 10*time.Millisecond, func() error { 14263 ci, err := js.ConsumerInfo("PC", ci.Name) 14264 require_NoError(t, err) 14265 if ci.NumWaiting != 0 { 14266 return fmt.Errorf("Requests still present") 14267 } 14268 return nil 14269 }) 14270 // Now make sure the ephemeral goes away too. 14271 // Ephemerals have jitter by default of up to 1s. 14272 checkFor(t, 6*time.Second, 10*time.Millisecond, func() error { 14273 _, err := js.ConsumerInfo("PC", ci.Name) 14274 if err == nil { 14275 return fmt.Errorf("Consumer still present") 14276 } 14277 return nil 14278 }) 14279 } 14280 14281 func TestJetStreamPullConsumerCrossAccountExpiresNoDataRace(t *testing.T) { 14282 conf := createConfFile(t, []byte(fmt.Sprintf(` 14283 listen: 127.0.0.1:-1 14284 jetstream: {max_mem_store: 4GB, max_file_store: 1TB, store_dir: %q} 14285 accounts: { 14286 JS: { 14287 jetstream: enabled 14288 users: [ {user: dlc, password: foo} ] 14289 exports [ { service: "$JS.API.CONSUMER.MSG.NEXT.>", response: stream } ] 14290 }, 14291 IU: { 14292 jetstream: enabled 14293 users: [ {user: ik, password: bar} ] 14294 imports [ { service: { subject: "$JS.API.CONSUMER.MSG.NEXT.*.*", account: JS } }] 14295 }, 14296 } 14297 `, t.TempDir()))) 14298 14299 test := func() { 14300 s, _ := RunServerWithConfig(conf) 14301 defer s.Shutdown() 14302 14303 // Connect to JS account and create stream, put some messages into it. 14304 nc, js := jsClientConnect(t, s, nats.UserInfo("dlc", "foo")) 14305 defer nc.Close() 14306 14307 _, err := js.AddStream(&nats.StreamConfig{Name: "PC", Subjects: []string{"foo"}}) 14308 require_NoError(t, err) 14309 14310 toSend := 100 14311 for i := 0; i < toSend; i++ { 14312 _, err := js.Publish("foo", []byte("OK")) 14313 require_NoError(t, err) 14314 } 14315 14316 // Create pull consumer. 14317 _, err = js.AddConsumer("PC", &nats.ConsumerConfig{Durable: "PC", AckPolicy: nats.AckExplicitPolicy}) 14318 require_NoError(t, err) 14319 14320 // Now access from the importing account. 
14321 nc2, _ := jsClientConnect(t, s, nats.UserInfo("ik", "bar")) 14322 defer nc2.Close() 14323 14324 req := &JSApiConsumerGetNextRequest{Batch: 1} 14325 jreq, err := json.Marshal(req) 14326 require_NoError(t, err) 14327 rsubj := fmt.Sprintf(JSApiRequestNextT, "PC", "PC") 14328 sub, err := nc2.SubscribeSync("xx") 14329 require_NoError(t, err) 14330 14331 wg := sync.WaitGroup{} 14332 wg.Add(1) 14333 go func() { 14334 time.Sleep(5 * time.Millisecond) 14335 sub.Unsubscribe() 14336 wg.Done() 14337 }() 14338 for i := 0; i < toSend; i++ { 14339 nc2.PublishRequest(rsubj, "xx", jreq) 14340 } 14341 wg.Wait() 14342 } 14343 // Need to rerun this test several times to trigger the race (which would then possibly show up as a panic 14344 // such as: "fatal error: concurrent map read and map write"). 14345 for iter := 0; iter < 10; iter++ { 14346 test() 14347 } 14348 } 14349 14350 // This tests account export/import replies across a LN connection with account import/export 14351 // on both sides of the LN. 14352 func TestJetStreamPullConsumerCrossAccountsAndLeafNodes(t *testing.T) { 14353 conf := createConfFile(t, []byte(fmt.Sprintf(` 14354 server_name: SJS 14355 listen: 127.0.0.1:-1 14356 jetstream: {max_mem_store: 4GB, max_file_store: 1TB, domain: JSD, store_dir: %q } 14357 accounts: { 14358 JS: { 14359 jetstream: enabled 14360 users: [ {user: dlc, password: foo} ] 14361 exports [ { service: "$JS.API.CONSUMER.MSG.NEXT.>", response: stream } ] 14362 }, 14363 IU: { 14364 users: [ {user: mh, password: bar} ] 14365 imports [ { service: { subject: "$JS.API.CONSUMER.MSG.NEXT.*.*", account: JS } }] 14366 }, 14367 } 14368 leaf { listen: "127.0.0.1:-1" } 14369 `, t.TempDir()))) 14370 14371 s, o := RunServerWithConfig(conf) 14372 defer s.Shutdown() 14373 14374 conf2 := createConfFile(t, []byte(fmt.Sprintf(` 14375 server_name: SLN 14376 listen: 127.0.0.1:-1 14377 accounts: { 14378 A: { 14379 users: [ {user: l, password: p} ] 14380 exports [ { service: "$JS.JSD.API.CONSUMER.MSG.NEXT.>", response: stream } ] 14381 }, 14382 B: { 14383 users: [ {user: m, password: p} ] 14384 imports [ { service: { subject: "$JS.JSD.API.CONSUMER.MSG.NEXT.*.*", account: A } }] 14385 }, 14386 } 14387 # bind local A to IU account on other side of LN. 14388 leaf { remotes [ { url: nats://mh:bar@127.0.0.1:%d; account: A } ] } 14389 `, o.LeafNode.Port))) 14390 14391 s2, _ := RunServerWithConfig(conf2) 14392 defer s2.Shutdown() 14393 14394 checkLeafNodeConnectedCount(t, s, 1) 14395 14396 // Connect to JS account, create stream and consumer and put in some messages. 14397 nc, js := jsClientConnect(t, s, nats.UserInfo("dlc", "foo")) 14398 defer nc.Close() 14399 14400 _, err := js.AddStream(&nats.StreamConfig{Name: "PC", Subjects: []string{"foo"}}) 14401 require_NoError(t, err) 14402 14403 toSend := 10 14404 for i := 0; i < toSend; i++ { 14405 _, err := js.Publish("foo", []byte("OK")) 14406 require_NoError(t, err) 14407 } 14408 14409 // Now create durable pull consumer. 14410 _, err = js.AddConsumer("PC", &nats.ConsumerConfig{Durable: "PC", AckPolicy: nats.AckExplicitPolicy}) 14411 require_NoError(t, err) 14412 14413 // Now access from the account on the leafnode, so importing on both sides and crossing a leafnode connection. 14414 nc2, _ := jsClientConnect(t, s2, nats.UserInfo("m", "p")) 14415 defer nc2.Close() 14416 14417 req := &JSApiConsumerGetNextRequest{Batch: toSend, Expires: 500 * time.Millisecond} 14418 jreq, err := json.Marshal(req) 14419 require_NoError(t, err) 14420 14421 // Make sure we can get a batch correctly etc.
14422 // This requires response stream above in the export definition. 14423 sub, err := nc2.SubscribeSync("xx") 14424 require_NoError(t, err) 14425 14426 rsubj := "$JS.JSD.API.CONSUMER.MSG.NEXT.PC.PC" 14427 err = nc2.PublishRequest(rsubj, "xx", jreq) 14428 require_NoError(t, err) 14429 checkSubsPending(t, sub, 10) 14430 14431 // Queue up a bunch of requests. 14432 for i := 0; i < 10; i++ { 14433 err = nc2.PublishRequest(rsubj, "xx", jreq) 14434 require_NoError(t, err) 14435 } 14436 checkFor(t, 5*time.Second, 10*time.Millisecond, func() error { 14437 ci, err := js.ConsumerInfo("PC", "PC") 14438 require_NoError(t, err) 14439 if ci.NumWaiting != 10 { 14440 return fmt.Errorf("Expected to see 10 waiting requests, got %d", ci.NumWaiting) 14441 } 14442 return nil 14443 }) 14444 14445 // Remove interest. 14446 sub.Unsubscribe() 14447 // Make sure requests go away eventually after they expire. 14448 checkFor(t, 5*time.Second, 10*time.Millisecond, func() error { 14449 ci, err := js.ConsumerInfo("PC", "PC") 14450 require_NoError(t, err) 14451 if ci.NumWaiting != 0 { 14452 return fmt.Errorf("Expected to see no waiting requests, got %d", ci.NumWaiting) 14453 } 14454 return nil 14455 }) 14456 } 14457 14458 // This test is to explicitly test for all combinations of pull consumer behavior. 14459 // 1. Long poll, will be used to emulate push. A request is only invalidated when batch is filled, it expires, or we lose interest. 14460 // 2. Batch 1, will return no messages or a message. Works today. 14461 // 3. Conditional wait, or one shot. This is what the clients do when they do a fetch(). 14462 // They expect to wait up to a given time for any messages but will return once they have any to deliver, so partial fills. 14463 // 4. Try, which never waits at all. 14464 func TestJetStreamPullConsumersOneShotBehavior(t *testing.T) { 14465 s := RunBasicJetStreamServer(t) 14466 defer s.Shutdown() 14467 14468 // Client for API requests. 14469 nc, js := jsClientConnect(t, s) 14470 defer nc.Close() 14471 14472 _, err := js.AddStream(&nats.StreamConfig{ 14473 Name: "TEST", 14474 Subjects: []string{"foo"}, 14475 }) 14476 require_NoError(t, err) 14477 14478 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 14479 Durable: "dlc", 14480 AckPolicy: nats.AckExplicitPolicy, 14481 FilterSubject: "foo", 14482 }) 14483 require_NoError(t, err) 14484 14485 // We will do low level requests by hand for this test so as not to depend on any client impl. 14486 rsubj := fmt.Sprintf(JSApiRequestNextT, "TEST", "dlc") 14487 14488 getNext := func(batch int, expires time.Duration, noWait bool) (numMsgs int, elapsed time.Duration, hdr *nats.Header) { 14489 t.Helper() 14490 req := &JSApiConsumerGetNextRequest{Batch: batch, Expires: expires, NoWait: noWait} 14491 jreq, err := json.Marshal(req) 14492 require_NoError(t, err) 14493 // Create listener. 14494 reply, msgs := nats.NewInbox(), make(chan *nats.Msg, batch) 14495 sub, err := nc.ChanSubscribe(reply, msgs) 14496 require_NoError(t, err) 14497 defer sub.Unsubscribe() 14498 14499 // Send request.
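// Collect either the full requested batch or a single status-only message (e.g. a 404 or 408 header), returning the message count, elapsed time, and any header received.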
14500 start := time.Now() 14501 err = nc.PublishRequest(rsubj, reply, jreq) 14502 require_NoError(t, err) 14503 14504 for { 14505 select { 14506 case m := <-msgs: 14507 if len(m.Data) == 0 && m.Header != nil { 14508 return numMsgs, time.Since(start), &m.Header 14509 } 14510 numMsgs++ 14511 if numMsgs >= batch { 14512 return numMsgs, time.Since(start), nil 14513 } 14514 case <-time.After(expires + 250*time.Millisecond): 14515 t.Fatalf("Did not receive all the msgs in time") 14516 } 14517 } 14518 } 14519 14520 expect := func(batch int, expires time.Duration, noWait bool, ne int, he *nats.Header, lt time.Duration, gt time.Duration) { 14521 t.Helper() 14522 n, e, h := getNext(batch, expires, noWait) 14523 if n != ne { 14524 t.Fatalf("Expected %d msgs, got %d", ne, n) 14525 } 14526 if !reflect.DeepEqual(h, he) { 14527 t.Fatalf("Expected %+v hdr, got %+v", he, h) 14528 } 14529 if lt > 0 && e > lt { 14530 t.Fatalf("Expected elapsed of %v to be less than %v", e, lt) 14531 } 14532 if gt > 0 && e < gt { 14533 t.Fatalf("Expected elapsed of %v to be greater than %v", e, gt) 14534 } 14535 } 14536 expectAfter := func(batch int, expires time.Duration, noWait bool, ne int, he *nats.Header, gt time.Duration) { 14537 t.Helper() 14538 expect(batch, expires, noWait, ne, he, 0, gt) 14539 } 14540 expectInstant := func(batch int, expires time.Duration, noWait bool, ne int, he *nats.Header) { 14541 t.Helper() 14542 expect(batch, expires, noWait, ne, he, 5*time.Millisecond, 0) 14543 } 14544 expectOK := func(batch int, expires time.Duration, noWait bool, ne int) { 14545 t.Helper() 14546 expectInstant(batch, expires, noWait, ne, nil) 14547 } 14548 14549 noMsgs := &nats.Header{"Status": []string{"404"}, "Description": []string{"No Messages"}} 14550 reqTimeout := &nats.Header{"Status": []string{"408"}, "Description": []string{"Request Timeout"}, "Nats-Pending-Bytes": []string{"0"}, "Nats-Pending-Messages": []string{"1"}} 14551 14552 // We are empty here, meaning no messages available. 14553 // Do not wait, should get noMsgs. 14554 expectInstant(1, 0, true, 0, noMsgs) 14555 // We should wait here for the full second. 14556 expectAfter(1, 250*time.Millisecond, false, 0, reqTimeout, 250*time.Millisecond) 14557 // This should also wait since no messages are available. This is the one shot scenario, or wait for at least a message if none are there. 14558 expectAfter(1, 500*time.Millisecond, true, 0, reqTimeout, 500*time.Millisecond) 14559 14560 // Now let's put some messages into the system. 14561 for i := 0; i < 20; i++ { 14562 _, err := js.Publish("foo", []byte("HELLO")) 14563 require_NoError(t, err) 14564 } 14565 14566 // Now run same 3 scenarios. 14567 expectOK(1, 0, true, 1) 14568 expectOK(5, 500*time.Millisecond, false, 5) 14569 expectOK(5, 500*time.Millisecond, true, 5) 14570 } 14571 14572 func TestJetStreamPullConsumersMultipleRequestsExpireOutOfOrder(t *testing.T) { 14573 s := RunBasicJetStreamServer(t) 14574 defer s.Shutdown() 14575 14576 // Client for API requests. 14577 nc, js := jsClientConnect(t, s) 14578 defer nc.Close() 14579 14580 _, err := js.AddStream(&nats.StreamConfig{ 14581 Name: "TEST", 14582 Subjects: []string{"foo"}, 14583 }) 14584 require_NoError(t, err) 14585 14586 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 14587 Durable: "dlc", 14588 AckPolicy: nats.AckExplicitPolicy, 14589 FilterSubject: "foo", 14590 }) 14591 require_NoError(t, err) 14592 14593 // We will now queue up 4 requests. All should expire but they will do so out of order. 
14594 // We want to make sure we get them in correct order. 14595 rsubj := fmt.Sprintf(JSApiRequestNextT, "TEST", "dlc") 14596 sub, err := nc.SubscribeSync("i.*") 14597 require_NoError(t, err) 14598 defer sub.Unsubscribe() 14599 14600 for _, expires := range []time.Duration{200, 100, 25, 75} { 14601 reply := fmt.Sprintf("i.%d", expires) 14602 req := &JSApiConsumerGetNextRequest{Expires: expires * time.Millisecond} 14603 jreq, err := json.Marshal(req) 14604 require_NoError(t, err) 14605 err = nc.PublishRequest(rsubj, reply, jreq) 14606 require_NoError(t, err) 14607 } 14608 start := time.Now() 14609 checkSubsPending(t, sub, 4) 14610 elapsed := time.Since(start) 14611 14612 if elapsed < 200*time.Millisecond || elapsed > 500*time.Millisecond { 14613 t.Fatalf("Expected elapsed to be close to %v, but got %v", 200*time.Millisecond, elapsed) 14614 } 14615 14616 var rs []string 14617 for i := 0; i < 4; i++ { 14618 m, err := sub.NextMsg(0) 14619 require_NoError(t, err) 14620 rs = append(rs, m.Subject) 14621 } 14622 if expected := []string{"i.25", "i.75", "i.100", "i.200"}; !reflect.DeepEqual(rs, expected) { 14623 t.Fatalf("Received in wrong order, wanted %+v, got %+v", expected, rs) 14624 } 14625 } 14626 14627 func TestJetStreamConsumerUpdateSurvival(t *testing.T) { 14628 s := RunBasicJetStreamServer(t) 14629 defer s.Shutdown() 14630 14631 nc, js := jsClientConnect(t, s) 14632 defer nc.Close() 14633 14634 _, err := js.AddStream(&nats.StreamConfig{Name: "X"}) 14635 require_NoError(t, err) 14636 14637 // First create a consumer with max ack pending. 14638 _, err = js.AddConsumer("X", &nats.ConsumerConfig{ 14639 Durable: "dlc", 14640 AckPolicy: nats.AckExplicitPolicy, 14641 MaxAckPending: 1024, 14642 }) 14643 require_NoError(t, err) 14644 14645 // Now do same name but pull. This will update the MaxAcKPending 14646 ci, err := js.UpdateConsumer("X", &nats.ConsumerConfig{ 14647 Durable: "dlc", 14648 AckPolicy: nats.AckExplicitPolicy, 14649 MaxAckPending: 22, 14650 }) 14651 require_NoError(t, err) 14652 14653 if ci.Config.MaxAckPending != 22 { 14654 t.Fatalf("Expected MaxAckPending to be 22, got %d", ci.Config.MaxAckPending) 14655 } 14656 14657 // Make sure this survives across a restart. 14658 sd := s.JetStreamConfig().StoreDir 14659 s.Shutdown() 14660 // Restart. 14661 s = RunJetStreamServerOnPort(-1, sd) 14662 defer s.Shutdown() 14663 14664 nc, js = jsClientConnect(t, s) 14665 defer nc.Close() 14666 14667 ci, err = js.ConsumerInfo("X", "dlc") 14668 require_NoError(t, err) 14669 14670 if ci.Config.MaxAckPending != 22 { 14671 t.Fatalf("Expected MaxAckPending to be 22, got %d", ci.Config.MaxAckPending) 14672 } 14673 } 14674 14675 func TestJetStreamNakRedeliveryWithNoWait(t *testing.T) { 14676 s := RunBasicJetStreamServer(t) 14677 defer s.Shutdown() 14678 14679 nc, js := jsClientConnect(t, s) 14680 defer nc.Close() 14681 14682 _, err := js.AddStream(&nats.StreamConfig{ 14683 Name: "TEST", 14684 Subjects: []string{"foo"}, 14685 }) 14686 require_NoError(t, err) 14687 14688 _, err = js.Publish("foo", []byte("NAK")) 14689 require_NoError(t, err) 14690 14691 ccReq := &CreateConsumerRequest{ 14692 Stream: "TEST", 14693 Config: ConsumerConfig{ 14694 Durable: "dlc", 14695 AckPolicy: AckExplicit, 14696 MaxDeliver: 3, 14697 AckWait: time.Minute, 14698 BackOff: []time.Duration{5 * time.Second, 10 * time.Second}, 14699 }, 14700 } 14701 // Do by hand for now until Go client catches up. 
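// Create the consumer via the raw durable create API so BackOff can be set, then NAK the first delivery with an explicit 500ms delay and poll with no_wait requests until it is redelivered.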
14702 req, err := json.Marshal(ccReq) 14703 require_NoError(t, err) 14704 resp, err := nc.Request(fmt.Sprintf(JSApiDurableCreateT, "TEST", "dlc"), req, time.Second) 14705 require_NoError(t, err) 14706 var ccResp JSApiConsumerCreateResponse 14707 err = json.Unmarshal(resp.Data, &ccResp) 14708 require_NoError(t, err) 14709 if ccResp.Error != nil { 14710 t.Fatalf("Unexpected error: %+v", ccResp.Error) 14711 } 14712 14713 rsubj := fmt.Sprintf(JSApiRequestNextT, "TEST", "dlc") 14714 m, err := nc.Request(rsubj, nil, time.Second) 14715 require_NoError(t, err) 14716 14717 // NAK this message. 14718 delay, err := json.Marshal(&ConsumerNakOptions{Delay: 500 * time.Millisecond}) 14719 require_NoError(t, err) 14720 dnak := []byte(fmt.Sprintf("%s %s", AckNak, delay)) 14721 m.Respond(dnak) 14722 14723 // This message should come back to us after 500ms. If we do a one-shot request, with NoWait and Expires 14724 // this will do the right thing and we get the message. 14725 // What we want to test here is a true NoWait request with Expires==0 and eventually seeing the message be redelivered. 14726 expires := time.Now().Add(time.Second) 14727 for time.Now().Before(expires) { 14728 m, err = nc.Request(rsubj, []byte(`{"batch":1, "no_wait": true}`), time.Second) 14729 require_NoError(t, err) 14730 if len(m.Data) > 0 { 14731 // We got our message, so we are good. 14732 return 14733 } 14734 // So we do not spin. 14735 time.Sleep(100 * time.Millisecond) 14736 } 14737 t.Fatalf("Did not get the message in time") 14738 } 14739 14740 // Test that we properly enforce per subject msg limits when DiscardNew is set. 14741 // DiscardNew should only apply to stream limits, subject based limits should always be DiscardOld. 14742 func TestJetStreamMaxMsgsPerSubjectWithDiscardNew(t *testing.T) { 14743 msc := StreamConfig{ 14744 Name: "TEST", 14745 Subjects: []string{"foo", "bar", "baz", "x"}, 14746 Discard: DiscardNew, 14747 Storage: MemoryStorage, 14748 MaxMsgsPer: 4, 14749 MaxMsgs: 10, 14750 MaxBytes: 500, 14751 } 14752 fsc := msc 14753 fsc.Storage = FileStorage 14754 14755 cases := []struct { 14756 name string 14757 mconfig *StreamConfig 14758 }{ 14759 {"MemoryStore", &msc}, 14760 {"FileStore", &fsc}, 14761 } 14762 14763 for _, c := range cases { 14764 t.Run(c.name, func(t *testing.T) { 14765 s := RunBasicJetStreamServer(t) 14766 defer s.Shutdown() 14767 14768 mset, err := s.GlobalAccount().addStream(c.mconfig) 14769 require_NoError(t, err) 14770 defer mset.delete() 14771 14772 // Client for API requests. 14773 nc, js := jsClientConnect(t, s) 14774 defer nc.Close() 14775 14776 pubAndCheck := func(subj string, num int, expectedNumMsgs uint64) { 14777 t.Helper() 14778 for i := 0; i < num; i++ { 14779 _, err = js.Publish(subj, []byte("TSLA")) 14780 require_NoError(t, err) 14781 } 14782 si, err := js.StreamInfo("TEST") 14783 require_NoError(t, err) 14784 if si.State.Msgs != expectedNumMsgs { 14785 t.Fatalf("Expected %d msgs, got %d", expectedNumMsgs, si.State.Msgs) 14786 } 14787 } 14788 14789 pubExpectErr := func(subj string, sz int) { 14790 t.Helper() 14791 _, err = js.Publish(subj, bytes.Repeat([]byte("X"), sz)) 14792 require_Error(t, err, errors.New("nats: maximum bytes exceeded"), errors.New("nats: maximum messages exceeded")) 14793 } 14794 14795 pubAndCheck("foo", 1, 1) 14796 // We should treat this as DiscardOld and only have 4 msgs after. 14797 pubAndCheck("foo", 4, 4) 14798 // Same thing here, shoud only have 4 foo and 4 bar for total of 8. 14799 pubAndCheck("bar", 8, 8) 14800 // We have 8 here, so only 2 left. 
If we add in a new subject when we have room it will be accepted. 14801 pubAndCheck("baz", 2, 10) 14802 // Now we are full, but makeup is foo-4 bar-4 baz-2. 14803 // We can add to foo and bar since they are at their max and adding new ones there stays the same in terms of total of 10. 14804 pubAndCheck("foo", 1, 10) 14805 pubAndCheck("bar", 1, 10) 14806 // Try to send a large message under an established subject that will exceed the 500 maximum. 14807 // Even though we have a bar subject and its at its maximum, the message to be dropped is not big enough, so this should err. 14808 pubExpectErr("bar", 300) 14809 // Also even though we have room bytes wise, if we introduce a new subject this should fail too on msg limit exceeded. 14810 pubExpectErr("x", 2) 14811 }) 14812 } 14813 } 14814 14815 func TestJetStreamStreamInfoSubjectsDetails(t *testing.T) { 14816 s := RunBasicJetStreamServer(t) 14817 defer s.Shutdown() 14818 14819 nc, js := jsClientConnect(t, s) 14820 defer nc.Close() 14821 14822 getInfo := func(t *testing.T, filter string) *StreamInfo { 14823 t.Helper() 14824 // Need to grab StreamInfo by hand for now. 14825 req, err := json.Marshal(&JSApiStreamInfoRequest{SubjectsFilter: filter}) 14826 require_NoError(t, err) 14827 resp, err := nc.Request(fmt.Sprintf(JSApiStreamInfoT, "TEST"), req, time.Second) 14828 require_NoError(t, err) 14829 var si StreamInfo 14830 err = json.Unmarshal(resp.Data, &si) 14831 require_NoError(t, err) 14832 if si.State.NumSubjects != 3 { 14833 t.Fatalf("Expected NumSubjects to be 3, but got %d", si.State.NumSubjects) 14834 } 14835 return &si 14836 } 14837 14838 testSubjects := func(t *testing.T, st nats.StorageType) { 14839 _, err := js.AddStream(&nats.StreamConfig{ 14840 Name: "TEST", 14841 Subjects: []string{"*"}, 14842 Storage: st, 14843 }) 14844 require_NoError(t, err) 14845 defer js.DeleteStream("TEST") 14846 14847 counts, msg := []int{22, 33, 44}, []byte("ok") 14848 // Now place msgs, foo-22, bar-33 and baz-44. 14849 for i, subj := range []string{"foo", "bar", "baz"} { 14850 for n := 0; n < counts[i]; n++ { 14851 _, err = js.Publish(subj, msg) 14852 require_NoError(t, err) 14853 } 14854 } 14855 14856 // Test all subjects first. 14857 expected := map[string]uint64{"foo": 22, "bar": 33, "baz": 44} 14858 if si := getInfo(t, nats.AllKeys); !reflect.DeepEqual(si.State.Subjects, expected) { 14859 t.Fatalf("Expected subjects of %+v, but got %+v", expected, si.State.Subjects) 14860 } 14861 if si := getInfo(t, "*"); !reflect.DeepEqual(si.State.Subjects, expected) { 14862 t.Fatalf("Expected subjects of %+v, but got %+v", expected, si.State.Subjects) 14863 } 14864 // Filtered to 1. 14865 expected = map[string]uint64{"foo": 22} 14866 if si := getInfo(t, "foo"); !reflect.DeepEqual(si.State.Subjects, expected) { 14867 t.Fatalf("Expected subjects of %+v, but got %+v", expected, si.State.Subjects) 14868 } 14869 } 14870 14871 t.Run("MemoryStore", func(t *testing.T) { testSubjects(t, nats.MemoryStorage) }) 14872 t.Run("FileStore", func(t *testing.T) { testSubjects(t, nats.FileStorage) }) 14873 } 14874 14875 func TestJetStreamStreamInfoSubjectsDetailsWithDeleteAndPurge(t *testing.T) { 14876 s := RunBasicJetStreamServer(t) 14877 defer s.Shutdown() 14878 14879 nc, js := jsClientConnect(t, s) 14880 defer nc.Close() 14881 14882 getInfo := func(t *testing.T, filter string) *StreamInfo { 14883 t.Helper() 14884 // Need to grab StreamInfo by hand for now. 
14885 req, err := json.Marshal(&JSApiStreamInfoRequest{SubjectsFilter: filter}) 14886 require_NoError(t, err) 14887 resp, err := nc.Request(fmt.Sprintf(JSApiStreamInfoT, "TEST"), req, time.Second) 14888 require_NoError(t, err) 14889 var si StreamInfo 14890 err = json.Unmarshal(resp.Data, &si) 14891 require_NoError(t, err) 14892 return &si 14893 } 14894 14895 checkResults := func(t *testing.T, expected map[string]uint64) { 14896 t.Helper() 14897 si := getInfo(t, nats.AllKeys) 14898 if !reflect.DeepEqual(si.State.Subjects, expected) { 14899 t.Fatalf("Expected subjects of %+v, but got %+v", expected, si.State.Subjects) 14900 } 14901 if si.State.NumSubjects != len(expected) { 14902 t.Fatalf("Expected NumSubjects to be %d, but got %d", len(expected), si.State.NumSubjects) 14903 } 14904 } 14905 14906 testSubjects := func(t *testing.T, st nats.StorageType) { 14907 _, err := js.AddStream(&nats.StreamConfig{ 14908 Name: "TEST", 14909 Subjects: []string{"*"}, 14910 Storage: st, 14911 }) 14912 require_NoError(t, err) 14913 defer js.DeleteStream("TEST") 14914 14915 msg := []byte("ok") 14916 js.Publish("foo", msg) // 1 14917 js.Publish("foo", msg) // 2 14918 js.Publish("bar", msg) // 3 14919 js.Publish("baz", msg) // 4 14920 js.Publish("baz", msg) // 5 14921 js.Publish("bar", msg) // 6 14922 js.Publish("bar", msg) // 7 14923 14924 checkResults(t, map[string]uint64{"foo": 2, "bar": 3, "baz": 2}) 14925 14926 // Now delete some messages. 14927 js.DeleteMsg("TEST", 6) 14928 14929 checkResults(t, map[string]uint64{"foo": 2, "bar": 2, "baz": 2}) 14930 14931 // Delete and add right back, so no-op 14932 js.DeleteMsg("TEST", 5) // baz 14933 js.Publish("baz", msg) // 8 14934 14935 checkResults(t, map[string]uint64{"foo": 2, "bar": 2, "baz": 2}) 14936 14937 // Now do a purge only of bar. 14938 jr, _ := json.Marshal(&JSApiStreamPurgeRequest{Subject: "bar"}) 14939 _, err = nc.Request(fmt.Sprintf(JSApiStreamPurgeT, "TEST"), jr, time.Second) 14940 require_NoError(t, err) 14941 14942 checkResults(t, map[string]uint64{"foo": 2, "baz": 2}) 14943 14944 // Now purge everything 14945 err = js.PurgeStream("TEST") 14946 require_NoError(t, err) 14947 14948 si := getInfo(t, nats.AllKeys) 14949 if len(si.State.Subjects) != 0 { 14950 t.Fatalf("Expected no subjects, but got %+v", si.State.Subjects) 14951 } 14952 if si.State.NumSubjects != 0 { 14953 t.Fatalf("Expected NumSubjects to be 0, but got %d", si.State.NumSubjects) 14954 } 14955 } 14956 14957 t.Run("MemoryStore", func(t *testing.T) { testSubjects(t, nats.MemoryStorage) }) 14958 t.Run("FileStore", func(t *testing.T) { testSubjects(t, nats.FileStorage) }) 14959 } 14960 14961 func TestJetStreamStreamInfoSubjectsDetailsAfterRestart(t *testing.T) { 14962 s := RunBasicJetStreamServer(t) 14963 defer s.Shutdown() 14964 14965 nc, js := jsClientConnect(t, s) 14966 defer nc.Close() 14967 14968 getInfo := func(t *testing.T, filter string) *StreamInfo { 14969 t.Helper() 14970 // Need to grab StreamInfo by hand for now. 
14971 req, err := json.Marshal(&JSApiStreamInfoRequest{SubjectsFilter: filter}) 14972 require_NoError(t, err) 14973 resp, err := nc.Request(fmt.Sprintf(JSApiStreamInfoT, "TEST"), req, time.Second) 14974 require_NoError(t, err) 14975 var si StreamInfo 14976 err = json.Unmarshal(resp.Data, &si) 14977 require_NoError(t, err) 14978 return &si 14979 } 14980 14981 _, err := js.AddStream(&nats.StreamConfig{ 14982 Name: "TEST", 14983 Subjects: []string{"*"}, 14984 }) 14985 require_NoError(t, err) 14986 defer js.DeleteStream("TEST") 14987 14988 msg := []byte("ok") 14989 js.Publish("foo", msg) // 1 14990 js.Publish("foo", msg) // 2 14991 js.Publish("bar", msg) // 3 14992 js.Publish("baz", msg) // 4 14993 js.Publish("baz", msg) // 5 14994 14995 si := getInfo(t, nats.AllKeys) 14996 if si.State.NumSubjects != 3 { 14997 t.Fatalf("Expected 3 subjects, but got %d", si.State.NumSubjects) 14998 } 14999 15000 // Stop current 15001 nc.Close() 15002 sd := s.JetStreamConfig().StoreDir 15003 s.Shutdown() 15004 // Restart. 15005 s = RunJetStreamServerOnPort(-1, sd) 15006 defer s.Shutdown() 15007 15008 nc, _ = jsClientConnect(t, s) 15009 defer nc.Close() 15010 15011 si = getInfo(t, nats.AllKeys) 15012 if si.State.NumSubjects != 3 { 15013 t.Fatalf("Expected 3 subjects, but got %d", si.State.NumSubjects) 15014 } 15015 } 15016 15017 // Issue #2836 15018 func TestJetStreamInterestRetentionBug(t *testing.T) { 15019 s := RunBasicJetStreamServer(t) 15020 defer s.Shutdown() 15021 15022 nc, js := jsClientConnect(t, s) 15023 defer nc.Close() 15024 15025 _, err := js.AddStream(&nats.StreamConfig{ 15026 Name: "TEST", 15027 Subjects: []string{"foo.>"}, 15028 Retention: nats.InterestPolicy, 15029 }) 15030 require_NoError(t, err) 15031 15032 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "c1", AckPolicy: nats.AckExplicitPolicy}) 15033 require_NoError(t, err) 15034 15035 test := func(token string, fseq, msgs uint64) { 15036 t.Helper() 15037 subj := fmt.Sprintf("foo.%s", token) 15038 _, err = js.Publish(subj, nil) 15039 require_NoError(t, err) 15040 si, err := js.StreamInfo("TEST") 15041 require_NoError(t, err) 15042 if si.State.FirstSeq != fseq { 15043 t.Fatalf("Expected first to be %d, got %d", fseq, si.State.FirstSeq) 15044 } 15045 if si.State.Msgs != msgs { 15046 t.Fatalf("Expected msgs to be %d, got %d", msgs, si.State.Msgs) 15047 } 15048 } 15049 15050 test("bar", 1, 1) 15051 15052 // Create second filtered consumer. 15053 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "c2", FilterSubject: "foo.foo", AckPolicy: nats.AckExplicitPolicy}) 15054 require_NoError(t, err) 15055 15056 test("bar", 1, 2) 15057 } 15058 15059 // Under load testing for K3S and the KINE interface we saw some stalls. 15060 // These were caused by a dynamic that would not send a second FC item with one 15061 // pending, but when we sent the next message and got blocked, if that msg would 15062 // exceed the outstanding FC we would become stalled. 
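// The test below publishes a 32KB message followed by two 256KB messages and verifies that an
// ordered (flow-controlled) consumer receives all three without stalling.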
15063 func TestJetStreamFlowControlStall(t *testing.T) { 15064 s := RunBasicJetStreamServer(t) 15065 defer s.Shutdown() 15066 15067 nc, js := jsClientConnect(t, s) 15068 defer nc.Close() 15069 15070 _, err := js.AddStream(&nats.StreamConfig{Name: "FC"}) 15071 require_NoError(t, err) 15072 15073 msg := []byte(strings.Repeat("X", 32768)) 15074 _, err = js.Publish("FC", msg) 15075 require_NoError(t, err) 15076 15077 msg = []byte(strings.Repeat("X", 8*32768)) 15078 _, err = js.Publish("FC", msg) 15079 require_NoError(t, err) 15080 _, err = js.Publish("FC", msg) 15081 require_NoError(t, err) 15082 15083 sub, err := js.SubscribeSync("FC", nats.OrderedConsumer()) 15084 require_NoError(t, err) 15085 15086 checkSubsPending(t, sub, 3) 15087 } 15088 15089 func TestJetStreamConsumerPendingCountWithRedeliveries(t *testing.T) { 15090 s := RunBasicJetStreamServer(t) 15091 defer s.Shutdown() 15092 15093 nc, js := jsClientConnect(t, s) 15094 defer nc.Close() 15095 15096 _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}}) 15097 require_NoError(t, err) 15098 15099 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 15100 Durable: "test", 15101 AckPolicy: nats.AckExplicitPolicy, 15102 AckWait: 50 * time.Millisecond, 15103 MaxDeliver: 1, 15104 }) 15105 require_NoError(t, err) 15106 15107 // Publish 1st message 15108 _, err = js.Publish("foo", []byte("msg1")) 15109 require_NoError(t, err) 15110 15111 sub, err := js.PullSubscribe("foo", "test") 15112 require_NoError(t, err) 15113 msgs, err := sub.Fetch(1) 15114 require_NoError(t, err) 15115 for _, m := range msgs { 15116 require_Equal(t, string(m.Data), "msg1") 15117 // Do not ack the message 15118 } 15119 // Check consumer info, pending should be 0 15120 ci, err := js.ConsumerInfo("TEST", "test") 15121 require_NoError(t, err) 15122 if ci.NumPending != 0 { 15123 t.Fatalf("Expected consumer info pending count to be 0, got %v", ci.NumPending) 15124 } 15125 15126 // Wait for more than expiration 15127 time.Sleep(100 * time.Millisecond) 15128 15129 // Publish 2nd message 15130 _, err = js.Publish("foo", []byte("msg2")) 15131 require_NoError(t, err) 15132 15133 msgs, err = sub.Fetch(1) 15134 require_NoError(t, err) 15135 for _, m := range msgs { 15136 require_Equal(t, string(m.Data), "msg2") 15137 // Its deliver count should be 1 15138 meta, err := m.Metadata() 15139 require_NoError(t, err) 15140 if meta.NumDelivered != 1 { 15141 t.Fatalf("Expected message's deliver count to be 1, got %v", meta.NumDelivered) 15142 } 15143 } 15144 // Check consumer info, pending should be 0 15145 ci, err = js.ConsumerInfo("TEST", "test") 15146 require_NoError(t, err) 15147 if ci.NumPending != 0 { 15148 t.Fatalf("Expected consumer info pending count to be 0, got %v", ci.NumPending) 15149 } 15150 } 15151 15152 func TestJetStreamPullConsumerHeartBeats(t *testing.T) { 15153 s := RunBasicJetStreamServer(t) 15154 defer s.Shutdown() 15155 15156 nc, js := jsClientConnect(t, s) 15157 defer nc.Close() 15158 15159 _, err := js.AddStream(&nats.StreamConfig{Name: "T", Storage: nats.MemoryStorage}) 15160 require_NoError(t, err) 15161 15162 _, err = js.AddConsumer("T", &nats.ConsumerConfig{Durable: "dlc", AckPolicy: nats.AckExplicitPolicy}) 15163 require_NoError(t, err) 15164 15165 rsubj := fmt.Sprintf(JSApiRequestNextT, "T", "dlc") 15166 15167 type tsMsg struct { 15168 received time.Time 15169 msg *nats.Msg 15170 } 15171 15172 doReq := func(batch int, hb, expires time.Duration, expected int) []*tsMsg { 15173 t.Helper() 15174 req := 
&JSApiConsumerGetNextRequest{Batch: batch, Expires: expires, Heartbeat: hb} 15175 jreq, err := json.Marshal(req) 15176 require_NoError(t, err) 15177 reply := nats.NewInbox() 15178 var msgs []*tsMsg 15179 var mu sync.Mutex 15180 15181 sub, err := nc.Subscribe(reply, func(m *nats.Msg) { 15182 mu.Lock() 15183 msgs = append(msgs, &tsMsg{time.Now(), m}) 15184 mu.Unlock() 15185 }) 15186 require_NoError(t, err) 15187 15188 err = nc.PublishRequest(rsubj, reply, jreq) 15189 require_NoError(t, err) 15190 checkFor(t, time.Second, 50*time.Millisecond, func() error { 15191 mu.Lock() 15192 nr := len(msgs) 15193 mu.Unlock() 15194 if nr >= expected { 15195 return nil 15196 } 15197 return fmt.Errorf("Only have seen %d of %d responses", nr, expected) 15198 }) 15199 sub.Unsubscribe() 15200 return msgs 15201 } 15202 15203 reqBad := nats.Header{"Status": []string{"400"}, "Description": []string{"Bad Request - heartbeat value too large"}} 15204 expectErr := func(msgs []*tsMsg) { 15205 t.Helper() 15206 if len(msgs) != 1 { 15207 t.Fatalf("Expected 1 msg, got %d", len(msgs)) 15208 } 15209 if !reflect.DeepEqual(msgs[0].msg.Header, reqBad) { 15210 t.Fatalf("Expected %+v hdr, got %+v", reqBad, msgs[0].msg.Header) 15211 } 15212 } 15213 15214 // Test errors first. 15215 // Setting HB with no expires. 15216 expectErr(doReq(1, 100*time.Millisecond, 0, 1)) 15217 // If HB larger than 50% of expires.. 15218 expectErr(doReq(1, 75*time.Millisecond, 100*time.Millisecond, 1)) 15219 15220 expectHBs := func(start time.Time, msgs []*tsMsg, expected int, hbi time.Duration) { 15221 t.Helper() 15222 if len(msgs) != expected { 15223 t.Fatalf("Expected %d but got %d", expected, len(msgs)) 15224 } 15225 // expected -1 should be all HBs. 15226 for i, ts := 0, start; i < expected-1; i++ { 15227 tr, m := msgs[i].received, msgs[i].msg 15228 if m.Header.Get("Status") != "100" { 15229 t.Fatalf("Expected a 100 status code, got %q", m.Header.Get("Status")) 15230 } 15231 if m.Header.Get("Description") != "Idle Heartbeat" { 15232 t.Fatalf("Wrong description, got %q", m.Header.Get("Description")) 15233 } 15234 ts = ts.Add(hbi) 15235 if tr.Before(ts) { 15236 t.Fatalf("Received at wrong time: %v vs %v", tr, ts) 15237 } 15238 } 15239 // Last msg should be timeout. 15240 lm := msgs[len(msgs)-1].msg 15241 if key := lm.Header.Get("Status"); key != "408" { 15242 t.Fatalf("Expected 408 Request Timeout, got %s", key) 15243 } 15244 } 15245 15246 // These should work. Test idle first. 15247 start, msgs := time.Now(), doReq(1, 50*time.Millisecond, 250*time.Millisecond, 5) 15248 expectHBs(start, msgs, 5, 50*time.Millisecond) 15249 15250 // Now test that we do not send heartbeats while we receive traffic. 15251 go func() { 15252 for i := 0; i < 5; i++ { 15253 time.Sleep(50 * time.Millisecond) 15254 js.Publish("T", nil) 15255 } 15256 }() 15257 15258 msgs = doReq(10, 75*time.Millisecond, 350*time.Millisecond, 6) 15259 // The first 5 should be msgs, no HBs. 15260 for i := 0; i < 5; i++ { 15261 if m := msgs[i].msg; len(m.Header) > 0 { 15262 t.Fatalf("Got a potential heartbeat msg when we should not have: %+v", m.Header) 15263 } 15264 } 15265 // Last should be timeout. 
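// As in the idle heartbeat case above, the final message should carry a 408 Request Timeout status.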
15266 lm := msgs[len(msgs)-1].msg 15267 if key := lm.Header.Get("Status"); key != "408" { 15268 t.Fatalf("Expected 408 Request Timeout, got %s", key) 15269 } 15270 } 15271 15272 func TestJetStreamStorageReservedBytes(t *testing.T) { 15273 const systemLimit = 1024 15274 opts := DefaultTestOptions 15275 opts.Port = -1 15276 opts.JetStream = true 15277 opts.JetStreamMaxMemory = systemLimit 15278 opts.JetStreamMaxStore = systemLimit 15279 opts.StoreDir = t.TempDir() 15280 opts.HTTPPort = -1 15281 s := RunServer(&opts) 15282 15283 defer s.Shutdown() 15284 15285 // Client for API requests. 15286 nc, js := jsClientConnect(t, s) 15287 defer nc.Close() 15288 15289 getJetStreamVarz := func(hc *http.Client, addr string) (JetStreamVarz, error) { 15290 resp, err := hc.Get(addr) 15291 if err != nil { 15292 return JetStreamVarz{}, err 15293 } 15294 defer resp.Body.Close() 15295 15296 var v Varz 15297 if err := json.NewDecoder(resp.Body).Decode(&v); err != nil { 15298 return JetStreamVarz{}, err 15299 } 15300 15301 return v.JetStream, nil 15302 } 15303 getReserved := func(hc *http.Client, addr string, st nats.StorageType) (uint64, error) { 15304 jsv, err := getJetStreamVarz(hc, addr) 15305 if err != nil { 15306 return 0, err 15307 } 15308 if st == nats.MemoryStorage { 15309 return jsv.Stats.ReservedMemory, nil 15310 } 15311 return jsv.Stats.ReservedStore, nil 15312 } 15313 15314 varzAddr := fmt.Sprintf("http://127.0.0.1:%d/varz", s.MonitorAddr().Port) 15315 hc := &http.Client{Timeout: 5 * time.Second} 15316 15317 jsv, err := getJetStreamVarz(hc, varzAddr) 15318 require_NoError(t, err) 15319 15320 if got, want := systemLimit, int(jsv.Config.MaxMemory); got != want { 15321 t.Fatalf("Unexpected max memory: got=%d, want=%d", got, want) 15322 } 15323 if got, want := systemLimit, int(jsv.Config.MaxStore); got != want { 15324 t.Fatalf("Unexpected max store: got=%d, want=%d", got, want) 15325 } 15326 15327 cases := []struct { 15328 name string 15329 accountLimit int64 15330 storage nats.StorageType 15331 createMaxBytes int64 15332 updateMaxBytes int64 15333 wantUpdateError bool 15334 }{ 15335 { 15336 name: "file reserve 66% of system limit", 15337 accountLimit: -1, 15338 storage: nats.FileStorage, 15339 createMaxBytes: int64(math.Round(float64(systemLimit) * .666)), 15340 updateMaxBytes: int64(math.Round(float64(systemLimit)*.666)) + 1, 15341 }, 15342 { 15343 name: "memory reserve 66% of system limit", 15344 accountLimit: -1, 15345 storage: nats.MemoryStorage, 15346 createMaxBytes: int64(math.Round(float64(systemLimit) * .666)), 15347 updateMaxBytes: int64(math.Round(float64(systemLimit)*.666)) + 1, 15348 }, 15349 { 15350 name: "file update past system limit", 15351 accountLimit: -1, 15352 storage: nats.FileStorage, 15353 createMaxBytes: systemLimit, 15354 updateMaxBytes: systemLimit + 1, 15355 wantUpdateError: true, 15356 }, 15357 { 15358 name: "memory update past system limit", 15359 accountLimit: -1, 15360 storage: nats.MemoryStorage, 15361 createMaxBytes: systemLimit, 15362 updateMaxBytes: systemLimit + 1, 15363 wantUpdateError: true, 15364 }, 15365 { 15366 name: "file update to system limit", 15367 accountLimit: -1, 15368 storage: nats.FileStorage, 15369 createMaxBytes: systemLimit - 1, 15370 updateMaxBytes: systemLimit, 15371 }, 15372 { 15373 name: "memory update to system limit", 15374 accountLimit: -1, 15375 storage: nats.MemoryStorage, 15376 createMaxBytes: systemLimit - 1, 15377 updateMaxBytes: systemLimit, 15378 }, 15379 { 15380 name: "file reserve 66% of account limit", 15381 accountLimit: systemLimit 
/ 2, 15382 storage: nats.FileStorage, 15383 createMaxBytes: int64(math.Round(float64(systemLimit/2) * .666)), 15384 updateMaxBytes: int64(math.Round(float64(systemLimit/2)*.666)) + 1, 15385 }, 15386 { 15387 name: "memory reserve 66% of account limit", 15388 accountLimit: systemLimit / 2, 15389 storage: nats.MemoryStorage, 15390 createMaxBytes: int64(math.Round(float64(systemLimit/2) * .666)), 15391 updateMaxBytes: int64(math.Round(float64(systemLimit/2)*.666)) + 1, 15392 }, 15393 { 15394 name: "file update past account limit", 15395 accountLimit: systemLimit / 2, 15396 storage: nats.FileStorage, 15397 createMaxBytes: (systemLimit / 2), 15398 updateMaxBytes: (systemLimit / 2) + 1, 15399 wantUpdateError: true, 15400 }, 15401 { 15402 name: "memory update past account limit", 15403 accountLimit: systemLimit / 2, 15404 storage: nats.MemoryStorage, 15405 createMaxBytes: (systemLimit / 2), 15406 updateMaxBytes: (systemLimit / 2) + 1, 15407 wantUpdateError: true, 15408 }, 15409 { 15410 name: "file update to account limit", 15411 accountLimit: systemLimit / 2, 15412 storage: nats.FileStorage, 15413 createMaxBytes: (systemLimit / 2) - 1, 15414 updateMaxBytes: (systemLimit / 2), 15415 }, 15416 { 15417 name: "memory update to account limit", 15418 accountLimit: systemLimit / 2, 15419 storage: nats.MemoryStorage, 15420 createMaxBytes: (systemLimit / 2) - 1, 15421 updateMaxBytes: (systemLimit / 2), 15422 }, 15423 } 15424 for i := 0; i < len(cases) && !t.Failed(); i++ { 15425 c := cases[i] 15426 t.Run(c.name, func(st *testing.T) { 15427 // Setup limits 15428 err = s.GlobalAccount().UpdateJetStreamLimits(map[string]JetStreamAccountLimits{ 15429 _EMPTY_: { 15430 MaxMemory: c.accountLimit, 15431 MaxStore: c.accountLimit, 15432 }, 15433 }) 15434 require_NoError(st, err) 15435 15436 // Create initial stream 15437 cfg := &nats.StreamConfig{ 15438 Name: "TEST", 15439 Subjects: []string{"foo"}, 15440 Storage: c.storage, 15441 MaxBytes: c.createMaxBytes, 15442 } 15443 _, err = js.AddStream(cfg) 15444 require_NoError(st, err) 15445 15446 // Update stream MaxBytes 15447 cfg.MaxBytes = c.updateMaxBytes 15448 info, err := js.UpdateStream(cfg) 15449 if c.wantUpdateError && err == nil { 15450 got := info.Config.MaxBytes 15451 st.Fatalf("Unexpected update success, newMaxBytes=%d; systemLimit=%d; accountLimit=%d", 15452 got, systemLimit, c.accountLimit) 15453 } else if !c.wantUpdateError && err != nil { 15454 st.Fatalf("Unexpected update error: %s", err) 15455 } 15456 15457 if !c.wantUpdateError && err == nil { 15458 // If update was successful, then ensure reserved shows new 15459 // amount 15460 reserved, err := getReserved(hc, varzAddr, c.storage) 15461 require_NoError(st, err) 15462 if got, want := reserved, uint64(c.updateMaxBytes); got != want { 15463 st.Fatalf("Unexpected reserved: %d, want %d", got, want) 15464 } 15465 } 15466 15467 // Delete stream 15468 err = js.DeleteStream("TEST") 15469 require_NoError(st, err) 15470 15471 // Ensure reserved shows 0 because we've deleted the stream 15472 reserved, err := getReserved(hc, varzAddr, c.storage) 15473 require_NoError(st, err) 15474 if reserved != 0 { 15475 st.Fatalf("Unexpected reserved: %d, want 0", reserved) 15476 } 15477 }) 15478 } 15479 } 15480 15481 func TestJetStreamRestoreBadStream(t *testing.T) { 15482 s := RunBasicJetStreamServer(t) 15483 defer s.Shutdown() 15484 15485 nc, _ := jsClientConnect(t, s) 15486 defer nc.Close() 15487 15488 var rreq JSApiStreamRestoreRequest 15489 buf, err := 
os.ReadFile("../test/configs/jetstream/restore_bad_stream/backup.json") 15490 require_NoError(t, err) 15491 err = json.Unmarshal(buf, &rreq) 15492 require_NoError(t, err) 15493 15494 data, err := os.Open("../test/configs/jetstream/restore_bad_stream/stream.tar.s2") 15495 require_NoError(t, err) 15496 defer data.Close() 15497 15498 var rresp JSApiStreamRestoreResponse 15499 msg, err := nc.Request(fmt.Sprintf(JSApiStreamRestoreT, rreq.Config.Name), buf, 5*time.Second) 15500 require_NoError(t, err) 15501 json.Unmarshal(msg.Data, &rresp) 15502 if rresp.Error != nil { 15503 t.Fatalf("Error on restore: %+v", rresp.Error) 15504 } 15505 15506 var chunk [1024]byte 15507 for { 15508 n, err := data.Read(chunk[:]) 15509 if err == io.EOF { 15510 break 15511 } 15512 require_NoError(t, err) 15513 15514 msg, err = nc.Request(rresp.DeliverSubject, chunk[:n], 5*time.Second) 15515 require_NoError(t, err) 15516 json.Unmarshal(msg.Data, &rresp) 15517 if rresp.Error != nil { 15518 t.Fatalf("Error on restore: %+v", rresp.Error) 15519 } 15520 } 15521 msg, err = nc.Request(rresp.DeliverSubject, nil, 5*time.Second) 15522 require_NoError(t, err) 15523 json.Unmarshal(msg.Data, &rresp) 15524 if rresp.Error == nil || !strings.Contains(rresp.Error.Description, "unexpected") { 15525 t.Fatalf("Expected error about unexpected content, got: %+v", rresp.Error) 15526 } 15527 15528 dir := filepath.Join(s.JetStreamConfig().StoreDir, globalAccountName) 15529 f1 := filepath.Join(dir, "fail1.txt") 15530 f2 := filepath.Join(dir, "fail2.txt") 15531 for _, f := range []string{f1, f2} { 15532 if _, err := os.Stat(f); err == nil { 15533 t.Fatalf("Found file %s", f) 15534 } 15535 } 15536 } 15537 15538 func TestJetStreamConsumerAckSampling(t *testing.T) { 15539 s := RunBasicJetStreamServer(t) 15540 defer s.Shutdown() 15541 15542 nc, js := jsClientConnect(t, s) 15543 defer nc.Close() 15544 15545 _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}}) 15546 require_NoError(t, err) 15547 15548 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 15549 Durable: "dlc", 15550 AckPolicy: nats.AckExplicitPolicy, 15551 FilterSubject: "foo", 15552 SampleFrequency: "100%", 15553 }) 15554 require_NoError(t, err) 15555 15556 sub, err := js.PullSubscribe("foo", "dlc") 15557 require_NoError(t, err) 15558 15559 _, err = js.Publish("foo", []byte("Hello")) 15560 require_NoError(t, err) 15561 15562 msub, err := nc.SubscribeSync("$JS.EVENT.METRIC.>") 15563 require_NoError(t, err) 15564 15565 for _, m := range fetchMsgs(t, sub, 1, time.Second) { 15566 err = m.AckSync() 15567 require_NoError(t, err) 15568 } 15569 15570 m, err := msub.NextMsg(time.Second) 15571 require_NoError(t, err) 15572 15573 var am JSConsumerAckMetric 15574 err = json.Unmarshal(m.Data, &am) 15575 require_NoError(t, err) 15576 15577 if am.Stream != "TEST" || am.Consumer != "dlc" || am.ConsumerSeq != 1 { 15578 t.Fatalf("Not a proper ack metric: %+v", am) 15579 } 15580 15581 // Do less than 100% 15582 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 15583 Durable: "alps", 15584 AckPolicy: nats.AckExplicitPolicy, 15585 FilterSubject: "foo", 15586 SampleFrequency: "50%", 15587 }) 15588 require_NoError(t, err) 15589 15590 asub, err := js.PullSubscribe("foo", "alps") 15591 require_NoError(t, err) 15592 15593 total := 500 15594 for i := 0; i < total; i++ { 15595 _, err = js.Publish("foo", []byte("Hello")) 15596 require_NoError(t, err) 15597 } 15598 15599 mp := 0 15600 for _, m := range fetchMsgs(t, asub, total, time.Second) { 15601 err = m.AckSync() 15602 
require_NoError(t, err) 15603 mp++ 15604 } 15605 nc.Flush() 15606 15607 if mp != total { 15608 t.Fatalf("Got only %d msgs out of %d", mp, total) 15609 } 15610 15611 nmsgs, _, err := msub.Pending() 15612 require_NoError(t, err) 15613 15614 // Should be ~250 15615 if nmsgs < 200 || nmsgs > 300 { 15616 t.Fatalf("Expected about 250, got %d", nmsgs) 15617 } 15618 } 15619 15620 func TestJetStreamConsumerAckSamplingSpecifiedUsingUpdateConsumer(t *testing.T) { 15621 s := RunBasicJetStreamServer(t) 15622 defer s.Shutdown() 15623 15624 nc, js := jsClientConnect(t, s) 15625 defer nc.Close() 15626 15627 _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}}) 15628 require_NoError(t, err) 15629 15630 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 15631 Durable: "dlc", 15632 AckPolicy: nats.AckExplicitPolicy, 15633 FilterSubject: "foo", 15634 }) 15635 require_NoError(t, err) 15636 15637 _, err = js.UpdateConsumer("TEST", &nats.ConsumerConfig{ 15638 Durable: "dlc", 15639 AckPolicy: nats.AckExplicitPolicy, 15640 FilterSubject: "foo", 15641 SampleFrequency: "100%", 15642 }) 15643 require_NoError(t, err) 15644 15645 sub, err := js.PullSubscribe("foo", "dlc") 15646 require_NoError(t, err) 15647 15648 _, err = js.Publish("foo", []byte("Hello")) 15649 require_NoError(t, err) 15650 15651 msub, err := nc.SubscribeSync("$JS.EVENT.METRIC.>") 15652 require_NoError(t, err) 15653 15654 for _, m := range fetchMsgs(t, sub, 1, time.Second) { 15655 err = m.AckSync() 15656 require_NoError(t, err) 15657 } 15658 15659 m, err := msub.NextMsg(time.Second) 15660 require_NoError(t, err) 15661 15662 var am JSConsumerAckMetric 15663 err = json.Unmarshal(m.Data, &am) 15664 require_NoError(t, err) 15665 15666 if am.Stream != "TEST" || am.Consumer != "dlc" || am.ConsumerSeq != 1 { 15667 t.Fatalf("Not a proper ack metric: %+v", am) 15668 } 15669 } 15670 15671 func TestJetStreamConsumerMaxDeliverUpdate(t *testing.T) { 15672 s := RunBasicJetStreamServer(t) 15673 defer s.Shutdown() 15674 15675 nc, js := jsClientConnect(t, s) 15676 defer nc.Close() 15677 15678 _, err := js.AddStream(&nats.StreamConfig{Name: "TEST", Subjects: []string{"foo"}}) 15679 require_NoError(t, err) 15680 15681 maxDeliver := 2 15682 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 15683 Durable: "ard", 15684 AckPolicy: nats.AckExplicitPolicy, 15685 FilterSubject: "foo", 15686 MaxDeliver: maxDeliver, 15687 }) 15688 require_NoError(t, err) 15689 15690 sub, err := js.PullSubscribe("foo", "ard") 15691 require_NoError(t, err) 15692 15693 checkMaxDeliver := func() { 15694 t.Helper() 15695 for i := 0; i <= maxDeliver; i++ { 15696 msgs, err := sub.Fetch(2, nats.MaxWait(100*time.Millisecond)) 15697 if i < maxDeliver { 15698 require_NoError(t, err) 15699 require_Len(t, 1, len(msgs)) 15700 _ = msgs[0].Nak() 15701 } else { 15702 require_Error(t, err, nats.ErrTimeout) 15703 } 15704 } 15705 } 15706 15707 _, err = js.Publish("foo", []byte("Hello")) 15708 require_NoError(t, err) 15709 checkMaxDeliver() 15710 15711 // update maxDeliver 15712 maxDeliver++ 15713 _, err = js.UpdateConsumer("TEST", &nats.ConsumerConfig{ 15714 Durable: "ard", 15715 AckPolicy: nats.AckExplicitPolicy, 15716 FilterSubject: "foo", 15717 MaxDeliver: maxDeliver, 15718 }) 15719 require_NoError(t, err) 15720 15721 _, err = js.Publish("foo", []byte("Hello")) 15722 require_NoError(t, err) 15723 checkMaxDeliver() 15724 } 15725 15726 func TestJetStreamRemoveExternalSource(t *testing.T) { 15727 ho := DefaultTestOptions 15728 ho.Port = 4000 //-1 15729 ho.LeafNode.Host = 
"127.0.0.1" 15730 ho.LeafNode.Port = -1 15731 hs := RunServer(&ho) 15732 defer hs.Shutdown() 15733 15734 lu, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", ho.LeafNode.Port)) 15735 require_NoError(t, err) 15736 15737 lo1 := DefaultTestOptions 15738 lo1.Port = 4111 //-1 15739 lo1.ServerName = "a-leaf" 15740 lo1.JetStream = true 15741 lo1.StoreDir = t.TempDir() 15742 lo1.JetStreamDomain = "a-leaf" 15743 lo1.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{lu}}} 15744 l1 := RunServer(&lo1) 15745 defer l1.Shutdown() 15746 15747 lo2 := DefaultTestOptions 15748 lo2.Port = 2111 //-1 15749 lo2.ServerName = "b-leaf" 15750 lo2.JetStream = true 15751 lo2.StoreDir = t.TempDir() 15752 lo2.JetStreamDomain = "b-leaf" 15753 lo2.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{lu}}} 15754 l2 := RunServer(&lo2) 15755 defer l2.Shutdown() 15756 15757 checkLeafNodeConnected(t, l1) 15758 checkLeafNodeConnected(t, l2) 15759 15760 checkStreamMsgs := func(js nats.JetStreamContext, stream string, expected uint64) { 15761 t.Helper() 15762 checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { 15763 si, err := js.StreamInfo(stream) 15764 if err != nil { 15765 return err 15766 } 15767 if si.State.Msgs != expected { 15768 return fmt.Errorf("Expected %v messages, got %v", expected, si.State.Msgs) 15769 } 15770 return nil 15771 }) 15772 } 15773 15774 sendToStreamTest := func(js nats.JetStreamContext) { 15775 t.Helper() 15776 for i := 0; i < 10; i++ { 15777 _, err = js.Publish("test", []byte("hello")) 15778 require_NoError(t, err) 15779 } 15780 } 15781 15782 nca, jsa := jsClientConnect(t, l1) 15783 defer nca.Close() 15784 _, err = jsa.AddStream(&nats.StreamConfig{Name: "queue", Subjects: []string{"queue"}}) 15785 require_NoError(t, err) 15786 15787 _, err = jsa.AddStream(&nats.StreamConfig{Name: "testdel", Subjects: []string{"testdel"}}) 15788 require_NoError(t, err) 15789 15790 ncb, jsb := jsClientConnect(t, l2) 15791 defer ncb.Close() 15792 _, err = jsb.AddStream(&nats.StreamConfig{Name: "test", Subjects: []string{"test"}}) 15793 require_NoError(t, err) 15794 sendToStreamTest(jsb) 15795 checkStreamMsgs(jsb, "test", 10) 15796 15797 _, err = jsb.AddStream(&nats.StreamConfig{Name: "testdelsrc1", Subjects: []string{"testdelsrc1"}}) 15798 require_NoError(t, err) 15799 _, err = jsb.AddStream(&nats.StreamConfig{Name: "testdelsrc2", Subjects: []string{"testdelsrc2"}}) 15800 require_NoError(t, err) 15801 15802 // Add test as source to queue 15803 si, err := jsa.UpdateStream(&nats.StreamConfig{ 15804 Name: "queue", 15805 Subjects: []string{"queue"}, 15806 Sources: []*nats.StreamSource{ 15807 { 15808 Name: "test", 15809 External: &nats.ExternalStream{ 15810 APIPrefix: "$JS.b-leaf.API", 15811 }, 15812 }, 15813 }, 15814 }) 15815 require_NoError(t, err) 15816 require_True(t, len(si.Config.Sources) == 1) 15817 checkStreamMsgs(jsa, "queue", 10) 15818 15819 // add more entries to "test" 15820 sendToStreamTest(jsb) 15821 15822 // verify entries are both in "test" and "queue" 15823 checkStreamMsgs(jsb, "test", 20) 15824 checkStreamMsgs(jsa, "queue", 20) 15825 15826 // Remove source 15827 si, err = jsa.UpdateStream(&nats.StreamConfig{ 15828 Name: "queue", 15829 Subjects: []string{"queue"}, 15830 }) 15831 require_NoError(t, err) 15832 require_True(t, len(si.Config.Sources) == 0) 15833 15834 // add more entries to "test" 15835 sendToStreamTest(jsb) 15836 // verify entries are in "test" 15837 checkStreamMsgs(jsb, "test", 30) 15838 15839 // But they should not be in "queue". 
We will wait a bit before checking 15840 // to make sure that we are letting enough time for the sourcing to 15841 // incorrectly happen if there is a bug. 15842 time.Sleep(250 * time.Millisecond) 15843 checkStreamMsgs(jsa, "queue", 20) 15844 15845 // Test that we delete correctly. First add source to a "testdel" 15846 si, err = jsa.UpdateStream(&nats.StreamConfig{ 15847 Name: "testdel", 15848 Subjects: []string{"testdel"}, 15849 Sources: []*nats.StreamSource{ 15850 { 15851 Name: "testdelsrc1", 15852 External: &nats.ExternalStream{ 15853 APIPrefix: "$JS.b-leaf.API", 15854 }, 15855 }, 15856 }, 15857 }) 15858 require_NoError(t, err) 15859 require_True(t, len(si.Config.Sources) == 1) 15860 // Now add the second one... 15861 si, err = jsa.UpdateStream(&nats.StreamConfig{ 15862 Name: "testdel", 15863 Subjects: []string{"testdel"}, 15864 Sources: []*nats.StreamSource{ 15865 { 15866 Name: "testdelsrc1", 15867 External: &nats.ExternalStream{ 15868 APIPrefix: "$JS.b-leaf.API", 15869 }, 15870 }, 15871 { 15872 Name: "testdelsrc2", 15873 External: &nats.ExternalStream{ 15874 APIPrefix: "$JS.b-leaf.API", 15875 }, 15876 }, 15877 }, 15878 }) 15879 require_NoError(t, err) 15880 require_True(t, len(si.Config.Sources) == 2) 15881 // Now check that the stream testdel has still 2 source consumers... 15882 acc, err := l1.lookupAccount(globalAccountName) 15883 require_NoError(t, err) 15884 mset, err := acc.lookupStream("testdel") 15885 require_NoError(t, err) 15886 mset.mu.RLock() 15887 n := len(mset.sources) 15888 mset.mu.RUnlock() 15889 if n != 2 { 15890 t.Fatalf("Expected 2 sources, got %v", n) 15891 } 15892 15893 // Restart leaf "a" 15894 nca.Close() 15895 l1.Shutdown() 15896 l1 = RunServer(&lo1) 15897 defer l1.Shutdown() 15898 15899 // add more entries to "test" 15900 sendToStreamTest(jsb) 15901 checkStreamMsgs(jsb, "test", 40) 15902 15903 nca, jsa = jsClientConnect(t, l1) 15904 defer nca.Close() 15905 time.Sleep(250 * time.Millisecond) 15906 checkStreamMsgs(jsa, "queue", 20) 15907 } 15908 15909 func TestJetStreamAddStreamWithFilestoreFailure(t *testing.T) { 15910 s := RunBasicJetStreamServer(t) 15911 defer s.Shutdown() 15912 15913 // Cause failure to create stream with filestore. 15914 // In one of ipQueue changes, this could cause a panic, so verify that we get 15915 // a failure to create, but no panic. 
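// An oversized block size (twice the allowed maximum) is what should force the filestore creation to fail here.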
15916 if _, err := s.globalAccount().addStreamWithStore(
15917 &StreamConfig{Name: "TEST"},
15918 &FileStoreConfig{BlockSize: 2 * maxBlockSize}); err == nil {
15919 t.Fatal("Expected failure, did not get one")
15920 }
15921 }
15922 
15923 type checkFastState struct {
15924 count int64
15925 StreamStore
15926 }
15927 
15928 func (s *checkFastState) FastState(state *StreamState) {
15929 // Keep track only when called from checkPending()
15930 if bytes.Contains(debug.Stack(), []byte("checkPending(")) {
15931 atomic.AddInt64(&s.count, 1)
15932 }
15933 s.StreamStore.FastState(state)
15934 }
15935 
15936 func TestJetStreamBackOffCheckPending(t *testing.T) {
15937 s := RunBasicJetStreamServer(t)
15938 defer s.Shutdown()
15939 
15940 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: "TEST", Subjects: []string{"foo"}})
15941 if err != nil {
15942 t.Fatalf("Unexpected error adding stream: %v", err)
15943 }
15944 defer mset.delete()
15945 
15946 // Plug our store to see how many times we invoke FastState, which is done in checkPending.
15947 mset.mu.Lock()
15948 st := &checkFastState{StreamStore: mset.store}
15949 mset.store = st
15950 mset.mu.Unlock()
15951 
15952 nc := clientConnectToServer(t, s)
15953 defer nc.Close()
15954 
15955 sendStreamMsg(t, nc, "foo", "Hello World!")
15956 
15957 sub, _ := nc.SubscribeSync(nats.NewInbox())
15958 defer sub.Unsubscribe()
15959 nc.Flush()
15960 
15961 o, err := mset.addConsumer(&ConsumerConfig{
15962 DeliverSubject: sub.Subject,
15963 AckPolicy: AckExplicit,
15964 MaxDeliver: 1000,
15965 BackOff: []time.Duration{50 * time.Millisecond, 250 * time.Millisecond, time.Second},
15966 })
15967 if err != nil {
15968 t.Fatalf("Expected no error, got %v", err)
15969 }
15970 defer o.delete()
15971 
15972 // Check the first delivery and the following 2 redeliveries.
15973 start := time.Now()
15974 natsNexMsg(t, sub, time.Second)
15975 if dur := time.Since(start); dur >= 50*time.Millisecond {
15976 t.Fatalf("Expected first delivery to be fast, took: %v", dur)
15977 }
15978 start = time.Now()
15979 natsNexMsg(t, sub, time.Second)
15980 if dur := time.Since(start); dur < 25*time.Millisecond || dur > 75*time.Millisecond {
15981 t.Fatalf("Expected first redelivery to be ~50ms, took: %v", dur)
15982 }
15983 start = time.Now()
15984 natsNexMsg(t, sub, time.Second)
15985 if dur := time.Since(start); dur < 200*time.Millisecond || dur > 300*time.Millisecond {
15986 t.Fatalf("Expected second redelivery to be ~250ms, took: %v", dur)
15987 }
15988 // There was a bug that would cause checkPending to be invoked based on the
15989 // ackWait (which in this case would be the first value of BackOff, which
15990 // is 50ms). So we would call checkPending() too many times.
15991 time.Sleep(500 * time.Millisecond)
15992 // Check now, it should have been invoked twice.
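// The wrapped store only counts FastState calls whose stack contains checkPending (see checkFastState above),
// so this reflects how often checkPending actually ran.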
15993 if n := atomic.LoadInt64(&st.count); n != 2 { 15994 t.Fatalf("Expected checkPending to be invoked 2 times, was %v", n) 15995 } 15996 } 15997 15998 func TestJetStreamCrossAccounts(t *testing.T) { 15999 conf := createConfFile(t, []byte(fmt.Sprintf(` 16000 listen: 127.0.0.1:-1 16001 jetstream { 16002 store_dir = %q 16003 } 16004 accounts: { 16005 A: { 16006 users: [ {user: a, password: a} ] 16007 jetstream: enabled 16008 exports: [ 16009 {service: '$JS.API.>' } 16010 {service: '$KV.>'} 16011 {stream: 'accI.>'} 16012 ] 16013 }, 16014 I: { 16015 users: [ {user: i, password: i} ] 16016 imports: [ 16017 {service: {account: A, subject: '$JS.API.>'}, to: 'fromA.>' } 16018 {service: {account: A, subject: '$KV.>'}, to: 'fromA.$KV.>' } 16019 {stream: {subject: 'accI.>', account: A}} 16020 ] 16021 } 16022 }`, t.TempDir()))) 16023 s, _ := RunServerWithConfig(conf) 16024 defer s.Shutdown() 16025 16026 watchNext := func(w nats.KeyWatcher) nats.KeyValueEntry { 16027 t.Helper() 16028 select { 16029 case e := <-w.Updates(): 16030 return e 16031 case <-time.After(time.Second): 16032 t.Fatal("Fail to get the next update") 16033 } 16034 return nil 16035 } 16036 16037 nc1, js1 := jsClientConnect(t, s, nats.UserInfo("a", "a")) 16038 defer nc1.Close() 16039 16040 kv1, err := js1.CreateKeyValue(&nats.KeyValueConfig{Bucket: "Map", History: 10}) 16041 if err != nil { 16042 t.Fatalf("Error creating kv store: %v", err) 16043 } 16044 16045 w1, err := kv1.Watch("map") 16046 if err != nil { 16047 t.Fatalf("Error creating watcher: %v", err) 16048 } 16049 if e := watchNext(w1); e != nil { 16050 t.Fatalf("Expected nil entry, got %+v", e) 16051 } 16052 16053 nc2, err := nats.Connect(s.ClientURL(), nats.UserInfo("i", "i"), nats.CustomInboxPrefix("accI")) 16054 if err != nil { 16055 t.Fatalf("Error on connect: %v", err) 16056 } 16057 defer nc2.Close() 16058 js2, err := nc2.JetStream(nats.APIPrefix("fromA")) 16059 if err != nil { 16060 t.Fatalf("Error getting jetstream context: %v", err) 16061 } 16062 16063 kv2, err := js2.CreateKeyValue(&nats.KeyValueConfig{Bucket: "Map", History: 10}) 16064 if err != nil { 16065 t.Fatalf("Error creating kv store: %v", err) 16066 } 16067 16068 w2, err := kv2.Watch("map") 16069 if err != nil { 16070 t.Fatalf("Error creating watcher: %v", err) 16071 } 16072 if e := watchNext(w2); e != nil { 16073 t.Fatalf("Expected nil entry, got %+v", e) 16074 } 16075 16076 // Do a Put from kv2 16077 rev, err := kv2.Put("map", []byte("value")) 16078 if err != nil { 16079 t.Fatalf("Error on put: %v", err) 16080 } 16081 16082 // Get from kv1 16083 e, err := kv1.Get("map") 16084 if err != nil { 16085 t.Fatalf("Error on get: %v", err) 16086 } 16087 if e.Key() != "map" || string(e.Value()) != "value" { 16088 t.Fatalf("Unexpected entry: +%v", e) 16089 } 16090 16091 // Get from kv2 16092 e, err = kv2.Get("map") 16093 if err != nil { 16094 t.Fatalf("Error on get: %v", err) 16095 } 16096 if e.Key() != "map" || string(e.Value()) != "value" { 16097 t.Fatalf("Unexpected entry: +%v", e) 16098 } 16099 16100 // Watcher 1 16101 if e := watchNext(w1); e == nil || e.Key() != "map" || string(e.Value()) != "value" { 16102 t.Fatalf("Unexpected entry: %+v", e) 16103 } 16104 16105 // Watcher 2 16106 if e := watchNext(w2); e == nil || e.Key() != "map" || string(e.Value()) != "value" { 16107 t.Fatalf("Unexpected entry: %+v", e) 16108 } 16109 16110 // Try an update form kv2 16111 if _, err := kv2.Update("map", []byte("updated"), rev); err != nil { 16112 t.Fatalf("Failed to update: %v", err) 16113 } 16114 16115 // Get from kv1 16116 
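// The update made through the importing account (kv2) should also be visible from the exporting account (kv1).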
e, err = kv1.Get("map") 16117 if err != nil { 16118 t.Fatalf("Error on get: %v", err) 16119 } 16120 if e.Key() != "map" || string(e.Value()) != "updated" { 16121 t.Fatalf("Unexpected entry: +%v", e) 16122 } 16123 16124 // Get from kv2 16125 e, err = kv2.Get("map") 16126 if err != nil { 16127 t.Fatalf("Error on get: %v", err) 16128 } 16129 if e.Key() != "map" || string(e.Value()) != "updated" { 16130 t.Fatalf("Unexpected entry: +%v", e) 16131 } 16132 16133 // Watcher 1 16134 if e := watchNext(w1); e == nil || e.Key() != "map" || string(e.Value()) != "updated" { 16135 t.Fatalf("Unexpected entry: %+v", e) 16136 } 16137 16138 // Watcher 2 16139 if e := watchNext(w2); e == nil || e.Key() != "map" || string(e.Value()) != "updated" { 16140 t.Fatalf("Unexpected entry: %+v", e) 16141 } 16142 16143 // Purge from kv2 16144 if err := kv2.Purge("map"); err != nil { 16145 t.Fatalf("Error on purge: %v", err) 16146 } 16147 16148 // Check purge ok from w1 16149 if e := watchNext(w1); e == nil || e.Operation() != nats.KeyValuePurge { 16150 t.Fatalf("Unexpected entry: %+v", e) 16151 } 16152 16153 // Check purge ok from w2 16154 if e := watchNext(w2); e == nil || e.Operation() != nats.KeyValuePurge { 16155 t.Fatalf("Unexpected entry: %+v", e) 16156 } 16157 16158 // Delete purge records from kv2 16159 if err := kv2.PurgeDeletes(nats.DeleteMarkersOlderThan(-1)); err != nil { 16160 t.Fatalf("Error on purge deletes: %v", err) 16161 } 16162 16163 // Check all gone from js1 16164 if si, err := js1.StreamInfo("KV_Map"); err != nil || si == nil || si.State.Msgs != 0 { 16165 t.Fatalf("Error getting stream info: err=%v si=%+v", err, si) 16166 } 16167 16168 // Delete key from kv2 16169 if err := kv2.Delete("map"); err != nil { 16170 t.Fatalf("Error on delete: %v", err) 16171 } 16172 16173 // Check key gone from kv1 16174 if e, err := kv1.Get("map"); err != nats.ErrKeyNotFound || e != nil { 16175 t.Fatalf("Expected key not found, got err=%v e=%+v", err, e) 16176 } 16177 } 16178 16179 func TestJetStreamInvalidRestoreRequests(t *testing.T) { 16180 test := func(t *testing.T, s *Server, replica int) { 16181 nc := natsConnect(t, s.ClientURL()) 16182 defer nc.Close() 16183 // test invalid stream config in restore request 16184 require_fail := func(cfg StreamConfig, errDesc string) { 16185 t.Helper() 16186 rreq := &JSApiStreamRestoreRequest{ 16187 Config: cfg, 16188 } 16189 req, err := json.Marshal(rreq) 16190 require_NoError(t, err) 16191 rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamRestoreT, "fail"), req, time.Second) 16192 if err != nil { 16193 t.Fatalf("Unexpected error: %v", err) 16194 } 16195 var rresp JSApiStreamRestoreResponse 16196 json.Unmarshal(rmsg.Data, &rresp) 16197 require_True(t, rresp.Error != nil) 16198 require_Equal(t, rresp.Error.Description, errDesc) 16199 } 16200 require_fail(StreamConfig{Name: "fail", MaxBytes: 1024, Storage: FileStorage, Replicas: 6}, 16201 "maximum replicas is 5") 16202 require_fail(StreamConfig{Name: "fail", MaxBytes: 2 * 1012 * 1024, Storage: FileStorage, Replicas: replica}, 16203 "insufficient storage resources available") 16204 js, err := nc.JetStream() 16205 require_NoError(t, err) 16206 _, err = js.AddStream(&nats.StreamConfig{Name: "stream", MaxBytes: 1024, Storage: nats.FileStorage, Replicas: 1}) 16207 require_NoError(t, err) 16208 require_fail(StreamConfig{Name: "fail", MaxBytes: 1024, Storage: FileStorage}, 16209 "maximum number of streams reached") 16210 } 16211 16212 commonAccSection := ` 16213 no_auth_user: u 16214 accounts { 16215 ONE { 16216 users = [ { user: "u", pass: 
"s3cr3t!" } ] 16217 jetstream: { 16218 max_store: 1Mb 16219 max_streams: 1 16220 } 16221 } 16222 $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } 16223 }` 16224 16225 t.Run("clustered", func(t *testing.T) { 16226 c := createJetStreamClusterWithTemplate(t, ` 16227 listen: 127.0.0.1:-1 16228 server_name: %s 16229 jetstream: { 16230 max_mem_store: 2MB, 16231 max_file_store: 8MB, 16232 store_dir: '%s', 16233 } 16234 cluster { 16235 name: %s 16236 listen: 127.0.0.1:%d 16237 routes = [%s] 16238 }`+commonAccSection, "clust", 3) 16239 defer c.shutdown() 16240 s := c.randomServer() 16241 test(t, s, 3) 16242 }) 16243 t.Run("single", func(t *testing.T) { 16244 storeDir := t.TempDir() 16245 conf := createConfFile(t, []byte(fmt.Sprintf(` 16246 listen: 127.0.0.1:-1 16247 jetstream: {max_mem_store: 2MB, max_file_store: 8MB, store_dir: '%s'} 16248 %s`, storeDir, commonAccSection))) 16249 s, _ := RunServerWithConfig(conf) 16250 defer s.Shutdown() 16251 test(t, s, 1) 16252 }) 16253 } 16254 16255 func TestJetStreamLimits(t *testing.T) { 16256 test := func(t *testing.T, s *Server) { 16257 nc := natsConnect(t, s.ClientURL()) 16258 defer nc.Close() 16259 16260 js, err := nc.JetStream() 16261 require_NoError(t, err) 16262 16263 si, err := js.AddStream(&nats.StreamConfig{Name: "foo"}) 16264 require_NoError(t, err) 16265 require_True(t, si.Config.Duplicates == time.Minute) 16266 16267 si, err = js.AddStream(&nats.StreamConfig{Name: "bar", Duplicates: 1500 * time.Millisecond}) 16268 require_NoError(t, err) 16269 require_True(t, si.Config.Duplicates == 1500*time.Millisecond) 16270 16271 _, err = js.UpdateStream(&nats.StreamConfig{Name: "bar", Duplicates: 2 * time.Minute}) 16272 require_Error(t, err) 16273 require_Equal(t, err.Error(), "nats: duplicates window can not be larger then server limit of 1m0s") 16274 16275 _, err = js.AddStream(&nats.StreamConfig{Name: "baz", Duplicates: 2 * time.Minute}) 16276 require_Error(t, err) 16277 require_Equal(t, err.Error(), "nats: duplicates window can not be larger then server limit of 1m0s") 16278 16279 ci, err := js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dur1", AckPolicy: nats.AckExplicitPolicy}) 16280 require_NoError(t, err) 16281 require_True(t, ci.Config.MaxAckPending == 1000) 16282 require_True(t, ci.Config.MaxRequestBatch == 250) 16283 16284 _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dur2", AckPolicy: nats.AckExplicitPolicy, MaxRequestBatch: 500}) 16285 require_Error(t, err) 16286 require_Equal(t, err.Error(), "nats: consumer max request batch exceeds server limit of 250") 16287 16288 ci, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dur2", AckPolicy: nats.AckExplicitPolicy, MaxAckPending: 500}) 16289 require_NoError(t, err) 16290 require_True(t, ci.Config.MaxAckPending == 500) 16291 require_True(t, ci.Config.MaxRequestBatch == 250) 16292 16293 _, err = js.UpdateConsumer("foo", &nats.ConsumerConfig{Durable: "dur2", AckPolicy: nats.AckExplicitPolicy, MaxAckPending: 2000}) 16294 require_Error(t, err) 16295 require_Equal(t, err.Error(), "nats: consumer max ack pending exceeds system limit of 1000") 16296 16297 _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dur3", AckPolicy: nats.AckExplicitPolicy, MaxAckPending: 2000}) 16298 require_Error(t, err) 16299 require_Equal(t, err.Error(), "nats: consumer max ack pending exceeds system limit of 1000") 16300 } 16301 16302 t.Run("clustered", func(t *testing.T) { 16303 tmpl := ` 16304 listen: 127.0.0.1:-1 16305 server_name: %s 16306 jetstream: { 16307 max_mem_store: 2MB, 
16308 max_file_store: 8MB, 16309 store_dir: '%s', 16310 limits: {duplicate_window: "1m", max_request_batch: 250} 16311 } 16312 cluster { 16313 name: %s 16314 listen: 127.0.0.1:%d 16315 routes = [%s] 16316 } 16317 no_auth_user: u 16318 accounts { 16319 ONE { 16320 users = [ { user: "u", pass: "s3cr3t!" } ] 16321 jetstream: enabled 16322 } 16323 $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } 16324 }` 16325 limitsTest := func(t *testing.T, tmpl string) { 16326 c := createJetStreamClusterWithTemplate(t, tmpl, "clust", 3) 16327 defer c.shutdown() 16328 s := c.randomServer() 16329 test(t, s) 16330 } 16331 // test with max_ack_pending being defined in operator or account 16332 t.Run("operator", func(t *testing.T) { 16333 limitsTest(t, strings.Replace(tmpl, "duplicate_window", "max_ack_pending: 1000, duplicate_window", 1)) 16334 }) 16335 t.Run("account", func(t *testing.T) { 16336 limitsTest(t, strings.Replace(tmpl, "jetstream: enabled", "jetstream: {max_ack_pending: 1000}", 1)) 16337 }) 16338 }) 16339 16340 t.Run("single", func(t *testing.T) { 16341 tmpl := ` 16342 listen: 127.0.0.1:-1 16343 jetstream: { 16344 max_mem_store: 2MB, 16345 max_file_store: 8MB, 16346 store_dir: '%s', 16347 limits: {duplicate_window: "1m", max_request_batch: 250} 16348 } 16349 no_auth_user: u 16350 accounts { 16351 ONE { 16352 users = [ { user: "u", pass: "s3cr3t!" } ] 16353 jetstream: enabled 16354 } 16355 $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } 16356 }` 16357 limitsTest := func(t *testing.T, tmpl string) { 16358 storeDir := t.TempDir() 16359 conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, storeDir))) 16360 s, opts := RunServerWithConfig(conf) 16361 defer s.Shutdown() 16362 require_True(t, opts.JetStreamLimits.Duplicates == time.Minute) 16363 test(t, s) 16364 } 16365 // test with max_ack_pending being defined in operator or account 16366 t.Run("operator", func(t *testing.T) { 16367 limitsTest(t, strings.Replace(tmpl, "duplicate_window", "max_ack_pending: 1000, duplicate_window", 1)) 16368 }) 16369 t.Run("account", func(t *testing.T) { 16370 limitsTest(t, strings.Replace(tmpl, "jetstream: enabled", "jetstream: {max_ack_pending: 1000}", 1)) 16371 }) 16372 }) 16373 } 16374 16375 func TestJetStreamConsumerStreamUpdate(t *testing.T) { 16376 test := func(t *testing.T, s *Server, replica int) { 16377 nc := natsConnect(t, s.ClientURL()) 16378 defer nc.Close() 16379 js, err := nc.JetStream() 16380 require_NoError(t, err) 16381 _, err = js.AddStream(&nats.StreamConfig{Name: "foo", Duplicates: 1 * time.Minute, Replicas: replica}) 16382 defer js.DeleteStream("foo") 16383 require_NoError(t, err) 16384 // Update with no change 16385 _, err = js.UpdateStream(&nats.StreamConfig{Name: "foo", Duplicates: 1 * time.Minute, Replicas: replica}) 16386 require_NoError(t, err) 16387 // Update with change 16388 _, err = js.UpdateStream(&nats.StreamConfig{Description: "stream", Name: "foo", Duplicates: 1 * time.Minute, Replicas: replica}) 16389 require_NoError(t, err) 16390 _, err = js.AddConsumer("foo", &nats.ConsumerConfig{Durable: "dur1", AckPolicy: nats.AckExplicitPolicy}) 16391 require_NoError(t, err) 16392 // Update with no change 16393 _, err = js.UpdateConsumer("foo", &nats.ConsumerConfig{Durable: "dur1", AckPolicy: nats.AckExplicitPolicy}) 16394 require_NoError(t, err) 16395 // Update with change 16396 _, err = js.UpdateConsumer("foo", &nats.ConsumerConfig{Description: "consumer", Durable: "dur1", AckPolicy: nats.AckExplicitPolicy}) 16397 require_NoError(t, err) 16398 } 16399 t.Run("clustered", func(t 
*testing.T) { 16400 c := createJetStreamClusterWithTemplate(t, ` 16401 listen: 127.0.0.1:-1 16402 server_name: %s 16403 jetstream: { 16404 max_mem_store: 2MB, 16405 max_file_store: 8MB, 16406 store_dir: '%s', 16407 } 16408 cluster { 16409 name: %s 16410 listen: 127.0.0.1:%d 16411 routes = [%s] 16412 } 16413 no_auth_user: u 16414 accounts { 16415 ONE { 16416 users = [ { user: "u", pass: "s3cr3t!" } ] 16417 jetstream: enabled 16418 } 16419 $SYS { users = [ { user: "admin", pass: "s3cr3t!" } ] } 16420 }`, "clust", 3) 16421 defer c.shutdown() 16422 s := c.randomServer() 16423 t.Run("r3", func(t *testing.T) { 16424 test(t, s, 3) 16425 }) 16426 t.Run("r1", func(t *testing.T) { 16427 test(t, s, 1) 16428 }) 16429 }) 16430 t.Run("single", func(t *testing.T) { 16431 storeDir := t.TempDir() 16432 conf := createConfFile(t, []byte(fmt.Sprintf(` 16433 listen: 127.0.0.1:-1 16434 jetstream: {max_mem_store: 2MB, max_file_store: 8MB, store_dir: '%s'}`, 16435 storeDir))) 16436 s, _ := RunServerWithConfig(conf) 16437 defer s.Shutdown() 16438 test(t, s, 1) 16439 }) 16440 } 16441 16442 func TestJetStreamImportReload(t *testing.T) { 16443 storeDir := t.TempDir() 16444 16445 conf := createConfFile(t, []byte(fmt.Sprintf(` 16446 listen: 127.0.0.1:-1 16447 jetstream: {max_mem_store: 2MB, max_file_store: 8MB, store_dir: '%s'} 16448 accounts: { 16449 account_a: { 16450 users: [{user: user_a, password: pwd}] 16451 exports: [{stream: news.>}] 16452 } 16453 account_b: { 16454 users: [{user: user_b, password: pwd}] 16455 jetstream: enabled 16456 imports: [{stream: {subject: news.>, account: account_a}}] 16457 } 16458 }`, storeDir))) 16459 s, _ := RunServerWithConfig(conf) 16460 defer s.Shutdown() 16461 16462 ncA := natsConnect(t, s.ClientURL(), nats.UserInfo("user_a", "pwd")) 16463 defer ncA.Close() 16464 16465 ncB := natsConnect(t, s.ClientURL(), nats.UserInfo("user_b", "pwd")) 16466 defer ncB.Close() 16467 16468 jsB, err := ncB.JetStream() 16469 require_NoError(t, err) 16470 16471 _, err = jsB.AddStream(&nats.StreamConfig{Name: "news", Subjects: []string{"news.>"}}) 16472 require_NoError(t, err) 16473 16474 require_NoError(t, ncA.Publish("news.article", nil)) 16475 require_NoError(t, ncA.Flush()) 16476 16477 si, err := jsB.StreamInfo("news") 16478 require_NoError(t, err) 16479 require_True(t, si.State.Msgs == 1) 16480 16481 // Remove exports/imports 16482 reloadUpdateConfig(t, s, conf, fmt.Sprintf(` 16483 listen: 127.0.0.1:-1 16484 jetstream: {max_mem_store: 2MB, max_file_store: 8MB, store_dir: '%s'} 16485 accounts: { 16486 account_a: { 16487 users: [{user: user_a, password: pwd}] 16488 } 16489 account_b: { 16490 users: [{user: user_b, password: pwd}] 16491 jetstream: enabled 16492 } 16493 }`, storeDir)) 16494 16495 require_NoError(t, ncA.Publish("news.article", nil)) 16496 require_NoError(t, ncA.Flush()) 16497 16498 si, err = jsB.StreamInfo("news") 16499 require_NoError(t, err) 16500 require_True(t, si.State.Msgs == 1) 16501 } 16502 16503 func TestJetStreamRecoverSealedAfterServerRestart(t *testing.T) { 16504 s := RunBasicJetStreamServer(t) 16505 defer s.Shutdown() 16506 16507 nc, js := jsClientConnect(t, s) 16508 defer nc.Close() 16509 16510 _, err := js.AddStream(&nats.StreamConfig{Name: "foo"}) 16511 require_NoError(t, err) 16512 16513 for i := 0; i < 100; i++ { 16514 js.PublishAsync("foo", []byte("OK")) 16515 } 16516 <-js.PublishAsyncComplete() 16517 16518 _, err = js.UpdateStream(&nats.StreamConfig{Name: "foo", Sealed: true}) 16519 require_NoError(t, err) 16520 16521 nc.Close() 16522 16523 // Stop current 16524 sd 
:= s.JetStreamConfig().StoreDir 16525 s.Shutdown() 16526 // Restart. 16527 s = RunJetStreamServerOnPort(-1, sd) 16528 defer s.Shutdown() 16529 16530 nc, js = jsClientConnect(t, s) 16531 defer nc.Close() 16532 16533 si, err := js.StreamInfo("foo") 16534 require_NoError(t, err) 16535 require_True(t, si.State.Msgs == 100) 16536 } 16537 16538 func TestJetStreamImportConsumerStreamSubjectRemapSingle(t *testing.T) { 16539 conf := createConfFile(t, []byte(fmt.Sprintf(` 16540 listen: 127.0.0.1:-1 16541 jetstream: {max_mem_store: 4GB, max_file_store: 1TB, store_dir: %q} 16542 accounts: { 16543 JS: { 16544 jetstream: enabled 16545 users: [ {user: js, password: pwd} ] 16546 exports [ 16547 # This is streaming to a delivery subject for a push based consumer. 16548 { stream: "deliver.*" } 16549 { stream: "foo.*" } 16550 # This is to ack received messages. This is a service to support sync ack. 16551 { service: "$JS.ACK.ORDERS.*.>" } 16552 # To support ordered consumers, flow control. 16553 { service: "$JS.FC.>" } 16554 ] 16555 }, 16556 IM: { 16557 users: [ {user: im, password: pwd} ] 16558 imports [ 16559 { stream: { account: JS, subject: "deliver.ORDERS" }, to: "d.*" } 16560 { stream: { account: JS, subject: "foo.*" }, to: "bar.*" } 16561 { service: { account: JS, subject: "$JS.FC.>" }} 16562 ] 16563 }, 16564 } 16565 `, t.TempDir()))) 16566 16567 test := func(t *testing.T, queue bool) { 16568 s, _ := RunServerWithConfig(conf) 16569 defer s.Shutdown() 16570 16571 nc, js := jsClientConnect(t, s, nats.UserInfo("js", "pwd")) 16572 defer nc.Close() 16573 16574 _, err := js.AddStream(&nats.StreamConfig{ 16575 Name: "ORDERS", 16576 Subjects: []string{"foo"}, // The JS subject. 16577 Storage: nats.MemoryStorage}, 16578 ) 16579 require_NoError(t, err) 16580 16581 _, err = js.Publish("foo", []byte("OK")) 16582 require_NoError(t, err) 16583 16584 queueName := "" 16585 if queue { 16586 queueName = "queue" 16587 } 16588 16589 _, err = js.AddConsumer("ORDERS", &nats.ConsumerConfig{ 16590 DeliverSubject: "deliver.ORDERS", 16591 AckPolicy: nats.AckExplicitPolicy, 16592 DeliverGroup: queueName, 16593 }) 16594 require_NoError(t, err) 16595 16596 nc2, err := nats.Connect(s.ClientURL(), nats.UserInfo("im", "pwd")) 16597 require_NoError(t, err) 16598 defer nc2.Close() 16599 16600 var sub *nats.Subscription 16601 if queue { 16602 sub, err = nc2.QueueSubscribeSync("d.ORDERS", queueName) 16603 require_NoError(t, err) 16604 } else { 16605 sub, err = nc2.SubscribeSync("d.ORDERS") 16606 require_NoError(t, err) 16607 } 16608 16609 m, err := sub.NextMsg(time.Second) 16610 require_NoError(t, err) 16611 16612 if m.Subject != "foo" { 16613 t.Fatalf("Subject not mapped correctly across account boundary, expected %q got %q", "foo", m.Subject) 16614 } 16615 16616 // Now do one that would kick in a transform. 
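// The IM account imports the JS account's "foo.*" stream as "bar.*", so a deliver subject of "foo.ORDERS"
// is received on "bar.ORDERS" by the importing client.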
16617 _, err = js.AddConsumer("ORDERS", &nats.ConsumerConfig{ 16618 DeliverSubject: "foo.ORDERS", 16619 AckPolicy: nats.AckExplicitPolicy, 16620 DeliverGroup: queueName, 16621 }) 16622 require_NoError(t, err) 16623 16624 if queue { 16625 sub, err = nc2.QueueSubscribeSync("bar.ORDERS", queueName) 16626 require_NoError(t, err) 16627 } else { 16628 sub, err = nc2.SubscribeSync("bar.ORDERS") 16629 require_NoError(t, err) 16630 } 16631 m, err = sub.NextMsg(time.Second) 16632 require_NoError(t, err) 16633 16634 if m.Subject != "foo" { 16635 t.Fatalf("Subject not mapped correctly across account boundary, expected %q got %q", "foo", m.Subject) 16636 } 16637 } 16638 16639 t.Run("noqueue", func(t *testing.T) { 16640 test(t, false) 16641 }) 16642 t.Run("queue", func(t *testing.T) { 16643 test(t, true) 16644 }) 16645 } 16646 16647 func TestJetStreamWorkQueueSourceRestart(t *testing.T) { 16648 s := RunBasicJetStreamServer(t) 16649 defer s.Shutdown() 16650 16651 nc, js := jsClientConnect(t, s) 16652 defer nc.Close() 16653 16654 sent := 10 16655 _, err := js.AddStream(&nats.StreamConfig{ 16656 Name: "FOO", 16657 Replicas: 1, 16658 Subjects: []string{"foo"}, 16659 }) 16660 require_NoError(t, err) 16661 16662 for i := 0; i < sent; i++ { 16663 _, err = js.Publish("foo", nil) 16664 require_NoError(t, err) 16665 } 16666 16667 _, err = js.AddStream(&nats.StreamConfig{ 16668 Name: "TEST", 16669 Replicas: 1, 16670 // TODO test will pass when retention commented out 16671 Retention: nats.WorkQueuePolicy, 16672 Sources: []*nats.StreamSource{{Name: "FOO"}}}) 16673 require_NoError(t, err) 16674 16675 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{Durable: "dur", AckPolicy: nats.AckExplicitPolicy}) 16676 require_NoError(t, err) 16677 16678 sub, err := js.PullSubscribe("foo", "dur", nats.BindStream("TEST")) 16679 require_NoError(t, err) 16680 16681 time.Sleep(100 * time.Millisecond) 16682 16683 ci, err := js.ConsumerInfo("TEST", "dur") 16684 require_NoError(t, err) 16685 require_True(t, ci.NumPending == uint64(sent)) 16686 16687 msgs, err := sub.Fetch(sent) 16688 require_NoError(t, err) 16689 require_True(t, len(msgs) == sent) 16690 16691 for i := 0; i < sent; i++ { 16692 err = msgs[i].AckSync() 16693 require_NoError(t, err) 16694 } 16695 16696 ci, err = js.ConsumerInfo("TEST", "dur") 16697 require_NoError(t, err) 16698 require_True(t, ci.NumPending == 0) 16699 16700 si, err := js.StreamInfo("TEST") 16701 require_NoError(t, err) 16702 require_True(t, si.State.Msgs == 0) 16703 16704 // Restart server 16705 nc.Close() 16706 sd := s.JetStreamConfig().StoreDir 16707 s.Shutdown() 16708 time.Sleep(200 * time.Millisecond) 16709 s = RunJetStreamServerOnPort(-1, sd) 16710 defer s.Shutdown() 16711 16712 checkFor(t, 10*time.Second, 200*time.Millisecond, func() error { 16713 hs := s.healthz(nil) 16714 if hs.Status == "ok" && hs.Error == _EMPTY_ { 16715 return nil 16716 } 16717 return fmt.Errorf("healthz %s %s", hs.Error, hs.Status) 16718 }) 16719 16720 nc, js = jsClientConnect(t, s) 16721 defer nc.Close() 16722 16723 si, err = js.StreamInfo("TEST") 16724 require_NoError(t, err) 16725 16726 if si.State.Msgs != 0 { 16727 t.Fatalf("Expected 0 messages on restart, got %d", si.State.Msgs) 16728 } 16729 16730 ctest, err := js.ConsumerInfo("TEST", "dur") 16731 require_NoError(t, err) 16732 16733 //TODO (mh) I have experienced in other tests that NumPending has a value of 1 post restart. 16734 // seems to go awary in single server setup. It's also unrelated to work queue 16735 // but that error seems benign. 
16736 if ctest.NumPending != 0 { 16737 t.Fatalf("Expected pending of 0 but got %d", ctest.NumPending) 16738 } 16739 16740 sub, err = js.PullSubscribe("foo", "dur", nats.BindStream("TEST")) 16741 require_NoError(t, err) 16742 _, err = sub.Fetch(1, nats.MaxWait(time.Second)) 16743 if err != nats.ErrTimeout { 16744 require_NoError(t, err) 16745 } 16746 } 16747 16748 func TestJetStreamWorkQueueSourceNamingRestart(t *testing.T) { 16749 s := RunBasicJetStreamServer(t) 16750 defer s.Shutdown() 16751 16752 nc, js := jsClientConnect(t, s) 16753 defer nc.Close() 16754 16755 _, err := js.AddStream(&nats.StreamConfig{Name: "C1", Subjects: []string{"foo.*"}}) 16756 require_NoError(t, err) 16757 _, err = js.AddStream(&nats.StreamConfig{Name: "C2", Subjects: []string{"bar.*"}}) 16758 require_NoError(t, err) 16759 16760 sendCount := 10 16761 for i := 0; i < sendCount; i++ { 16762 _, err = js.Publish(fmt.Sprintf("foo.%d", i), nil) 16763 require_NoError(t, err) 16764 _, err = js.Publish(fmt.Sprintf("bar.%d", i), nil) 16765 require_NoError(t, err) 16766 } 16767 16768 // TODO Test will always pass if pending is 0 16769 pending := 1 16770 // For some yet unknown reason this failure seems to require 2 streams to source from. 16771 // This might possibly be timing, as the test sometimes passes 16772 streams := 2 16773 totalPending := uint64(streams * pending) 16774 totalMsgs := streams * sendCount 16775 totalNonPending := streams * (sendCount - pending) 16776 16777 // TODO Test will always pass if this is named A (go returns directory names sorted) 16778 // A: this stream is recovered BEFORE C1/C2, tbh, I'd expect this to be the case to fail, but it isn't 16779 // D: this stream is recovered AFTER C1/C2, which is the case that fails (perhaps it is timing) 16780 srcName := "D" 16781 _, err = js.AddStream(&nats.StreamConfig{ 16782 Name: srcName, 16783 Retention: nats.WorkQueuePolicy, 16784 Sources: []*nats.StreamSource{{Name: "C1"}, {Name: "C2"}}, 16785 }) 16786 require_NoError(t, err) 16787 16788 // Add a consumer and consume all but totalPending messages 16789 _, err = js.AddConsumer(srcName, &nats.ConsumerConfig{Durable: "dur", AckPolicy: nats.AckExplicitPolicy}) 16790 require_NoError(t, err) 16791 16792 sub, err := js.PullSubscribe("", "dur", nats.BindStream(srcName)) 16793 require_NoError(t, err) 16794 16795 checkFor(t, 5*time.Second, time.Millisecond*200, func() error { 16796 if ci, err := js.ConsumerInfo(srcName, "dur"); err != nil { 16797 return err 16798 } else if ci.NumPending != uint64(totalMsgs) { 16799 return fmt.Errorf("not enough messages: %d", ci.NumPending) 16800 } 16801 return nil 16802 }) 16803 16804 // consume all but messages we want pending 16805 msgs, err := sub.Fetch(totalNonPending) 16806 require_NoError(t, err) 16807 require_True(t, len(msgs) == totalNonPending) 16808 16809 for _, m := range msgs { 16810 err = m.AckSync() 16811 require_NoError(t, err) 16812 } 16813 16814 ci, err := js.ConsumerInfo(srcName, "dur") 16815 require_NoError(t, err) 16816 require_True(t, ci.NumPending == totalPending) 16817 16818 si, err := js.StreamInfo(srcName) 16819 require_NoError(t, err) 16820 require_True(t, si.State.Msgs == totalPending) 16821 16822 // Restart server 16823 nc.Close() 16824 sd := s.JetStreamConfig().StoreDir 16825 s.Shutdown() 16826 time.Sleep(200 * time.Millisecond) 16827 s = RunJetStreamServerOnPort(-1, sd) 16828 defer s.Shutdown() 16829 16830 checkFor(t, 10*time.Second, 200*time.Millisecond, func() error { 16831 hs := s.healthz(nil) 16832 if hs.Status == "ok" && hs.Error == _EMPTY_ { 
16833 return nil 16834 } 16835 return fmt.Errorf("healthz %s %s", hs.Error, hs.Status) 16836 }) 16837 16838 nc, js = jsClientConnect(t, s) 16839 defer nc.Close() 16840 16841 si, err = js.StreamInfo(srcName) 16842 require_NoError(t, err) 16843 16844 if si.State.Msgs != totalPending { 16845 t.Fatalf("Expected %d messages on restart, got %d", totalPending, si.State.Msgs) 16846 } 16847 } 16848 16849 func TestJetStreamDisabledHealthz(t *testing.T) { 16850 s := RunBasicJetStreamServer(t) 16851 defer s.Shutdown() 16852 16853 if !s.JetStreamEnabled() { 16854 t.Fatalf("Expected JetStream to be enabled") 16855 } 16856 16857 s.DisableJetStream() 16858 16859 hs := s.healthz(&HealthzOptions{JSEnabledOnly: true}) 16860 if hs.Status == "unavailable" && hs.Error == NewJSNotEnabledError().Error() { 16861 return 16862 } 16863 16864 t.Fatalf("Expected healthz to return error if JetStream is disabled, got status: %s", hs.Status) 16865 } 16866 16867 func TestJetStreamPullTimeout(t *testing.T) { 16868 s := RunBasicJetStreamServer(t) 16869 defer s.Shutdown() 16870 16871 nc, js := jsClientConnect(t, s) 16872 defer nc.Close() 16873 16874 _, err := js.AddStream(&nats.StreamConfig{ 16875 Name: "TEST", 16876 }) 16877 require_NoError(t, err) 16878 16879 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 16880 Durable: "pr", 16881 AckPolicy: nats.AckExplicitPolicy, 16882 }) 16883 require_NoError(t, err) 16884 16885 const numMessages = 1000 16886 // Send messages at small intervals. 16887 go func() { 16888 for i := 0; i < numMessages; i++ { 16889 time.Sleep(time.Millisecond * 10) 16890 sendStreamMsg(t, nc, "TEST", "data") 16891 } 16892 }() 16893 16894 // Prepare manual Pull Request. 16895 req := &JSApiConsumerGetNextRequest{Batch: 200, NoWait: false, Expires: time.Millisecond * 100} 16896 jreq, _ := json.Marshal(req) 16897 16898 subj := fmt.Sprintf(JSApiRequestNextT, "TEST", "pr") 16899 reply := "_pr_" 16900 var got atomic.Int32 16901 nc.PublishRequest(subj, reply, jreq) 16902 16903 // Manually subscribe to the inbox subject and send a new request only if we get `408 Request Timeout`. 16904 sub, _ := nc.Subscribe(reply, func(msg *nats.Msg) { 16905 if msg.Header.Get("Status") == "408" && msg.Header.Get("Description") == "Request Timeout" { 16906 nc.PublishRequest(subj, reply, jreq) 16907 nc.Flush() 16908 } else { 16909 got.Add(1) 16910 msg.Ack() 16911 } 16912 }) 16913 defer sub.Unsubscribe() 16914 16915 // Check that we're not stuck.
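// With the 408-driven re-requests above, every published message should eventually be delivered and counted.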
16916 checkFor(t, time.Second*30, time.Second*1, func() error { 16917 if got.Load() < int32(numMessages) { 16918 return fmt.Errorf("expected %d messages", numMessages) 16919 } 16920 return nil 16921 }) 16922 } 16923 16924 func TestJetStreamPullMaxBytes(t *testing.T) { 16925 s := RunBasicJetStreamServer(t) 16926 defer s.Shutdown() 16927 16928 nc, js := jsClientConnect(t, s) 16929 defer nc.Close() 16930 16931 _, err := js.AddStream(&nats.StreamConfig{ 16932 Name: "TEST", 16933 }) 16934 require_NoError(t, err) 16935 16936 // Put in ~2MB, each ~100k 16937 msz, dsz := 100_000, 99_950 16938 total, msg := 20, []byte(strings.Repeat("Z", dsz)) 16939 16940 for i := 0; i < total; i++ { 16941 if _, err := js.Publish("TEST", msg); err != nil { 16942 t.Fatalf("Unexpected publish error: %v", err) 16943 } 16944 } 16945 16946 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 16947 Durable: "pr", 16948 AckPolicy: nats.AckExplicitPolicy, 16949 }) 16950 require_NoError(t, err) 16951 16952 req := &JSApiConsumerGetNextRequest{MaxBytes: 100, NoWait: true} 16953 jreq, _ := json.Marshal(req) 16954 16955 subj := fmt.Sprintf(JSApiRequestNextT, "TEST", "pr") 16956 reply := "_pr_" 16957 sub, _ := nc.SubscribeSync(reply) 16958 defer sub.Unsubscribe() 16959 16960 checkHeader := func(m *nats.Msg, expected *nats.Header) { 16961 t.Helper() 16962 if len(m.Data) != 0 { 16963 t.Fatalf("Did not expect data, got %d bytes", len(m.Data)) 16964 } 16965 expectedStatus, givenStatus := expected.Get("Status"), m.Header.Get("Status") 16966 expectedDesc, givenDesc := expected.Get("Description"), m.Header.Get("Description") 16967 if expectedStatus != givenStatus || expectedDesc != givenDesc { 16968 t.Fatalf("expected %s %s, got %s %s", expectedStatus, expectedDesc, givenStatus, givenDesc) 16969 } 16970 } 16971 16972 // If we ask for less MaxBytes than a single message, make sure we get an error. 16973 badReq := &nats.Header{"Status": []string{"409"}, "Description": []string{"Message Size Exceeds MaxBytes"}} 16974 16975 nc.PublishRequest(subj, reply, jreq) 16976 m, err := sub.NextMsg(time.Second) 16977 require_NoError(t, err) 16978 checkSubsPending(t, sub, 0) 16979 checkHeader(m, badReq) 16980 16981 // If we request a ton of max bytes, make sure batch size overrides. 16982 req = &JSApiConsumerGetNextRequest{Batch: 1, MaxBytes: 10_000_000, NoWait: true} 16983 jreq, _ = json.Marshal(req) 16984 nc.PublishRequest(subj, reply, jreq) 16985 // We expect two messages, as the second one should be the `Batch Completed` status. 16986 checkSubsPending(t, sub, 2) 16987 16988 // First one is the message from the stream. 16989 m, err = sub.NextMsg(time.Second) 16990 require_NoError(t, err) 16991 require_True(t, len(m.Data) == dsz) 16992 require_True(t, len(m.Header) == 0) 16993 // Second one is the status. 16994 m, err = sub.NextMsg(time.Second) 16995 require_NoError(t, err) 16996 if v := m.Header.Get("Description"); v != "Batch Completed" { 16997 t.Fatalf("Expected Batch Completed, got: %s", v) 16998 } 16999 checkSubsPending(t, sub, 0) 17000 17001 // Same but with batch > 1 17002 req = &JSApiConsumerGetNextRequest{Batch: 5, MaxBytes: 10_000_000, NoWait: true} 17003 jreq, _ = json.Marshal(req) 17004 nc.PublishRequest(subj, reply, jreq) 17005 // 6, not 5, as the 6th is the status.
17006 checkSubsPending(t, sub, 6) 17007 for i := 0; i < 5; i++ { 17008 m, err = sub.NextMsg(time.Second) 17009 require_NoError(t, err) 17010 require_True(t, len(m.Data) == dsz) 17011 require_True(t, len(m.Header) == 0) 17012 } 17013 m, err = sub.NextMsg(time.Second) 17014 require_NoError(t, err) 17015 if v := m.Header.Get("Description"); v != "Batch Completed" { 17016 t.Fatalf("Expected Batch Completed, got: %s", v) 17017 } 17018 checkSubsPending(t, sub, 0) 17019 17020 // Now ask for large batch but make sure we are limited by batch size. 17021 req = &JSApiConsumerGetNextRequest{Batch: 1_000, MaxBytes: msz * 4, NoWait: true} 17022 jreq, _ = json.Marshal(req) 17023 nc.PublishRequest(subj, reply, jreq) 17024 // Receive 4 messages + the 409 17025 checkSubsPending(t, sub, 5) 17026 for i := 0; i < 4; i++ { 17027 m, err = sub.NextMsg(time.Second) 17028 require_NoError(t, err) 17029 require_True(t, len(m.Data) == dsz) 17030 require_True(t, len(m.Header) == 0) 17031 } 17032 m, err = sub.NextMsg(time.Second) 17033 require_NoError(t, err) 17034 checkHeader(m, badReq) 17035 checkSubsPending(t, sub, 0) 17036 17037 req = &JSApiConsumerGetNextRequest{Batch: 1_000, MaxBytes: msz, NoWait: true} 17038 jreq, _ = json.Marshal(req) 17039 nc.PublishRequest(subj, reply, jreq) 17040 // Receive 1 message + 409 17041 checkSubsPending(t, sub, 2) 17042 m, err = sub.NextMsg(time.Second) 17043 require_NoError(t, err) 17044 require_True(t, len(m.Data) == dsz) 17045 require_True(t, len(m.Header) == 0) 17046 m, err = sub.NextMsg(time.Second) 17047 require_NoError(t, err) 17048 checkHeader(m, badReq) 17049 checkSubsPending(t, sub, 0) 17050 } 17051 17052 func TestJetStreamStreamRepublishCycle(t *testing.T) { 17053 s := RunBasicJetStreamServer(t) 17054 defer s.Shutdown() 17055 17056 nc, _ := jsClientConnect(t, s) 17057 defer nc.Close() 17058 17059 // Do by hand for now. 17060 cfg := &StreamConfig{ 17061 Name: "RPC", 17062 Storage: MemoryStorage, 17063 Subjects: []string{"foo.>", "bar.*", "baz"}, 17064 } 17065 17066 expectFail := func() { 17067 t.Helper() 17068 req, err := json.Marshal(cfg) 17069 require_NoError(t, err) 17070 rmsg, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) 17071 require_NoError(t, err) 17072 var resp JSApiStreamCreateResponse 17073 err = json.Unmarshal(rmsg.Data, &resp) 17074 require_NoError(t, err) 17075 if resp.Type != JSApiStreamCreateResponseType { 17076 t.Fatalf("Invalid response type %s expected %s", resp.Type, JSApiStreamCreateResponseType) 17077 } 17078 if resp.Error == nil { 17079 t.Fatalf("Expected error but got none") 17080 } 17081 if !strings.Contains(resp.Error.Description, "republish destination forms a cycle") { 17082 t.Fatalf("Expected cycle error, got %q", resp.Error.Description) 17083 } 17084 } 17085 17086 cfg.RePublish = &RePublish{ 17087 Source: "foo.>", 17088 Destination: "foo.>", 17089 } 17090 expectFail() 17091 17092 cfg.RePublish = &RePublish{ 17093 Source: "bar.bar", 17094 Destination: "foo.bar", 17095 } 17096 expectFail() 17097 17098 cfg.RePublish = &RePublish{ 17099 Source: "baz", 17100 Destination: "bar.bar", 17101 } 17102 expectFail() 17103 } 17104 17105 func TestJetStreamStreamRepublishOneTokenMatch(t *testing.T) { 17106 s := RunBasicJetStreamServer(t) 17107 defer s.Shutdown() 17108 17109 nc, js := jsClientConnect(t, s) 17110 defer nc.Close() 17111 17112 // Do by hand for now. 
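// Single-token mapping: anything stored on "one" should be republished verbatim to "uno".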
17113 cfg := &StreamConfig{ 17114 Name: "Stream1", 17115 Storage: MemoryStorage, 17116 Subjects: []string{"one", "four"}, 17117 RePublish: &RePublish{ 17118 Source: "one", 17119 Destination: "uno", 17120 HeadersOnly: false, 17121 }, 17122 } 17123 addStream(t, nc, cfg) 17124 17125 sub, err := nc.SubscribeSync("uno") 17126 require_NoError(t, err) 17127 17128 msg, toSend := bytes.Repeat([]byte("Z"), 512), 100 17129 for i := 0; i < toSend; i++ { 17130 js.PublishAsync("one", msg) 17131 } 17132 select { 17133 case <-js.PublishAsyncComplete(): 17134 case <-time.After(5 * time.Second): 17135 t.Fatalf("Did not receive completion signal") 17136 } 17137 17138 checkSubsPending(t, sub, toSend) 17139 m, err := sub.NextMsg(time.Second) 17140 require_NoError(t, err) 17141 17142 if !(len(m.Data) > 0) { 17143 t.Fatalf("Expected msg data") 17144 } 17145 } 17146 17147 func TestJetStreamStreamRepublishMultiTokenMatch(t *testing.T) { 17148 s := RunBasicJetStreamServer(t) 17149 defer s.Shutdown() 17150 17151 nc, js := jsClientConnect(t, s) 17152 defer nc.Close() 17153 17154 // Do by hand for now. 17155 cfg := &StreamConfig{ 17156 Name: "Stream1", 17157 Storage: MemoryStorage, 17158 Subjects: []string{"one.>", "four.>"}, 17159 RePublish: &RePublish{ 17160 Source: "one.two.>", 17161 Destination: "uno.dos.>", 17162 HeadersOnly: false, 17163 }, 17164 } 17165 addStream(t, nc, cfg) 17166 17167 sub, err := nc.SubscribeSync("uno.dos.>") 17168 require_NoError(t, err) 17169 17170 msg, toSend := bytes.Repeat([]byte("Z"), 512), 100 17171 for i := 0; i < toSend; i++ { 17172 js.PublishAsync("one.two.three", msg) 17173 } 17174 select { 17175 case <-js.PublishAsyncComplete(): 17176 case <-time.After(5 * time.Second): 17177 t.Fatalf("Did not receive completion signal") 17178 } 17179 17180 checkSubsPending(t, sub, toSend) 17181 m, err := sub.NextMsg(time.Second) 17182 require_NoError(t, err) 17183 17184 if !(len(m.Data) > 0) { 17185 t.Fatalf("Expected msg data") 17186 } 17187 } 17188 17189 func TestJetStreamStreamRepublishAnySubjectMatch(t *testing.T) { 17190 s := RunBasicJetStreamServer(t) 17191 defer s.Shutdown() 17192 17193 nc, js := jsClientConnect(t, s) 17194 defer nc.Close() 17195 17196 // Do by hand for now. 17197 cfg := &StreamConfig{ 17198 Name: "Stream1", 17199 Storage: MemoryStorage, 17200 Subjects: []string{"one.>", "four.>"}, 17201 RePublish: &RePublish{ 17202 Destination: "uno.dos.>", 17203 HeadersOnly: false, 17204 }, 17205 } 17206 addStream(t, nc, cfg) 17207 17208 sub, err := nc.SubscribeSync("uno.dos.>") 17209 require_NoError(t, err) 17210 17211 msg, toSend := bytes.Repeat([]byte("Z"), 512), 100 17212 for i := 0; i < toSend; i++ { 17213 js.PublishAsync("one.two.three", msg) 17214 } 17215 select { 17216 case <-js.PublishAsyncComplete(): 17217 case <-time.After(5 * time.Second): 17218 t.Fatalf("Did not receive completion signal") 17219 } 17220 17221 checkSubsPending(t, sub, toSend) 17222 m, err := sub.NextMsg(time.Second) 17223 require_NoError(t, err) 17224 17225 if !(len(m.Data) > 0) { 17226 t.Fatalf("Expected msg data") 17227 } 17228 } 17229 17230 func TestJetStreamStreamRepublishMultiTokenNoMatch(t *testing.T) { 17231 s := RunBasicJetStreamServer(t) 17232 defer s.Shutdown() 17233 17234 nc, js := jsClientConnect(t, s) 17235 defer nc.Close() 17236 17237 // Do by hand for now. 
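// Messages on "four.five.six" are stored but do not match the republish source "one.two.>", so nothing should arrive on "uno.dos.>".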
17238 cfg := &StreamConfig{ 17239 Name: "Stream1", 17240 Storage: MemoryStorage, 17241 Subjects: []string{"one.>", "four.>"}, 17242 RePublish: &RePublish{ 17243 Source: "one.two.>", 17244 Destination: "uno.dos.>", 17245 HeadersOnly: true, 17246 }, 17247 } 17248 addStream(t, nc, cfg) 17249 17250 sub, err := nc.SubscribeSync("uno.dos.>") 17251 require_NoError(t, err) 17252 17253 msg, toSend := bytes.Repeat([]byte("Z"), 512), 100 17254 for i := 0; i < toSend; i++ { 17255 js.PublishAsync("four.five.six", msg) 17256 } 17257 select { 17258 case <-js.PublishAsyncComplete(): 17259 case <-time.After(5 * time.Second): 17260 t.Fatalf("Did not receive completion signal") 17261 } 17262 17263 checkSubsPending(t, sub, 0) 17264 require_NoError(t, err) 17265 } 17266 17267 func TestJetStreamStreamRepublishOneTokenNoMatch(t *testing.T) { 17268 s := RunBasicJetStreamServer(t) 17269 defer s.Shutdown() 17270 17271 nc, js := jsClientConnect(t, s) 17272 defer nc.Close() 17273 17274 // Do by hand for now. 17275 cfg := &StreamConfig{ 17276 Name: "Stream1", 17277 Storage: MemoryStorage, 17278 Subjects: []string{"one", "four"}, 17279 RePublish: &RePublish{ 17280 Source: "one", 17281 Destination: "uno", 17282 HeadersOnly: true, 17283 }, 17284 } 17285 addStream(t, nc, cfg) 17286 17287 sub, err := nc.SubscribeSync("uno") 17288 require_NoError(t, err) 17289 17290 msg, toSend := bytes.Repeat([]byte("Z"), 512), 100 17291 for i := 0; i < toSend; i++ { 17292 js.PublishAsync("four", msg) 17293 } 17294 select { 17295 case <-js.PublishAsyncComplete(): 17296 case <-time.After(5 * time.Second): 17297 t.Fatalf("Did not receive completion signal") 17298 } 17299 17300 checkSubsPending(t, sub, 0) 17301 require_NoError(t, err) 17302 } 17303 17304 func TestJetStreamStreamRepublishHeadersOnly(t *testing.T) { 17305 s := RunBasicJetStreamServer(t) 17306 defer s.Shutdown() 17307 17308 nc, js := jsClientConnect(t, s) 17309 defer nc.Close() 17310 17311 // Do by hand for now. 
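// HeadersOnly republish should deliver just the headers (including the JSMsgSize header checked below) and an empty body.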
17312 cfg := &StreamConfig{ 17313 Name: "RPC", 17314 Storage: MemoryStorage, 17315 Subjects: []string{"foo", "bar", "baz"}, 17316 RePublish: &RePublish{ 17317 Destination: "RP.>", 17318 HeadersOnly: true, 17319 }, 17320 } 17321 addStream(t, nc, cfg) 17322 17323 sub, err := nc.SubscribeSync("RP.>") 17324 require_NoError(t, err) 17325 17326 msg, toSend := bytes.Repeat([]byte("Z"), 512), 100 17327 for i := 0; i < toSend; i++ { 17328 js.PublishAsync("foo", msg) 17329 } 17330 select { 17331 case <-js.PublishAsyncComplete(): 17332 case <-time.After(5 * time.Second): 17333 t.Fatalf("Did not receive completion signal") 17334 } 17335 17336 checkSubsPending(t, sub, toSend) 17337 m, err := sub.NextMsg(time.Second) 17338 require_NoError(t, err) 17339 17340 if len(m.Data) > 0 { 17341 t.Fatalf("Expected no msg just headers, but got %d bytes", len(m.Data)) 17342 } 17343 if sz := m.Header.Get(JSMsgSize); sz != "512" { 17344 t.Fatalf("Expected msg size hdr, got %q", sz) 17345 } 17346 } 17347 17348 func TestJetStreamConsumerDeliverNewNotConsumingBeforeRestart(t *testing.T) { 17349 s := RunBasicJetStreamServer(t) 17350 defer s.Shutdown() 17351 17352 nc, js := jsClientConnect(t, s) 17353 defer nc.Close() 17354 17355 _, err := js.AddStream(&nats.StreamConfig{ 17356 Name: "TEST", 17357 Subjects: []string{"foo"}, 17358 }) 17359 require_NoError(t, err) 17360 17361 inbox := nats.NewInbox() 17362 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 17363 DeliverSubject: inbox, 17364 Durable: "dur", 17365 AckPolicy: nats.AckExplicitPolicy, 17366 DeliverPolicy: nats.DeliverNewPolicy, 17367 FilterSubject: "foo", 17368 }) 17369 require_NoError(t, err) 17370 17371 for i := 0; i < 10; i++ { 17372 sendStreamMsg(t, nc, "foo", "msg") 17373 } 17374 17375 checkCount := func(expected int) { 17376 t.Helper() 17377 checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { 17378 ci, err := js.ConsumerInfo("TEST", "dur") 17379 if err != nil { 17380 return err 17381 } 17382 if n := int(ci.NumPending); n != expected { 17383 return fmt.Errorf("Expected %v pending, got %v", expected, n) 17384 } 17385 return nil 17386 }) 17387 } 17388 checkCount(10) 17389 17390 time.Sleep(300 * time.Millisecond) 17391 17392 // Check server restart 17393 nc.Close() 17394 sd := s.JetStreamConfig().StoreDir 17395 s.Shutdown() 17396 // Restart. 17397 s = RunJetStreamServerOnPort(-1, sd) 17398 defer s.Shutdown() 17399 17400 nc, js = jsClientConnect(t, s) 17401 defer nc.Close() 17402 17403 checkCount(10) 17404 17405 // Make sure messages can be consumed 17406 sub := natsSubSync(t, nc, inbox) 17407 for i := 0; i < 10; i++ { 17408 msg, err := sub.NextMsg(time.Second) 17409 if err != nil { 17410 t.Fatalf("i=%v next msg error: %v", i, err) 17411 } 17412 msg.AckSync() 17413 } 17414 checkCount(0) 17415 } 17416 17417 func TestJetStreamConsumerNumPendingWithMaxPerSubjectGreaterThanOne(t *testing.T) { 17418 s := RunBasicJetStreamServer(t) 17419 defer s.Shutdown() 17420 17421 nc, js := jsClientConnect(t, s) 17422 defer nc.Close() 17423 17424 test := func(t *testing.T, st nats.StorageType) { 17425 _, err := js.AddStream(&nats.StreamConfig{ 17426 Name: "TEST", 17427 Subjects: []string{"KV.*.*"}, 17428 MaxMsgsPerSubject: 2, 17429 Storage: st, 17430 }) 17431 require_NoError(t, err) 17432 17433 // If we allow more than one msg per subject, consumer's num pending can be off (bug in store level). 17434 // This requires a filtered state, simple states work ok. 17435 // Since we now rely on stream's filtered state when asked directly for consumer info in >=2.8.3. 
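// Publish to three KV.plans.* subjects and three KV.config.* subjects so the consumer's filter has entries to exclude, then double up two of the plans subjects.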
17436 js.PublishAsync("KV.plans.foo", []byte("OK")) 17437 js.PublishAsync("KV.plans.bar", []byte("OK")) 17438 js.PublishAsync("KV.plans.baz", []byte("OK")) 17439 // These are required, the consumer needs to filter these out to see the bug. 17440 js.PublishAsync("KV.config.foo", []byte("OK")) 17441 js.PublishAsync("KV.config.bar", []byte("OK")) 17442 js.PublishAsync("KV.config.baz", []byte("OK")) 17443 17444 // Double up some now. 17445 js.PublishAsync("KV.plans.bar", []byte("OK")) 17446 js.PublishAsync("KV.plans.baz", []byte("OK")) 17447 17448 ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{ 17449 Durable: "d", 17450 AckPolicy: nats.AckExplicitPolicy, 17451 DeliverPolicy: nats.DeliverLastPerSubjectPolicy, 17452 FilterSubject: "KV.plans.*", 17453 }) 17454 require_NoError(t, err) 17455 17456 err = js.DeleteStream("TEST") 17457 require_NoError(t, err) 17458 17459 if ci.NumPending != 3 { 17460 t.Fatalf("Expected 3 NumPending, but got %d", ci.NumPending) 17461 } 17462 } 17463 17464 t.Run("MemoryStore", func(t *testing.T) { test(t, nats.MemoryStorage) }) 17465 t.Run("FileStore", func(t *testing.T) { test(t, nats.FileStorage) }) 17466 } 17467 17468 func TestJetStreamMsgGetNoAdvisory(t *testing.T) { 17469 s := RunBasicJetStreamServer(t) 17470 defer s.Shutdown() 17471 17472 nc, js := jsClientConnect(t, s) 17473 defer nc.Close() 17474 17475 _, err := js.AddStream(&nats.StreamConfig{Name: "foo"}) 17476 require_NoError(t, err) 17477 17478 for i := 0; i < 100; i++ { 17479 js.PublishAsync("foo", []byte("ok")) 17480 } 17481 select { 17482 case <-js.PublishAsyncComplete(): 17483 case <-time.After(5 * time.Second): 17484 t.Fatalf("Did not receive completion signal") 17485 } 17486 17487 sub, err := nc.SubscribeSync("$JS.EVENT.ADVISORY.>") 17488 require_NoError(t, err) 17489 17490 _, err = js.GetMsg("foo", 1) 17491 require_NoError(t, err) 17492 17493 checkSubsPending(t, sub, 0) 17494 } 17495 17496 func TestJetStreamDirectMsgGet(t *testing.T) { 17497 s := RunBasicJetStreamServer(t) 17498 defer s.Shutdown() 17499 17500 nc, _ := jsClientConnect(t, s) 17501 defer nc.Close() 17502 17503 // Do by hand for now. 
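// AllowDirect enables the direct get API (JSDirectMsgGetT) used below; MaxMsgsPer: 1 keeps only the latest message per subject.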
17504 cfg := &StreamConfig{ 17505 Name: "DSMG", 17506 Storage: MemoryStorage, 17507 Subjects: []string{"foo", "bar", "baz"}, 17508 MaxMsgsPer: 1, 17509 AllowDirect: true, 17510 } 17511 addStream(t, nc, cfg) 17512 17513 sendStreamMsg(t, nc, "foo", "foo") 17514 sendStreamMsg(t, nc, "bar", "bar") 17515 sendStreamMsg(t, nc, "baz", "baz") 17516 17517 getSubj := fmt.Sprintf(JSDirectMsgGetT, "DSMG") 17518 getMsg := func(req *JSApiMsgGetRequest) *nats.Msg { 17519 var b []byte 17520 var err error 17521 if req != nil { 17522 b, err = json.Marshal(req) 17523 require_NoError(t, err) 17524 } 17525 m, err := nc.Request(getSubj, b, time.Second) 17526 require_NoError(t, err) 17527 return m 17528 } 17529 17530 m := getMsg(&JSApiMsgGetRequest{LastFor: "foo"}) 17531 require_True(t, string(m.Data) == "foo") 17532 require_True(t, m.Header.Get(JSStream) == "DSMG") 17533 require_True(t, m.Header.Get(JSSequence) == "1") 17534 require_True(t, m.Header.Get(JSSubject) == "foo") 17535 require_True(t, m.Subject != "foo") 17536 require_True(t, m.Header.Get(JSTimeStamp) != _EMPTY_) 17537 17538 m = getMsg(&JSApiMsgGetRequest{LastFor: "bar"}) 17539 require_True(t, string(m.Data) == "bar") 17540 require_True(t, m.Header.Get(JSStream) == "DSMG") 17541 require_True(t, m.Header.Get(JSSequence) == "2") 17542 require_True(t, m.Header.Get(JSSubject) == "bar") 17543 require_True(t, m.Subject != "bar") 17544 require_True(t, m.Header.Get(JSTimeStamp) != _EMPTY_) 17545 17546 m = getMsg(&JSApiMsgGetRequest{LastFor: "baz"}) 17547 require_True(t, string(m.Data) == "baz") 17548 require_True(t, m.Header.Get(JSStream) == "DSMG") 17549 require_True(t, m.Header.Get(JSSequence) == "3") 17550 require_True(t, m.Header.Get(JSSubject) == "baz") 17551 require_True(t, m.Subject != "baz") 17552 require_True(t, m.Header.Get(JSTimeStamp) != _EMPTY_) 17553 17554 // Test error conditions 17555 17556 // Nil request 17557 m = getMsg(nil) 17558 require_True(t, len(m.Data) == 0) 17559 require_True(t, m.Header.Get("Status") == "408") 17560 require_True(t, m.Header.Get("Description") == "Empty Request") 17561 17562 // Empty request 17563 m = getMsg(&JSApiMsgGetRequest{}) 17564 require_True(t, len(m.Data) == 0) 17565 require_True(t, m.Header.Get("Status") == "408") 17566 require_True(t, m.Header.Get("Description") == "Empty Request") 17567 17568 // Both set 17569 m = getMsg(&JSApiMsgGetRequest{Seq: 1, LastFor: "foo"}) 17570 require_True(t, len(m.Data) == 0) 17571 require_True(t, m.Header.Get("Status") == "408") 17572 require_True(t, m.Header.Get("Description") == "Bad Request") 17573 17574 // Not found 17575 m = getMsg(&JSApiMsgGetRequest{LastFor: "foobar"}) 17576 require_True(t, len(m.Data) == 0) 17577 require_True(t, m.Header.Get("Status") == "404") 17578 require_True(t, m.Header.Get("Description") == "Message Not Found") 17579 17580 m = getMsg(&JSApiMsgGetRequest{Seq: 22}) 17581 require_True(t, len(m.Data) == 0) 17582 require_True(t, m.Header.Get("Status") == "404") 17583 require_True(t, m.Header.Get("Description") == "Message Not Found") 17584 } 17585 17586 // This allows support for a get next given a sequence as a starting point. 17587 // These can be chained together if needed for sparse streams. 17588 func TestJetStreamDirectMsgGetNext(t *testing.T) { 17589 s := RunBasicJetStreamServer(t) 17590 defer s.Shutdown() 17591 17592 nc, _ := jsClientConnect(t, s) 17593 defer nc.Close() 17594 17595 // Do by hand for now.
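// Stream layout below: seq 1 = "foo", seqs 2-11 = "bar", seqs 12-21 = "baz", seq 22 = "foo"; the sequence assertions rely on this.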
17596 cfg := &StreamConfig{ 17597 Name: "DSMG", 17598 Storage: MemoryStorage, 17599 Subjects: []string{"foo", "bar", "baz"}, 17600 AllowDirect: true, 17601 } 17602 addStream(t, nc, cfg) 17603 17604 sendStreamMsg(t, nc, "foo", "foo") 17605 for i := 0; i < 10; i++ { 17606 sendStreamMsg(t, nc, "bar", "bar") 17607 } 17608 for i := 0; i < 10; i++ { 17609 sendStreamMsg(t, nc, "baz", "baz") 17610 } 17611 sendStreamMsg(t, nc, "foo", "foo") 17612 17613 getSubj := fmt.Sprintf(JSDirectMsgGetT, "DSMG") 17614 getMsg := func(seq uint64, subj string) *nats.Msg { 17615 req := []byte(fmt.Sprintf(`{"seq": %d, "next_by_subj": %q}`, seq, subj)) 17616 m, err := nc.Request(getSubj, req, time.Second) 17617 require_NoError(t, err) 17618 return m 17619 } 17620 17621 m := getMsg(0, "foo") 17622 require_True(t, m.Header.Get(JSSequence) == "1") 17623 require_True(t, m.Header.Get(JSSubject) == "foo") 17624 17625 m = getMsg(1, "foo") 17626 require_True(t, m.Header.Get(JSSequence) == "1") 17627 require_True(t, m.Header.Get(JSSubject) == "foo") 17628 17629 m = getMsg(2, "foo") 17630 require_True(t, m.Header.Get(JSSequence) == "22") 17631 require_True(t, m.Header.Get(JSSubject) == "foo") 17632 17633 m = getMsg(2, "bar") 17634 require_True(t, m.Header.Get(JSSequence) == "2") 17635 require_True(t, m.Header.Get(JSSubject) == "bar") 17636 17637 m = getMsg(5, "baz") 17638 require_True(t, m.Header.Get(JSSequence) == "12") 17639 require_True(t, m.Header.Get(JSSubject) == "baz") 17640 17641 m = getMsg(14, "baz") 17642 require_True(t, m.Header.Get(JSSequence) == "14") 17643 require_True(t, m.Header.Get(JSSubject) == "baz") 17644 } 17645 17646 func TestJetStreamConsumerAndStreamNamesWithPathSeparators(t *testing.T) { 17647 s := RunBasicJetStreamServer(t) 17648 defer s.Shutdown() 17649 17650 nc, js := jsClientConnect(t, s) 17651 defer nc.Close() 17652 17653 _, err := js.AddStream(&nats.StreamConfig{Name: "usr/bin"}) 17654 require_Error(t, err, NewJSStreamNameContainsPathSeparatorsError(), nats.ErrInvalidStreamName) 17655 _, err = js.AddStream(&nats.StreamConfig{Name: `Documents\readme.txt`}) 17656 require_Error(t, err, NewJSStreamNameContainsPathSeparatorsError(), nats.ErrInvalidStreamName) 17657 17658 // Now consumers. 
17659 _, err = js.AddStream(&nats.StreamConfig{Name: "T"}) 17660 require_NoError(t, err) 17661 17662 _, err = js.AddConsumer("T", &nats.ConsumerConfig{Durable: "a/b", AckPolicy: nats.AckExplicitPolicy}) 17663 require_Error(t, err, NewJSConsumerNameContainsPathSeparatorsError(), nats.ErrInvalidConsumerName) 17664 17665 _, err = js.AddConsumer("T", &nats.ConsumerConfig{Durable: `a\b`, AckPolicy: nats.AckExplicitPolicy}) 17666 require_Error(t, err, NewJSConsumerNameContainsPathSeparatorsError(), nats.ErrInvalidConsumerName) 17667 } 17668 17669 func TestJetStreamConsumerUpdateFilterSubject(t *testing.T) { 17670 s := RunBasicJetStreamServer(t) 17671 defer s.Shutdown() 17672 17673 nc, js := jsClientConnect(t, s) 17674 defer nc.Close() 17675 17676 _, err := js.AddStream(&nats.StreamConfig{Name: "T", Subjects: []string{"foo", "bar", "baz"}}) 17677 require_NoError(t, err) 17678 17679 // 10 foo 17680 for i := 0; i < 10; i++ { 17681 js.PublishAsync("foo", []byte("OK")) 17682 } 17683 // 20 bar 17684 for i := 0; i < 20; i++ { 17685 js.PublishAsync("bar", []byte("OK")) 17686 } 17687 select { 17688 case <-js.PublishAsyncComplete(): 17689 case <-time.After(5 * time.Second): 17690 t.Fatalf("Did not receive completion signal") 17691 } 17692 17693 sub, err := js.PullSubscribe("foo", "d") 17694 require_NoError(t, err) 17695 17696 // Consume 5 msgs 17697 msgs, err := sub.Fetch(5) 17698 require_NoError(t, err) 17699 require_True(t, len(msgs) == 5) 17700 17701 // Now update to different filter subject. 17702 _, err = js.UpdateConsumer("T", &nats.ConsumerConfig{ 17703 Durable: "d", 17704 FilterSubject: "bar", 17705 AckPolicy: nats.AckExplicitPolicy, 17706 }) 17707 require_NoError(t, err) 17708 17709 sub, err = js.PullSubscribe("bar", "d") 17710 require_NoError(t, err) 17711 17712 msgs, err = sub.Fetch(1) 17713 require_NoError(t, err) 17714 17715 // Make sure meta and pending etc are all correct. 17716 m := msgs[0] 17717 meta, err := m.Metadata() 17718 require_NoError(t, err) 17719 17720 if meta.Sequence.Consumer != 6 || meta.Sequence.Stream != 11 { 17721 t.Fatalf("Sequence incorrect %+v", meta.Sequence) 17722 } 17723 if meta.NumDelivered != 1 { 17724 t.Fatalf("Expected NumDelivered to be 1, got %d", meta.NumDelivered) 17725 } 17726 if meta.NumPending != 19 { 17727 t.Fatalf("Expected NumPending to be 19, got %d", meta.NumPending) 17728 } 17729 } 17730 17731 // Originally pull consumers were FIFO with respect to the request, not delivery of messages. 17732 // We have changed to have the behavior be FIFO but on an individual message basis. 17733 // So after a message is delivered, the request, if still outstanding, effectively 17734 // goes to the end of the queue of requests pending. 17735 func TestJetStreamConsumerPullConsumerFIFO(t *testing.T) { 17736 s := RunBasicJetStreamServer(t) 17737 defer s.Shutdown() 17738 17739 nc, js := jsClientConnect(t, s) 17740 defer nc.Close() 17741 17742 _, err := js.AddStream(&nats.StreamConfig{Name: "T"}) 17743 require_NoError(t, err) 17744 17745 // Create pull consumer. 17746 _, err = js.AddConsumer("T", &nats.ConsumerConfig{Durable: "d", AckPolicy: nats.AckExplicitPolicy}) 17747 require_NoError(t, err) 17748 17749 // Simulate 10 pull requests each asking for 10 messages. 
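// Each request uses its own inbox subscription so we can check, per request, exactly which stream sequences were delivered.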
17750 var subs []*nats.Subscription 17751 for i := 0; i < 10; i++ { 17752 inbox := nats.NewInbox() 17753 sub := natsSubSync(t, nc, inbox) 17754 subs = append(subs, sub) 17755 req := &JSApiConsumerGetNextRequest{Batch: 10, Expires: 60 * time.Second} 17756 jreq, err := json.Marshal(req) 17757 require_NoError(t, err) 17758 err = nc.PublishRequest(fmt.Sprintf(JSApiRequestNextT, "T", "d"), inbox, jreq) 17759 require_NoError(t, err) 17760 } 17761 17762 // Now send 100 messages. 17763 for i := 0; i < 100; i++ { 17764 js.PublishAsync("T", []byte("FIFO FTW!")) 17765 } 17766 select { 17767 case <-js.PublishAsyncComplete(): 17768 case <-time.After(5 * time.Second): 17769 t.Fatalf("Did not receive completion signal") 17770 } 17771 17772 // Wait for messages 17773 for index, sub := range subs { 17774 checkSubsPending(t, sub, 10) 17775 for i := 0; i < 10; i++ { 17776 m, err := sub.NextMsg(time.Second) 17777 require_NoError(t, err) 17778 meta, err := m.Metadata() 17779 require_NoError(t, err) 17780 // We expect these to be FIFO per message. E.g. sub #1 = [1, 11, 21, 31, ..] 17781 if sseq := meta.Sequence.Stream; sseq != uint64(index+1+(10*i)) { 17782 t.Fatalf("Expected message #%d for sub #%d to be %d, but got %d", i+1, index+1, index+1+(10*i), sseq) 17783 } 17784 } 17785 } 17786 } 17787 17788 // Make sure that when we reach an ack limit that we follow one shot semantics. 17789 func TestJetStreamConsumerPullConsumerOneShotOnMaxAckLimit(t *testing.T) { 17790 s := RunBasicJetStreamServer(t) 17791 defer s.Shutdown() 17792 17793 nc, js := jsClientConnect(t, s) 17794 defer nc.Close() 17795 17796 _, err := js.AddStream(&nats.StreamConfig{Name: "T"}) 17797 require_NoError(t, err) 17798 17799 for i := 0; i < 10; i++ { 17800 js.Publish("T", []byte("OK")) 17801 } 17802 17803 sub, err := js.PullSubscribe("T", "d", nats.MaxAckPending(5)) 17804 require_NoError(t, err) 17805 17806 start := time.Now() 17807 msgs, err := sub.Fetch(10, nats.MaxWait(2*time.Second)) 17808 require_NoError(t, err) 17809 17810 if elapsed := time.Since(start); elapsed >= 2*time.Second { 17811 t.Fatalf("Took too long, not one shot behavior: %v", elapsed) 17812 } 17813 17814 if len(msgs) != 5 { 17815 t.Fatalf("Expected 5 msgs, got %d", len(msgs)) 17816 } 17817 } 17818 17819 /////////////////////////////////////////////////////////////////////////// 17820 // Simple JetStream Benchmarks 17821 /////////////////////////////////////////////////////////////////////////// 17822 17823 func Benchmark__JetStreamPubWithAck(b *testing.B) { 17824 s := RunBasicJetStreamServer(b) 17825 defer s.Shutdown() 17826 17827 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: "foo"}) 17828 if err != nil { 17829 b.Fatalf("Unexpected error adding stream: %v", err) 17830 } 17831 defer mset.delete() 17832 17833 nc, err := nats.Connect(s.ClientURL()) 17834 if err != nil { 17835 b.Fatalf("Failed to create client: %v", err) 17836 } 17837 defer nc.Close() 17838 17839 b.ResetTimer() 17840 for i := 0; i < b.N; i++ { 17841 nc.Request("foo", []byte("Hello World!"), 50*time.Millisecond) 17842 } 17843 b.StopTimer() 17844 17845 state := mset.state() 17846 if int(state.Msgs) != b.N { 17847 b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs) 17848 } 17849 } 17850 17851 func Benchmark____JetStreamPubNoAck(b *testing.B) { 17852 s := RunBasicJetStreamServer(b) 17853 defer s.Shutdown() 17854 17855 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: "foo"}) 17856 if err != nil { 17857 b.Fatalf("Unexpected error adding stream: %v", err) 17858 } 17859 defer 
mset.delete() 17860 17861 nc, err := nats.Connect(s.ClientURL()) 17862 if err != nil { 17863 b.Fatalf("Failed to create client: %v", err) 17864 } 17865 defer nc.Close() 17866 17867 b.ResetTimer() 17868 for i := 0; i < b.N; i++ { 17869 if err := nc.Publish("foo", []byte("Hello World!")); err != nil { 17870 b.Fatalf("Unexpected error: %v", err) 17871 } 17872 } 17873 nc.Flush() 17874 b.StopTimer() 17875 17876 state := mset.state() 17877 if int(state.Msgs) != b.N { 17878 b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs) 17879 } 17880 } 17881 17882 func Benchmark_JetStreamPubAsyncAck(b *testing.B) { 17883 s := RunBasicJetStreamServer(b) 17884 defer s.Shutdown() 17885 17886 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: "foo"}) 17887 if err != nil { 17888 b.Fatalf("Unexpected error adding stream: %v", err) 17889 } 17890 defer mset.delete() 17891 17892 nc, err := nats.Connect(s.ClientURL(), nats.NoReconnect()) 17893 if err != nil { 17894 b.Fatalf("Failed to create client: %v", err) 17895 } 17896 defer nc.Close() 17897 17898 // Put ack stream on its own connection. 17899 anc, err := nats.Connect(s.ClientURL()) 17900 if err != nil { 17901 b.Fatalf("Failed to create client: %v", err) 17902 } 17903 defer anc.Close() 17904 17905 acks := nats.NewInbox() 17906 sub, _ := anc.Subscribe(acks, func(m *nats.Msg) { 17907 // Just eat them for this test. 17908 }) 17909 // set max pending to unlimited. 17910 sub.SetPendingLimits(-1, -1) 17911 defer sub.Unsubscribe() 17912 17913 anc.Flush() 17914 runtime.GC() 17915 17916 b.ResetTimer() 17917 for i := 0; i < b.N; i++ { 17918 if err := nc.PublishRequest("foo", acks, []byte("Hello World!")); err != nil { 17919 b.Fatalf("[%d] Unexpected error: %v", i, err) 17920 } 17921 } 17922 nc.Flush() 17923 b.StopTimer() 17924 17925 state := mset.state() 17926 if int(state.Msgs) != b.N { 17927 b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs) 17928 } 17929 } 17930 17931 func Benchmark____JetStreamSubNoAck(b *testing.B) { 17932 if b.N < 10000 { 17933 return 17934 } 17935 17936 s := RunBasicJetStreamServer(b) 17937 defer s.Shutdown() 17938 17939 mname := "foo" 17940 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname}) 17941 if err != nil { 17942 b.Fatalf("Unexpected error adding stream: %v", err) 17943 } 17944 defer mset.delete() 17945 17946 nc, err := nats.Connect(s.ClientURL(), nats.NoReconnect()) 17947 if err != nil { 17948 b.Fatalf("Failed to create client: %v", err) 17949 } 17950 defer nc.Close() 17951 17952 // Queue up messages. 17953 for i := 0; i < b.N; i++ { 17954 nc.Publish(mname, []byte("Hello World!")) 17955 } 17956 nc.Flush() 17957 17958 state := mset.state() 17959 if state.Msgs != uint64(b.N) { 17960 b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs) 17961 } 17962 17963 total := int32(b.N) 17964 received := int32(0) 17965 done := make(chan bool) 17966 17967 deliverTo := "DM" 17968 oname := "O" 17969 17970 nc.Subscribe(deliverTo, func(m *nats.Msg) { 17971 // We only are done when we receive all, we could check for gaps too. 
17972 if atomic.AddInt32(&received, 1) >= total { 17973 done <- true 17974 } 17975 }) 17976 nc.Flush() 17977 17978 b.ResetTimer() 17979 o, err := mset.addConsumer(&ConsumerConfig{DeliverSubject: deliverTo, Durable: oname, AckPolicy: AckNone}) 17980 if err != nil { 17981 b.Fatalf("Expected no error with registered interest, got %v", err) 17982 } 17983 defer o.delete() 17984 <-done 17985 b.StopTimer() 17986 } 17987 17988 func benchJetStreamWorkersAndBatch(b *testing.B, numWorkers, batchSize int) { 17989 // Avoid running at too low of numbers since that chews up memory and GC. 17990 if b.N < numWorkers*batchSize { 17991 return 17992 } 17993 17994 s := RunBasicJetStreamServer(b) 17995 defer s.Shutdown() 17996 17997 mname := "MSET22" 17998 mset, err := s.GlobalAccount().addStream(&StreamConfig{Name: mname}) 17999 if err != nil { 18000 b.Fatalf("Unexpected error adding stream: %v", err) 18001 } 18002 defer mset.delete() 18003 18004 nc, err := nats.Connect(s.ClientURL(), nats.NoReconnect()) 18005 if err != nil { 18006 b.Fatalf("Failed to create client: %v", err) 18007 } 18008 defer nc.Close() 18009 18010 // Queue up messages. 18011 for i := 0; i < b.N; i++ { 18012 nc.Publish(mname, []byte("Hello World!")) 18013 } 18014 nc.Flush() 18015 18016 state := mset.state() 18017 if state.Msgs != uint64(b.N) { 18018 b.Fatalf("Expected %d messages, got %d", b.N, state.Msgs) 18019 } 18020 18021 // Create basic work queue mode consumer. 18022 oname := "WQ" 18023 o, err := mset.addConsumer(&ConsumerConfig{Durable: oname, AckPolicy: AckExplicit}) 18024 if err != nil { 18025 b.Fatalf("Expected no error with registered interest, got %v", err) 18026 } 18027 defer o.delete() 18028 18029 total := int32(b.N) 18030 received := int32(0) 18031 start := make(chan bool) 18032 done := make(chan bool) 18033 18034 batchSizeMsg := []byte(strconv.Itoa(batchSize)) 18035 reqNextMsgSubj := o.requestNextMsgSubject() 18036 18037 for i := 0; i < numWorkers; i++ { 18038 nc, err := nats.Connect(s.ClientURL(), nats.NoReconnect()) 18039 if err != nil { 18040 b.Fatalf("Failed to create client: %v", err) 18041 } 18042 defer nc.Close() 18043 18044 deliverTo := nats.NewInbox() 18045 nc.Subscribe(deliverTo, func(m *nats.Msg) { 18046 if atomic.AddInt32(&received, 1) >= total { 18047 done <- true 18048 } 18049 // Ack + Next request. 18050 nc.PublishRequest(m.Reply, deliverTo, AckNext) 18051 }) 18052 nc.Flush() 18053 go func() { 18054 <-start 18055 nc.PublishRequest(reqNextMsgSubj, deliverTo, batchSizeMsg) 18056 }() 18057 } 18058 18059 b.ResetTimer() 18060 close(start) 18061 <-done 18062 b.StopTimer() 18063 } 18064 18065 func Benchmark___JetStream1x1Worker(b *testing.B) { 18066 benchJetStreamWorkersAndBatch(b, 1, 1) 18067 } 18068 18069 func Benchmark__JetStream1x1kWorker(b *testing.B) { 18070 benchJetStreamWorkersAndBatch(b, 1, 1024) 18071 } 18072 18073 func Benchmark_JetStream10x1kWorker(b *testing.B) { 18074 benchJetStreamWorkersAndBatch(b, 10, 1024) 18075 } 18076 18077 func Benchmark_JetStream4x512Worker(b *testing.B) { 18078 benchJetStreamWorkersAndBatch(b, 4, 512) 18079 } 18080 18081 func TestJetStreamKVMemoryStorePerf(t *testing.T) { 18082 // Comment out to run, holding place for now. 
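// t.SkipNow below keeps this perf placeholder out of regular runs; comment it out to measure locally.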
18083 t.SkipNow() 18084 18085 s := RunBasicJetStreamServer(t) 18086 defer s.Shutdown() 18087 18088 nc, js := jsClientConnect(t, s) 18089 defer nc.Close() 18090 18091 kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "TEST", History: 1, Storage: nats.MemoryStorage}) 18092 require_NoError(t, err) 18093 18094 start := time.Now() 18095 for i := 0; i < 100_000; i++ { 18096 _, err := kv.PutString(fmt.Sprintf("foo.%d", i), "HELLO") 18097 require_NoError(t, err) 18098 } 18099 fmt.Printf("Took %v for first run\n", time.Since(start)) 18100 18101 start = time.Now() 18102 for i := 0; i < 100_000; i++ { 18103 _, err := kv.PutString(fmt.Sprintf("foo.%d", i), "HELLO WORLD") 18104 require_NoError(t, err) 18105 } 18106 fmt.Printf("Took %v for second run\n", time.Since(start)) 18107 18108 start = time.Now() 18109 for i := 0; i < 100_000; i++ { 18110 _, err := kv.Get(fmt.Sprintf("foo.%d", i)) 18111 require_NoError(t, err) 18112 } 18113 fmt.Printf("Took %v for get\n", time.Since(start)) 18114 } 18115 18116 func TestJetStreamKVMemoryStoreDirectGetPerf(t *testing.T) { 18117 // Comment out to run, holding place for now. 18118 t.SkipNow() 18119 18120 s := RunBasicJetStreamServer(t) 18121 defer s.Shutdown() 18122 18123 nc, js := jsClientConnect(t, s) 18124 defer nc.Close() 18125 18126 cfg := &StreamConfig{ 18127 Name: "TEST", 18128 Storage: MemoryStorage, 18129 Subjects: []string{"foo.*"}, 18130 MaxMsgsPer: 1, 18131 AllowDirect: true, 18132 } 18133 addStream(t, nc, cfg) 18134 18135 start := time.Now() 18136 for i := 0; i < 100_000; i++ { 18137 _, err := js.Publish(fmt.Sprintf("foo.%d", i), []byte("HELLO")) 18138 require_NoError(t, err) 18139 } 18140 fmt.Printf("Took %v for put\n", time.Since(start)) 18141 18142 getSubj := fmt.Sprintf(JSDirectMsgGetT, "TEST") 18143 18144 const tmpl = "{\"last_by_subj\":%q}" 18145 18146 start = time.Now() 18147 for i := 0; i < 100_000; i++ { 18148 req := []byte(fmt.Sprintf(tmpl, fmt.Sprintf("foo.%d", i))) 18149 _, err := nc.Request(getSubj, req, time.Second) 18150 require_NoError(t, err) 18151 } 18152 fmt.Printf("Took %v for get\n", time.Since(start)) 18153 } 18154 18155 func TestJetStreamMultiplePullPerf(t *testing.T) { 18156 skip(t) 18157 18158 s := RunBasicJetStreamServer(t) 18159 defer s.Shutdown() 18160 18161 nc, js := jsClientConnect(t, s) 18162 defer nc.Close() 18163 18164 js.AddStream(&nats.StreamConfig{Name: "mp22", Storage: nats.FileStorage}) 18165 defer js.DeleteStream("mp22") 18166 18167 n, msg := 1_000_000, []byte("OK") 18168 for i := 0; i < n; i++ { 18169 js.PublishAsync("mp22", msg) 18170 } 18171 select { 18172 case <-js.PublishAsyncComplete(): 18173 case <-time.After(10 * time.Second): 18174 t.Fatalf("Did not receive completion signal") 18175 } 18176 18177 si, err := js.StreamInfo("mp22") 18178 require_NoError(t, err) 18179 18180 fmt.Printf("msgs: %d, total_bytes: %v\n", si.State.Msgs, friendlyBytes(int64(si.State.Bytes))) 18181 18182 // 10 pull subscribers each asking for 100 msgs. 
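// A single durable is shared by all pull subscribers; MaxAckPending is sized well above the combined in-flight batches.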
18183 _, err = js.AddConsumer("mp22", &nats.ConsumerConfig{ 18184 Durable: "d", 18185 MaxAckPending: 8_000, 18186 AckPolicy: nats.AckExplicitPolicy, 18187 }) 18188 require_NoError(t, err) 18189 18190 startCh := make(chan bool) 18191 var wg sync.WaitGroup 18192 18193 np, bs := 10, 100 18194 18195 count := 0 18196 18197 for i := 0; i < np; i++ { 18198 nc, js := jsClientConnect(t, s) 18199 defer nc.Close() 18200 sub, err := js.PullSubscribe("mp22", "d") 18201 require_NoError(t, err) 18202 18203 wg.Add(1) 18204 go func(sub *nats.Subscription) { 18205 defer wg.Done() 18206 <-startCh 18207 for i := 0; i < n/(np*bs); i++ { 18208 msgs, err := sub.Fetch(bs) 18209 if err != nil { 18210 t.Logf("Got error on pull: %v", err) 18211 return 18212 } 18213 if len(msgs) != bs { 18214 t.Logf("Expected %d msgs, got %d", bs, len(msgs)) 18215 return 18216 } 18217 count += len(msgs) 18218 for _, m := range msgs { 18219 m.Ack() 18220 } 18221 } 18222 }(sub) 18223 } 18224 18225 start := time.Now() 18226 close(startCh) 18227 wg.Wait() 18228 18229 tt := time.Since(start) 18230 fmt.Printf("Took %v to receive %d msgs [%d]\n", tt, n, count) 18231 fmt.Printf("%.0f msgs/s\n", float64(n)/tt.Seconds()) 18232 fmt.Printf("%.0f mb/s\n\n", float64(si.State.Bytes/(1024*1024))/tt.Seconds()) 18233 } 18234 18235 func TestJetStreamMirrorUpdatesNotSupported(t *testing.T) { 18236 s := RunBasicJetStreamServer(t) 18237 defer s.Shutdown() 18238 18239 nc, js := jsClientConnect(t, s) 18240 defer nc.Close() 18241 18242 _, err := js.AddStream(&nats.StreamConfig{Name: "SOURCE"}) 18243 require_NoError(t, err) 18244 18245 cfg := &nats.StreamConfig{ 18246 Name: "M", 18247 Mirror: &nats.StreamSource{Name: "SOURCE"}, 18248 } 18249 _, err = js.AddStream(cfg) 18250 require_NoError(t, err) 18251 18252 cfg.Mirror = nil 18253 _, err = js.UpdateStream(cfg) 18254 require_Error(t, err, NewJSStreamMirrorNotUpdatableError()) 18255 } 18256 18257 func TestJetStreamMirrorFirstSeqNotSupported(t *testing.T) { 18258 s := RunBasicJetStreamServer(t) 18259 defer s.Shutdown() 18260 18261 _, err := s.gacc.addStream(&StreamConfig{Name: "SOURCE"}) 18262 require_NoError(t, err) 18263 18264 cfg := &StreamConfig{ 18265 Name: "M", 18266 Mirror: &StreamSource{Name: "SOURCE"}, 18267 FirstSeq: 123, 18268 } 18269 _, err = s.gacc.addStream(cfg) 18270 require_Error(t, err, NewJSMirrorWithFirstSeqError()) 18271 } 18272 18273 func TestJetStreamDirectGetBySubject(t *testing.T) { 18274 conf := createConfFile(t, []byte(fmt.Sprintf(` 18275 listen: 127.0.0.1:-1 18276 jetstream: {max_mem_store: 64GB, max_file_store: 10TB, store_dir: %q} 18277 18278 ONLYME = { 18279 publish = { allow = "$JS.API.DIRECT.GET.KV.vid.22.>"} 18280 } 18281 18282 accounts: { 18283 A: { 18284 jetstream: enabled 18285 users: [ 18286 { user: admin, password: s3cr3t }, 18287 { user: user, password: pwd, permissions: $ONLYME}, 18288 ] 18289 }, 18290 } 18291 `, t.TempDir()))) 18292 18293 s, _ := RunServerWithConfig(conf) 18294 defer s.Shutdown() 18295 18296 nc, js := jsClientConnect(t, s, nats.UserInfo("admin", "s3cr3t")) 18297 defer nc.Close() 18298 18299 // Do by hand for now. 18300 cfg := &StreamConfig{ 18301 Name: "KV", 18302 Storage: MemoryStorage, 18303 Subjects: []string{"vid.*.>"}, 18304 MaxMsgsPer: 1, 18305 AllowDirect: true, 18306 } 18307 addStream(t, nc, cfg) 18308 18309 // Add in mirror as well. 
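// MirrorDirect lets the mirror answer direct gets on behalf of the origin stream; the last part of the test checks that it responds at least once.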
18310 cfg = &StreamConfig{ 18311 Name: "M", 18312 Storage: MemoryStorage, 18313 Mirror: &StreamSource{Name: "KV"}, 18314 MirrorDirect: true, 18315 } 18316 addStream(t, nc, cfg) 18317 18318 v22 := "vid.22.speed" 18319 v33 := "vid.33.speed" 18320 _, err := js.Publish(v22, []byte("100")) 18321 require_NoError(t, err) 18322 _, err = js.Publish(v33, []byte("55")) 18323 require_NoError(t, err) 18324 18325 // Use the restricted user. 18326 nc, _ = jsClientConnect(t, s, nats.UserInfo("user", "pwd")) 18327 defer nc.Close() 18328 18329 errCh := make(chan error, 10) 18330 nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, e error) { 18331 select { 18332 case errCh <- e: 18333 default: 18334 } 18335 }) 18336 18337 getSubj := fmt.Sprintf(JSDirectGetLastBySubjectT, "KV", v22) 18338 m, err := nc.Request(getSubj, nil, time.Second) 18339 require_NoError(t, err) 18340 require_True(t, string(m.Data) == "100") 18341 18342 // Now attempt to access vid 33 data. 18343 getSubj = fmt.Sprintf(JSDirectGetLastBySubjectT, "KV", v33) 18344 _, err = nc.Request(getSubj, nil, 200*time.Millisecond) 18345 require_Error(t, err) // timeout here. 18346 18347 select { 18348 case e := <-errCh: 18349 if !strings.HasPrefix(e.Error(), "nats: Permissions Violation") { 18350 t.Fatalf("Expected a permissions violation but got %v", e) 18351 } 18352 case <-time.After(time.Second): 18353 t.Fatalf("Expected to get a permissions error, got none") 18354 } 18355 18356 // Now make sure mirrors are doing the right thing with the new way as well. 18357 var sawMirror bool 18358 getSubj = fmt.Sprintf(JSDirectGetLastBySubjectT, "KV", v22) 18359 for i := 0; i < 100; i++ { 18360 m, err := nc.Request(getSubj, nil, time.Second) 18361 require_NoError(t, err) 18362 if shdr := m.Header.Get(JSStream); shdr == "M" { 18363 sawMirror = true 18364 break 18365 } 18366 } 18367 if !sawMirror { 18368 t.Fatalf("Expected to see the mirror respond at least once") 18369 } 18370 } 18371 18372 func TestJetStreamProperErrorDueToOverlapSubjects(t *testing.T) { 18373 s := RunBasicJetStreamServer(t) 18374 defer s.Shutdown() 18375 18376 c := createJetStreamClusterExplicit(t, "R3S", 3) 18377 defer c.shutdown() 18378 18379 test := func(t *testing.T, s *Server) { 18380 nc, js := jsClientConnect(t, s) 18381 defer nc.Close() 18382 18383 _, err := js.AddStream(&nats.StreamConfig{ 18384 Name: "TEST", 18385 Subjects: []string{"foo.*"}, 18386 }) 18387 require_NoError(t, err) 18388 18389 // Now do this by hand since we want to check the error returned. 18390 sc := &nats.StreamConfig{ 18391 Name: "TEST2", 18392 Subjects: []string{"foo.>"}, 18393 } 18394 req, _ := json.Marshal(sc) 18395 msg, err := nc.Request(fmt.Sprintf(JSApiStreamCreateT, sc.Name), req, time.Second) 18396 require_NoError(t, err) 18397 18398 var scResp JSApiStreamCreateResponse 18399 err = json.Unmarshal(msg.Data, &scResp) 18400 require_NoError(t, err) 18401 18402 if scResp.Error == nil || !IsNatsErr(scResp.Error, JSStreamSubjectOverlapErr) { 18403 t.Fatalf("Did not receive correct error: %+v", scResp) 18404 } 18405 } 18406 18407 t.Run("standalone", func(t *testing.T) { test(t, s) }) 18408 t.Run("clustered", func(t *testing.T) { test(t, c.randomServer()) }) 18409 } 18410 18411 func TestJetStreamServerCipherConvert(t *testing.T) { 18412 tmpl := ` 18413 server_name: S22 18414 listen: 127.0.0.1:-1 18415 jetstream: {key: s3cr3t, store_dir: '%s', cipher: %s} 18416 ` 18417 storeDir := t.TempDir() 18418 18419 // Create a stream and a consumer under one cipher, and restart the server with a new cipher.
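// First boot uses AES; the restart below switches to ChaCha and expects the stream and consumer state to read back identically.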
18420 conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, storeDir, "AES"))) 18421 18422 s, _ := RunServerWithConfig(conf) 18423 defer s.Shutdown() 18424 18425 // Client based API 18426 nc, js := jsClientConnect(t, s) 18427 defer nc.Close() 18428 18429 cfg := &nats.StreamConfig{ 18430 Name: "TEST", 18431 Subjects: []string{"foo"}, 18432 } 18433 if _, err := js.AddStream(cfg); err != nil { 18434 t.Fatalf("Unexpected error: %v", err) 18435 } 18436 18437 for i := 0; i < 1000; i++ { 18438 msg := []byte(fmt.Sprintf("TOP SECRET DOCUMENT #%d", i+1)) 18439 _, err := js.Publish("foo", msg) 18440 require_NoError(t, err) 18441 } 18442 18443 // Make sure consumers convert as well. 18444 sub, err := js.PullSubscribe("foo", "dlc") 18445 require_NoError(t, err) 18446 for _, m := range fetchMsgs(t, sub, 100, 5*time.Second) { 18447 m.AckSync() 18448 } 18449 18450 si, err := js.StreamInfo("TEST") 18451 require_NoError(t, err) 18452 18453 ci, err := js.ConsumerInfo("TEST", "dlc") 18454 require_NoError(t, err) 18455 18456 // Stop current 18457 s.Shutdown() 18458 18459 conf = createConfFile(t, []byte(fmt.Sprintf(tmpl, storeDir, "ChaCha"))) 18460 18461 s, _ = RunServerWithConfig(conf) 18462 defer s.Shutdown() 18463 18464 nc, js = jsClientConnect(t, s) 18465 defer nc.Close() 18466 18467 si2, err := js.StreamInfo("TEST") 18468 require_NoError(t, err) 18469 18470 if !reflect.DeepEqual(si, si2) { 18471 t.Fatalf("Stream infos did not match\n%+v\nvs\n%+v", si, si2) 18472 } 18473 18474 ci2, err := js.ConsumerInfo("TEST", "dlc") 18475 require_NoError(t, err) 18476 18477 // Consumer create times can be slightly off after restore from disk. 18478 now := time.Now() 18479 ci.Created, ci2.Created = now, now 18480 ci.Delivered.Last, ci2.Delivered.Last = nil, nil 18481 ci.AckFloor.Last, ci2.AckFloor.Last = nil, nil 18482 // Also clusters will be different. 
18483 ci.Cluster, ci2.Cluster = nil, nil 18484 18485 if !reflect.DeepEqual(ci, ci2) { 18486 t.Fatalf("Consumer infos did not match\n%+v\nvs\n%+v", ci, ci2) 18487 } 18488 } 18489 18490 func TestJetStreamConsumerDeliverNewMaxRedeliveriesAndServerRestart(t *testing.T) { 18491 s := RunBasicJetStreamServer(t) 18492 defer s.Shutdown() 18493 18494 nc, js := jsClientConnect(t, s) 18495 defer nc.Close() 18496 18497 _, err := js.AddStream(&nats.StreamConfig{ 18498 Name: "TEST", 18499 Subjects: []string{"foo.*"}, 18500 }) 18501 require_NoError(t, err) 18502 18503 inbox := nats.NewInbox() 18504 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 18505 DeliverSubject: inbox, 18506 Durable: "dur", 18507 AckPolicy: nats.AckExplicitPolicy, 18508 DeliverPolicy: nats.DeliverNewPolicy, 18509 MaxDeliver: 3, 18510 AckWait: 250 * time.Millisecond, 18511 FilterSubject: "foo.bar", 18512 }) 18513 require_NoError(t, err) 18514 18515 sendStreamMsg(t, nc, "foo.bar", "msg") 18516 18517 sub := natsSubSync(t, nc, inbox) 18518 for i := 0; i < 3; i++ { 18519 natsNexMsg(t, sub, time.Second) 18520 } 18521 // Now check that there is no more redeliveries 18522 if msg, err := sub.NextMsg(300 * time.Millisecond); err != nats.ErrTimeout { 18523 t.Fatalf("Expected timeout, got msg=%+v err=%v", msg, err) 18524 } 18525 18526 // Give a chance to things to be persisted 18527 time.Sleep(300 * time.Millisecond) 18528 18529 // Check server restart 18530 nc.Close() 18531 sd := s.JetStreamConfig().StoreDir 18532 s.Shutdown() 18533 s = RunJetStreamServerOnPort(-1, sd) 18534 defer s.Shutdown() 18535 18536 nc, _ = jsClientConnect(t, s) 18537 defer nc.Close() 18538 18539 sub = natsSubSync(t, nc, inbox) 18540 // We should not have messages being redelivered. 18541 if msg, err := sub.NextMsg(300 * time.Millisecond); err != nats.ErrTimeout { 18542 t.Fatalf("Expected timeout, got msg=%+v err=%v", msg, err) 18543 } 18544 } 18545 18546 func TestJetStreamConsumerPendingLowerThanStreamFirstSeq(t *testing.T) { 18547 s := RunBasicJetStreamServer(t) 18548 defer s.Shutdown() 18549 18550 nc, js := jsClientConnect(t, s) 18551 defer nc.Close() 18552 18553 _, err := js.AddStream(&nats.StreamConfig{ 18554 Name: "TEST", 18555 Subjects: []string{"foo"}, 18556 }) 18557 require_NoError(t, err) 18558 18559 for i := 0; i < 100; i++ { 18560 sendStreamMsg(t, nc, "foo", "msg") 18561 } 18562 18563 inbox := nats.NewInbox() 18564 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 18565 DeliverSubject: inbox, 18566 Durable: "dur", 18567 AckPolicy: nats.AckExplicitPolicy, 18568 DeliverPolicy: nats.DeliverAllPolicy, 18569 }) 18570 require_NoError(t, err) 18571 18572 sub := natsSubSync(t, nc, inbox) 18573 for i := 0; i < 10; i++ { 18574 natsNexMsg(t, sub, time.Second) 18575 } 18576 18577 acc, err := s.lookupAccount(globalAccountName) 18578 require_NoError(t, err) 18579 mset, err := acc.lookupStream("TEST") 18580 require_NoError(t, err) 18581 o := mset.lookupConsumer("dur") 18582 require_True(t, o != nil) 18583 o.stop() 18584 mset.store.Compact(1_000_000) 18585 nc.Close() 18586 18587 sd := s.JetStreamConfig().StoreDir 18588 s.Shutdown() 18589 s = RunJetStreamServerOnPort(-1, sd) 18590 defer s.Shutdown() 18591 18592 nc, js = jsClientConnect(t, s) 18593 defer nc.Close() 18594 18595 si, err := js.StreamInfo("TEST") 18596 require_NoError(t, err) 18597 require_True(t, si.State.FirstSeq == 1_000_000) 18598 require_True(t, si.State.LastSeq == 999_999) 18599 18600 natsSubSync(t, nc, inbox) 18601 checkFor(t, 2*time.Second, 15*time.Millisecond, func() error { 18602 ci, err := 
js.ConsumerInfo("TEST", "dur") 18603 if err != nil { 18604 return err 18605 } 18606 if ci.NumAckPending != 0 { 18607 return fmt.Errorf("NumAckPending should be 0, got %v", ci.NumAckPending) 18608 } 18609 if ci.Delivered.Stream != 999_999 { 18610 return fmt.Errorf("Delivered.Stream should be 999,999, got %v", ci.Delivered.Stream) 18611 } 18612 return nil 18613 }) 18614 } 18615 18616 func TestJetStreamAllowDirectAfterUpdate(t *testing.T) { 18617 s := RunBasicJetStreamServer(t) 18618 defer s.Shutdown() 18619 18620 nc, js := jsClientConnect(t, s) 18621 defer nc.Close() 18622 18623 _, err := js.AddStream(&nats.StreamConfig{ 18624 Name: "TEST", 18625 Subjects: []string{"*"}, 18626 }) 18627 require_NoError(t, err) 18628 sendStreamMsg(t, nc, "foo", "msg") 18629 18630 si, err := js.UpdateStream(&nats.StreamConfig{ 18631 Name: "TEST", 18632 Subjects: []string{"*"}, 18633 AllowDirect: true, 18634 }) 18635 require_NoError(t, err) 18636 require_True(t, si.Config.AllowDirect) 18637 18638 _, err = js.GetLastMsg("TEST", "foo", nats.DirectGet(), nats.MaxWait(100*time.Millisecond)) 18639 require_NoError(t, err) 18640 18641 // Make sure turning off works too. 18642 si, err = js.UpdateStream(&nats.StreamConfig{ 18643 Name: "TEST", 18644 Subjects: []string{"*"}, 18645 AllowDirect: false, 18646 }) 18647 require_NoError(t, err) 18648 require_False(t, si.Config.AllowDirect) 18649 18650 _, err = js.GetLastMsg("TEST", "foo", nats.DirectGet(), nats.MaxWait(100*time.Millisecond)) 18651 require_Error(t, err) 18652 } 18653 18654 // Bug when stream's consumer config does not force filestore to track per subject information. 18655 func TestJetStreamConsumerEOFBugNewFileStore(t *testing.T) { 18656 s := RunBasicJetStreamServer(t) 18657 defer s.Shutdown() 18658 18659 nc, js := jsClientConnect(t, s) 18660 defer nc.Close() 18661 18662 _, err := js.AddStream(&nats.StreamConfig{ 18663 Name: "TEST", 18664 Subjects: []string{"foo.bar.*"}, 18665 }) 18666 require_NoError(t, err) 18667 18668 _, err = js.AddStream(&nats.StreamConfig{ 18669 Name: "M", 18670 Mirror: &nats.StreamSource{Name: "TEST"}, 18671 }) 18672 require_NoError(t, err) 18673 18674 dsubj := nats.NewInbox() 18675 sub, err := nc.SubscribeSync(dsubj) 18676 require_NoError(t, err) 18677 nc.Flush() 18678 18679 // Filter needs to be a wildcard. Need to bind to the 18680 _, err = js.AddConsumer("M", &nats.ConsumerConfig{DeliverSubject: dsubj, FilterSubject: "foo.>"}) 18681 require_NoError(t, err) 18682 18683 for i := 0; i < 100; i++ { 18684 _, err := js.PublishAsync("foo.bar.baz", []byte("OK")) 18685 require_NoError(t, err) 18686 } 18687 18688 for i := 0; i < 100; i++ { 18689 m, err := sub.NextMsg(time.Second) 18690 require_NoError(t, err) 18691 m.Respond(nil) 18692 } 18693 18694 // Now force an expiration. 18695 mset, err := s.GlobalAccount().lookupStream("M") 18696 require_NoError(t, err) 18697 mset.mu.RLock() 18698 store := mset.store.(*fileStore) 18699 mset.mu.RUnlock() 18700 store.mu.RLock() 18701 mb := store.blks[0] 18702 store.mu.RUnlock() 18703 mb.mu.Lock() 18704 mb.fss = nil 18705 mb.mu.Unlock() 18706 18707 // Now send another message. 18708 _, err = js.PublishAsync("foo.bar.baz", []byte("OK")) 18709 require_NoError(t, err) 18710 18711 // This will fail with the bug. 
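// The publish above lands after the per-subject state (fss) was cleared; with the bug, the filtered consumer would not receive it.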
18712 _, err = sub.NextMsg(time.Second) 18713 require_NoError(t, err) 18714 } 18715 18716 func TestJetStreamSubjectBasedFilteredConsumers(t *testing.T) { 18717 conf := createConfFile(t, []byte(fmt.Sprintf(` 18718 listen: 127.0.0.1:-1 18719 jetstream: {max_mem_store: 64GB, max_file_store: 10TB, store_dir: %q} 18720 accounts: { 18721 A: { 18722 jetstream: enabled 18723 users: [ { 18724 user: u, 18725 password: p 18726 permissions { 18727 publish { 18728 allow: [ 18729 'ID.>', 18730 '$JS.API.INFO', 18731 '$JS.API.STREAM.>', 18732 '$JS.API.CONSUMER.INFO.>', 18733 '$JS.API.CONSUMER.CREATE.TEST.VIN-xxx.ID.foo.>', # Only allow ID.foo. 18734 ] 18735 deny: [ '$JS.API.CONSUMER.CREATE.*', '$JS.API.CONSUMER.DURABLE.CREATE.*.*'] 18736 } 18737 } 18738 } ] 18739 }, 18740 } 18741 `, t.TempDir()))) 18742 18743 s, _ := RunServerWithConfig(conf) 18744 defer s.Shutdown() 18745 18746 nc, js := jsClientConnect(t, s, nats.UserInfo("u", "p"), nats.ErrorHandler(noOpErrHandler)) 18747 defer nc.Close() 18748 18749 _, err := js.AddStream(&nats.StreamConfig{ 18750 Name: "TEST", 18751 Subjects: []string{"ID.*.*"}, 18752 }) 18753 require_NoError(t, err) 18754 18755 for i := 0; i < 100; i++ { 18756 js.Publish(fmt.Sprintf("ID.foo.%d", i*3), nil) 18757 js.Publish(fmt.Sprintf("ID.bar.%d", i*3+1), nil) 18758 js.Publish(fmt.Sprintf("ID.baz.%d", i*3+2), nil) 18759 } 18760 si, err := js.StreamInfo("TEST") 18761 require_NoError(t, err) 18762 require_True(t, si.State.Msgs == 300) 18763 18764 // Trying to create a consumer with non filtered API should fail. 18765 js, err = nc.JetStream(nats.MaxWait(200 * time.Millisecond)) 18766 require_NoError(t, err) 18767 18768 _, err = js.SubscribeSync("ID.foo.*") 18769 require_Error(t, err, nats.ErrTimeout, context.DeadlineExceeded) 18770 18771 _, err = js.SubscribeSync("ID.foo.*", nats.Durable("dlc")) 18772 require_Error(t, err, nats.ErrTimeout, context.DeadlineExceeded) 18773 18774 // Direct filtered should work. 18775 // Need to do by hand for now. 18776 ecSubj := fmt.Sprintf(JSApiConsumerCreateExT, "TEST", "VIN-xxx", "ID.foo.*") 18777 18778 crReq := CreateConsumerRequest{ 18779 Stream: "TEST", 18780 Config: ConsumerConfig{ 18781 DeliverPolicy: DeliverLast, 18782 FilterSubject: "ID.foo.*", 18783 AckPolicy: AckExplicit, 18784 }, 18785 } 18786 req, err := json.Marshal(crReq) 18787 require_NoError(t, err) 18788 18789 resp, err := nc.Request(ecSubj, req, 500*time.Millisecond) 18790 require_NoError(t, err) 18791 var ccResp JSApiConsumerCreateResponse 18792 err = json.Unmarshal(resp.Data, &ccResp) 18793 require_NoError(t, err) 18794 if ccResp.Error != nil { 18795 t.Fatalf("Unexpected error: %v", ccResp.Error) 18796 } 18797 cfg := ccResp.Config 18798 ci := ccResp.ConsumerInfo 18799 // Make sure we recognized as an ephemeral (since no durable was set) and that we have an InactiveThreshold. 18800 // Make sure we captured preferred ephemeral name. 18801 if ci.Name != "VIN-xxx" { 18802 t.Fatalf("Did not get correct name, expected %q got %q", "xxx", ci.Name) 18803 } 18804 if cfg.InactiveThreshold == 0 { 18805 t.Fatalf("Expected default inactive threshold to be set, got %v", cfg.InactiveThreshold) 18806 } 18807 18808 // Make sure we can not use different consumer name since locked above. 18809 ecSubj = fmt.Sprintf(JSApiConsumerCreateExT, "TEST", "VIN-zzz", "ID.foo.*") 18810 _, err = nc.Request(ecSubj, req, 500*time.Millisecond) 18811 require_Error(t, err, nats.ErrTimeout) 18812 18813 // Now check that we error when we mismatch filtersubject. 
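	// For reference, JSApiConsumerCreateExT expands to the extended create
	// subject that embeds the filter as its last token (illustrative expansion):
	//
	//   $JS.API.CONSUMER.CREATE.<stream>.<consumer>.<filter>
	//   e.g. $JS.API.CONSUMER.CREATE.TEST.VIN-xxx.ID.foo.*
	//
	// The mismatch below is between that token ("ID.foo.*") and the
	// FilterSubject carried in the JSON request body ("ID.bar.*").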
18814 crReq = CreateConsumerRequest{ 18815 Stream: "TEST", 18816 Config: ConsumerConfig{ 18817 DeliverPolicy: DeliverLast, 18818 FilterSubject: "ID.bar.*", 18819 AckPolicy: AckExplicit, 18820 }, 18821 } 18822 req, err = json.Marshal(crReq) 18823 require_NoError(t, err) 18824 18825 ecSubj = fmt.Sprintf(JSApiConsumerCreateExT, "TEST", "VIN-xxx", "ID.foo.*") 18826 resp, err = nc.Request(ecSubj, req, 500*time.Millisecond) 18827 require_NoError(t, err) 18828 err = json.Unmarshal(resp.Data, &ccResp) 18829 require_NoError(t, err) 18830 checkNatsError(t, ccResp.Error, JSConsumerCreateFilterSubjectMismatchErr) 18831 18832 // Now make sure if we change subject to match that we can not create a filtered consumer on ID.bar.> 18833 ecSubj = fmt.Sprintf(JSApiConsumerCreateExT, "TEST", "VIN-xxx", "ID.bar.*") 18834 _, err = nc.Request(ecSubj, req, 500*time.Millisecond) 18835 require_Error(t, err, nats.ErrTimeout) 18836 } 18837 18838 func TestJetStreamStreamSubjectsOverlap(t *testing.T) { 18839 s := RunBasicJetStreamServer(t) 18840 defer s.Shutdown() 18841 18842 nc, js := jsClientConnect(t, s) 18843 defer nc.Close() 18844 18845 _, err := js.AddStream(&nats.StreamConfig{ 18846 Name: "TEST", 18847 Subjects: []string{"foo.*", "foo.A"}, 18848 }) 18849 require_Error(t, err) 18850 require_True(t, strings.Contains(err.Error(), "overlaps")) 18851 18852 _, err = js.AddStream(&nats.StreamConfig{ 18853 Name: "TEST", 18854 Subjects: []string{"foo.*"}, 18855 }) 18856 require_NoError(t, err) 18857 18858 _, err = js.UpdateStream(&nats.StreamConfig{ 18859 Name: "TEST", 18860 Subjects: []string{"foo.*", "foo.A"}, 18861 }) 18862 require_Error(t, err) 18863 require_True(t, strings.Contains(err.Error(), "overlaps")) 18864 } 18865 18866 func TestJetStreamSuppressAllowDirect(t *testing.T) { 18867 s := RunBasicJetStreamServer(t) 18868 defer s.Shutdown() 18869 18870 nc, js := jsClientConnect(t, s) 18871 defer nc.Close() 18872 18873 si, err := js.AddStream(&nats.StreamConfig{ 18874 Name: "TEST", 18875 Subjects: []string{"key.*"}, 18876 MaxMsgsPerSubject: 1, 18877 AllowDirect: true, 18878 }) 18879 require_NoError(t, err) 18880 require_True(t, si.Config.AllowDirect) 18881 18882 si, err = js.UpdateStream(&nats.StreamConfig{ 18883 Name: "TEST", 18884 Subjects: []string{"key.*"}, 18885 MaxMsgsPerSubject: 1, 18886 AllowDirect: false, 18887 }) 18888 require_NoError(t, err) 18889 require_False(t, si.Config.AllowDirect) 18890 18891 sendStreamMsg(t, nc, "key.22", "msg") 18892 18893 _, err = js.GetLastMsg("TEST", "foo", nats.DirectGet(), nats.MaxWait(100*time.Millisecond)) 18894 require_Error(t, err) 18895 } 18896 18897 func TestJetStreamPullConsumerNoAck(t *testing.T) { 18898 s := RunBasicJetStreamServer(t) 18899 defer s.Shutdown() 18900 18901 nc, js := jsClientConnect(t, s) 18902 defer nc.Close() 18903 18904 _, err := js.AddStream(&nats.StreamConfig{ 18905 Name: "TEST", 18906 Subjects: []string{"ORDERS.*"}, 18907 }) 18908 require_NoError(t, err) 18909 18910 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 18911 Durable: "dlc", 18912 AckPolicy: nats.AckNonePolicy, 18913 }) 18914 require_NoError(t, err) 18915 } 18916 18917 func TestJetStreamAccountPurge(t *testing.T) { 18918 sysKp, syspub := createKey(t) 18919 sysJwt := encodeClaim(t, jwt.NewAccountClaims(syspub), syspub) 18920 sysCreds := newUser(t, sysKp) 18921 accKp, accpub := createKey(t) 18922 accClaim := jwt.NewAccountClaims(accpub) 18923 accClaim.Limits.JetStreamLimits.DiskStorage = 1024 * 1024 * 5 18924 accClaim.Limits.JetStreamLimits.MemoryStorage = 1024 * 1024 * 5 18925 accJwt 
:= encodeClaim(t, accClaim, accpub) 18926 accCreds := newUser(t, accKp) 18927 18928 storeDir := t.TempDir() 18929 18930 cfg := createConfFile(t, []byte(fmt.Sprintf(` 18931 host: 127.0.0.1 18932 port:-1 18933 server_name: S1 18934 operator: %s 18935 system_account: %s 18936 resolver: { 18937 type: full 18938 dir: '%s/jwt' 18939 } 18940 jetstream: {max_mem_store: 256MB, max_file_store: 2GB, store_dir: '%s/js'} 18941 `, ojwt, syspub, storeDir, storeDir))) 18942 defer os.Remove(cfg) 18943 18944 s, o := RunServerWithConfig(cfg) 18945 updateJwt(t, s.ClientURL(), sysCreds, sysJwt, 1) 18946 updateJwt(t, s.ClientURL(), sysCreds, accJwt, 1) 18947 defer s.Shutdown() 18948 18949 inspectDirs := func(t *testing.T, accTotal int) error { 18950 t.Helper() 18951 if accTotal == 0 { 18952 files, err := os.ReadDir(filepath.Join(o.StoreDir, "jetstream", accpub)) 18953 require_True(t, len(files) == accTotal || err != nil) 18954 } else { 18955 files, err := os.ReadDir(filepath.Join(o.StoreDir, "jetstream", accpub, "streams")) 18956 require_NoError(t, err) 18957 require_True(t, len(files) == accTotal) 18958 } 18959 return nil 18960 } 18961 18962 createTestData := func() { 18963 nc := natsConnect(t, s.ClientURL(), nats.UserCredentials(accCreds)) 18964 defer nc.Close() 18965 js, err := nc.JetStream() 18966 require_NoError(t, err) 18967 _, err = js.AddStream(&nats.StreamConfig{ 18968 Name: "TEST1", 18969 Subjects: []string{"foo"}, 18970 }) 18971 require_NoError(t, err) 18972 _, err = js.AddConsumer("TEST1", 18973 &nats.ConsumerConfig{Durable: "DUR1", 18974 AckPolicy: nats.AckExplicitPolicy}) 18975 require_NoError(t, err) 18976 } 18977 18978 purge := func(t *testing.T) { 18979 t.Helper() 18980 var resp JSApiAccountPurgeResponse 18981 ncsys := natsConnect(t, s.ClientURL(), nats.UserCredentials(sysCreds)) 18982 defer ncsys.Close() 18983 m, err := ncsys.Request(fmt.Sprintf(JSApiAccountPurgeT, accpub), nil, 5*time.Second) 18984 require_NoError(t, err) 18985 err = json.Unmarshal(m.Data, &resp) 18986 require_NoError(t, err) 18987 require_True(t, resp.Initiated) 18988 } 18989 18990 createTestData() 18991 inspectDirs(t, 1) 18992 purge(t) 18993 inspectDirs(t, 0) 18994 createTestData() 18995 inspectDirs(t, 1) 18996 18997 s.Shutdown() 18998 require_NoError(t, os.Remove(storeDir+"/jwt/"+accpub+".jwt")) 18999 19000 s, o = RunServerWithConfig(o.ConfigFile) 19001 defer s.Shutdown() 19002 inspectDirs(t, 1) 19003 purge(t) 19004 inspectDirs(t, 0) 19005 } 19006 19007 func TestJetStreamPullConsumerLastPerSubjectRedeliveries(t *testing.T) { 19008 s := RunBasicJetStreamServer(t) 19009 defer s.Shutdown() 19010 19011 nc, js := jsClientConnect(t, s) 19012 defer nc.Close() 19013 19014 _, err := js.AddStream(&nats.StreamConfig{ 19015 Name: "TEST", 19016 Subjects: []string{"foo.>"}, 19017 }) 19018 require_NoError(t, err) 19019 19020 for i := 0; i < 20; i++ { 19021 sendStreamMsg(t, nc, fmt.Sprintf("foo.%v", i), "msg") 19022 } 19023 19024 // Create a pull sub with a maxackpending that is <= of the number of 19025 // messages in the stream and as much as we are going to Fetch() below. 19026 sub, err := js.PullSubscribe(">", "dur", 19027 nats.AckExplicit(), 19028 nats.BindStream("TEST"), 19029 nats.DeliverLastPerSubject(), 19030 nats.MaxAckPending(10), 19031 nats.MaxRequestBatch(10), 19032 nats.AckWait(250*time.Millisecond)) 19033 require_NoError(t, err) 19034 19035 // Fetch the max number of message we can get, and don't ack them. 
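	// With MaxAckPending(10) and a 250ms AckWait, fetching without acking fills
	// the ack-pending window and then makes all 10 messages redeliverable once
	// the wait lapses. A sketch of the pattern exercised here (illustrative only):
	//
	//   msgs, _ := sub.Fetch(10)              // fills MaxAckPending
	//   time.Sleep(300 * time.Millisecond)    // > AckWait, nothing acked
	//   msgs, _ = sub.Fetch(10)               // the same 10 messages come back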
19036 	_, err = sub.Fetch(10, nats.MaxWait(time.Second))
19037 	require_NoError(t, err)
19038 
19039 	// Wait for more than the redelivery (AckWait) time.
19040 	time.Sleep(500 * time.Millisecond)
19041 
19042 	// Fetch again, make sure we can get those 10 messages.
19043 	msgs, err := sub.Fetch(10, nats.MaxWait(time.Second))
19044 	require_NoError(t, err)
19045 	require_True(t, len(msgs) == 10)
19046 	// Make sure those were the first 10 messages.
19047 	for i, m := range msgs {
19048 		if m.Subject != fmt.Sprintf("foo.%v", i) {
19049 			t.Fatalf("Expected message for subject foo.%v, got %v", i, m.Subject)
19050 		}
19051 		m.Ack()
19052 	}
19053 }
19054 
19055 func TestJetStreamPullConsumersTimeoutHeaders(t *testing.T) {
19056 	s := RunBasicJetStreamServer(t)
19057 	defer s.Shutdown()
19058 
19059 	// Client for API requests.
19060 	nc, js := jsClientConnect(t, s)
19061 	defer nc.Close()
19062 
19063 	_, err := js.AddStream(&nats.StreamConfig{
19064 		Name:     "TEST",
19065 		Subjects: []string{"foo.>"},
19066 	})
19067 	require_NoError(t, err)
19068 
19069 	_, err = js.AddConsumer("TEST", &nats.ConsumerConfig{
19070 		Durable:   "dlc",
19071 		AckPolicy: nats.AckExplicitPolicy,
19072 	})
19073 	require_NoError(t, err)
19074 
19075 	nc.Publish("foo.foo", []byte("foo"))
19076 	nc.Publish("foo.bar", []byte("bar"))
19077 	nc.Publish("foo.else", []byte("baz"))
19078 	nc.Flush()
19079 
19080 	// We will do low level requests by hand for this test so as not to depend on any client impl.
19081 	rsubj := fmt.Sprintf(JSApiRequestNextT, "TEST", "dlc")
19082 
19083 	maxBytes := 1024
19084 	batch := 50
19085 	req := &JSApiConsumerGetNextRequest{Batch: batch, Expires: 100 * time.Millisecond, NoWait: false, MaxBytes: maxBytes}
19086 	jreq, err := json.Marshal(req)
19087 	require_NoError(t, err)
19088 	// Create listener.
19089 	reply, msgs := nats.NewInbox(), make(chan *nats.Msg, batch)
19090 	sub, err := nc.ChanSubscribe(reply, msgs)
19091 	require_NoError(t, err)
19092 	defer sub.Unsubscribe()
19093 
19094 	// Send request.
19095 	err = nc.PublishRequest(rsubj, reply, jreq)
19096 	require_NoError(t, err)
19097 
19098 	bytesReceived := 0
19099 	messagesReceived := 0
19100 
19101 	checkHeaders := func(expectedStatus, expectedDesc string, m *nats.Msg) {
19102 		t.Helper()
19103 		if value := m.Header.Get("Status"); value != expectedStatus {
19104 			t.Fatalf("Expected status %q, got %q", expectedStatus, value)
19105 		}
19106 		if value := m.Header.Get("Description"); value != expectedDesc {
19107 			t.Fatalf("Expected description %q, got %q", expectedDesc, value)
19108 		}
19109 		if value := m.Header.Get(JSPullRequestPendingMsgs); value != fmt.Sprint(batch-messagesReceived) {
19110 			t.Fatalf("Expected %d messages, got %s", batch-messagesReceived, value)
19111 		}
19112 		if value := m.Header.Get(JSPullRequestPendingBytes); value != fmt.Sprint(maxBytes-bytesReceived) {
19113 			t.Fatalf("Expected %d bytes, got %s", maxBytes-bytesReceived, value)
19114 		}
19115 	}
19116 
19117 	for done := false; !done; {
19118 		select {
19119 		case m := <-msgs:
19120 			if len(m.Data) == 0 && m.Header != nil {
19121 				checkHeaders("408", "Request Timeout", m)
19122 				done = true
19123 			} else {
19124 				messagesReceived += 1
19125 				bytesReceived += (len(m.Data) + len(m.Header) + len(m.Reply) + len(m.Subject))
19126 			}
19127 		case <-time.After((100 + 250) * time.Millisecond):
19128 			t.Fatalf("Did not receive all the msgs in time")
19129 		}
19130 	}
19131 
19132 	// Now resend the request but then shut down the server and
19133 	// make sure we have the same info.
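	// For reference, the request being resent below is the same raw pull request:
	// a JSON payload published to the consumer's next-message API subject with our
	// inbox as the reply (illustrative shape; durations marshal as nanoseconds, so
	// 100ms becomes 100000000):
	//
	//   subject: $JS.API.CONSUMER.MSG.NEXT.TEST.dlc
	//   payload: {"batch":50,"expires":100000000,"max_bytes":1024}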
19134 err = nc.PublishRequest(rsubj, reply, jreq) 19135 require_NoError(t, err) 19136 natsFlush(t, nc) 19137 19138 s.Shutdown() 19139 19140 // It is possible that the client did not receive, so let's not fail 19141 // on that. But if the 409 indicating the the server is shutdown 19142 // is received, then it should have the new headers. 19143 messagesReceived, bytesReceived = 0, 0 19144 select { 19145 case m := <-msgs: 19146 checkHeaders("409", "Server Shutdown", m) 19147 case <-time.After(500 * time.Millisecond): 19148 // we can't fail for that. 19149 t.Logf("Subscription did not receive the pull request response on server shutdown") 19150 } 19151 } 19152 19153 // For issue https://github.com/nats-io/nats-server/issues/3612 19154 // Do auto cleanup. 19155 func TestJetStreamDanglingMessageAutoCleanup(t *testing.T) { 19156 s := RunBasicJetStreamServer(t) 19157 defer s.Shutdown() 19158 19159 // Client for API requests. 19160 nc, js := jsClientConnect(t, s) 19161 defer nc.Close() 19162 19163 _, err := js.AddStream(&nats.StreamConfig{ 19164 Name: "TEST", 19165 Subjects: []string{"foo"}, 19166 Retention: nats.InterestPolicy, 19167 }) 19168 require_NoError(t, err) 19169 19170 sub, err := js.PullSubscribe("foo", "dlc", nats.MaxAckPending(10)) 19171 require_NoError(t, err) 19172 19173 // Send 100 msgs 19174 n := 100 19175 for i := 0; i < n; i++ { 19176 sendStreamMsg(t, nc, "foo", "msg") 19177 } 19178 19179 // Grab and ack 10 messages. 19180 for _, m := range fetchMsgs(t, sub, 10, time.Second) { 19181 m.AckSync() 19182 } 19183 19184 ci, err := sub.ConsumerInfo() 19185 require_NoError(t, err) 19186 require_True(t, ci.AckFloor.Stream == 10) 19187 19188 // Stop current 19189 sd := s.JetStreamConfig().StoreDir 19190 s.Shutdown() 19191 19192 // We will hand move the ackfloor to simulate dangling message condition. 19193 cstore := filepath.Join(sd, "$G", "streams", "TEST", "obs", "dlc", "o.dat") 19194 19195 buf, err := os.ReadFile(cstore) 19196 require_NoError(t, err) 19197 19198 state, err := decodeConsumerState(buf) 19199 require_NoError(t, err) 19200 19201 // Update from 10 for delivered and ack to 90. 19202 state.Delivered.Stream, state.Delivered.Consumer = 90, 90 19203 state.AckFloor.Stream, state.AckFloor.Consumer = 90, 90 19204 19205 err = os.WriteFile(cstore, encodeConsumerState(state), defaultFilePerms) 19206 require_NoError(t, err) 19207 19208 // Restart. 19209 s = RunJetStreamServerOnPort(-1, sd) 19210 defer s.Shutdown() 19211 19212 nc, js = jsClientConnect(t, s) 19213 defer nc.Close() 19214 19215 si, err := js.StreamInfo("TEST") 19216 require_NoError(t, err) 19217 19218 if si.State.Msgs != 10 { 19219 t.Fatalf("Expected auto-cleanup to have worked but got %d msgs vs 10", si.State.Msgs) 19220 } 19221 } 19222 19223 // Issue https://github.com/nats-io/nats-server/issues/3645 19224 func TestJetStreamMsgIDHeaderCollision(t *testing.T) { 19225 s := RunBasicJetStreamServer(t) 19226 defer s.Shutdown() 19227 19228 // Client for API requests. 
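	// Background for this test: JetStream de-duplicates publishes solely on the
	// Nats-Msg-Id header (JSMsgId) within the stream's duplicate window, e.g.
	// (illustrative sketch):
	//
	//   m := nats.NewMsg("ORDERS.test")
	//   m.Header.Set(JSMsgId, "1")
	//   js.PublishMsg(m) // stored
	//   js.PublishMsg(m) // same ID inside the window: acked as a duplicate, not stored
	//
	// Headers that merely look similar must not collide with that mechanism.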
19229 nc, js := jsClientConnect(t, s) 19230 defer nc.Close() 19231 19232 _, err := js.AddStream(&nats.StreamConfig{ 19233 Name: "TEST", 19234 Subjects: []string{"ORDERS.*"}, 19235 }) 19236 require_NoError(t, err) 19237 19238 m := nats.NewMsg("ORDERS.test") 19239 m.Header.Add(JSMsgId, "1") 19240 m.Data = []byte("ok") 19241 19242 _, err = js.PublishMsg(m) 19243 require_NoError(t, err) 19244 19245 m.Header = make(nats.Header) 19246 m.Header.Add("Orig-Nats-Msg-Id", "1") 19247 19248 _, err = js.PublishMsg(m) 19249 require_NoError(t, err) 19250 19251 m.Header = make(nats.Header) 19252 m.Header.Add("Original-Nats-Msg-Id", "1") 19253 19254 _, err = js.PublishMsg(m) 19255 require_NoError(t, err) 19256 19257 m.Header = make(nats.Header) 19258 m.Header.Add("Original-Nats-Msg-Id", "1") 19259 m.Header.Add("Really-Original-Nats-Msg-Id", "1") 19260 19261 _, err = js.PublishMsg(m) 19262 require_NoError(t, err) 19263 19264 m.Header = make(nats.Header) 19265 m.Header.Add("X", "Nats-Msg-Id:1") 19266 19267 _, err = js.PublishMsg(m) 19268 require_NoError(t, err) 19269 19270 si, err := js.StreamInfo("TEST") 19271 require_NoError(t, err) 19272 19273 require_True(t, si.State.Msgs == 5) 19274 } 19275 19276 // https://github.com/nats-io/nats-server/issues/3657 19277 func TestJetStreamServerCrashOnPullConsumerDeleteWithInactiveThresholdAfterAck(t *testing.T) { 19278 s := RunBasicJetStreamServer(t) 19279 defer s.Shutdown() 19280 19281 // Client for API requests. 19282 nc, js := jsClientConnect(t, s) 19283 defer nc.Close() 19284 19285 _, err := js.AddStream(&nats.StreamConfig{ 19286 Name: "TEST", 19287 Subjects: []string{"foo"}, 19288 }) 19289 require_NoError(t, err) 19290 19291 sendStreamMsg(t, nc, "foo", "msg") 19292 19293 sub, err := js.PullSubscribe("foo", "dlc", nats.InactiveThreshold(10*time.Second)) 19294 require_NoError(t, err) 19295 19296 msgs := fetchMsgs(t, sub, 1, time.Second) 19297 require_True(t, len(msgs) == 1) 19298 msgs[0].Ack() 19299 err = js.DeleteConsumer("TEST", "dlc") 19300 require_NoError(t, err) 19301 19302 // If server crashes this will fail. 
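	// The StreamInfo call below doubles as a liveness probe: if the consumer
	// delete above took the server down, the request cannot succeed. A more
	// direct probe (illustrative sketch, mirroring how healthz is used elsewhere
	// in these tests) could be:
	//
	//   if hs := s.healthz(nil); hs.Error != _EMPTY_ {
	//       t.Fatalf("server unhealthy after consumer delete: %s", hs.Error)
	//   }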
19303 _, err = js.StreamInfo("TEST") 19304 require_NoError(t, err) 19305 } 19306 19307 func TestJetStreamConsumerMultipleSubjectsLast(t *testing.T) { 19308 s := RunBasicJetStreamServer(t) 19309 if config := s.JetStreamConfig(); config != nil { 19310 defer removeDir(t, config.StoreDir) 19311 } 19312 defer s.Shutdown() 19313 19314 durable := "durable" 19315 nc, js := jsClientConnect(t, s) 19316 defer nc.Close() 19317 acc := s.GlobalAccount() 19318 19319 mset, err := acc.addStream(&StreamConfig{ 19320 Subjects: []string{"events", "data", "other"}, 19321 Name: "name", 19322 }) 19323 if err != nil { 19324 t.Fatalf("error while creating stream") 19325 } 19326 19327 sendStreamMsg(t, nc, "events", "1") 19328 sendStreamMsg(t, nc, "data", "2") 19329 sendStreamMsg(t, nc, "other", "3") 19330 sendStreamMsg(t, nc, "events", "4") 19331 sendStreamMsg(t, nc, "data", "5") 19332 sendStreamMsg(t, nc, "data", "6") 19333 sendStreamMsg(t, nc, "other", "7") 19334 sendStreamMsg(t, nc, "other", "8") 19335 19336 // if they're not the same, expect error 19337 _, err = mset.addConsumer(&ConsumerConfig{ 19338 DeliverPolicy: DeliverLast, 19339 AckPolicy: AckExplicit, 19340 DeliverSubject: "deliver", 19341 FilterSubjects: []string{"events", "data"}, 19342 Durable: durable, 19343 }) 19344 require_NoError(t, err) 19345 19346 sub, err := js.SubscribeSync("", nats.Bind("name", durable)) 19347 require_NoError(t, err) 19348 19349 msg, err := sub.NextMsg(time.Millisecond * 500) 19350 require_NoError(t, err) 19351 19352 j, err := strconv.Atoi(string(msg.Data)) 19353 require_NoError(t, err) 19354 expectedStreamSeq := 6 19355 if j != expectedStreamSeq { 19356 t.Fatalf("wrong sequence, expected %v got %v", expectedStreamSeq, j) 19357 } 19358 19359 require_NoError(t, msg.AckSync()) 19360 19361 // check if we don't get more than we wanted 19362 msg, err = sub.NextMsg(time.Millisecond * 500) 19363 if msg != nil || err == nil { 19364 t.Fatalf("should not get more messages") 19365 } 19366 19367 info, err := js.ConsumerInfo("name", durable) 19368 require_NoError(t, err) 19369 19370 require_True(t, info.NumAckPending == 0) 19371 require_True(t, info.AckFloor.Stream == 8) 19372 require_True(t, info.AckFloor.Consumer == 1) 19373 require_True(t, info.NumPending == 0) 19374 } 19375 19376 func TestJetStreamConsumerMultipleSubjectsLastPerSubject(t *testing.T) { 19377 s := RunBasicJetStreamServer(t) 19378 if config := s.JetStreamConfig(); config != nil { 19379 defer removeDir(t, config.StoreDir) 19380 } 19381 defer s.Shutdown() 19382 19383 durable := "durable" 19384 nc, js := jsClientConnect(t, s) 19385 defer nc.Close() 19386 acc := s.GlobalAccount() 19387 19388 mset, err := acc.addStream(&StreamConfig{ 19389 Subjects: []string{"events.*", "data.>", "other"}, 19390 Name: "name", 19391 }) 19392 if err != nil { 19393 t.Fatalf("error while creating stream") 19394 } 19395 19396 sendStreamMsg(t, nc, "events.1", "bad") 19397 sendStreamMsg(t, nc, "events.1", "events.1") 19398 19399 sendStreamMsg(t, nc, "data.1", "bad") 19400 sendStreamMsg(t, nc, "data.1", "bad") 19401 sendStreamMsg(t, nc, "data.1", "bad") 19402 sendStreamMsg(t, nc, "data.1", "bad") 19403 sendStreamMsg(t, nc, "data.1", "data.1") 19404 19405 sendStreamMsg(t, nc, "events.2", "bad") 19406 sendStreamMsg(t, nc, "events.2", "bad") 19407 // this is last proper sequence, 19408 sendStreamMsg(t, nc, "events.2", "events.2") 19409 19410 sendStreamMsg(t, nc, "other", "bad") 19411 sendStreamMsg(t, nc, "other", "bad") 19412 19413 // if they're not the same, expect error 19414 _, err = 
mset.addConsumer(&ConsumerConfig{ 19415 DeliverPolicy: DeliverLastPerSubject, 19416 AckPolicy: AckExplicit, 19417 DeliverSubject: "deliver", 19418 FilterSubjects: []string{"events.*", "data.>"}, 19419 Durable: durable, 19420 }) 19421 require_NoError(t, err) 19422 19423 sub, err := js.SubscribeSync("", nats.Bind("name", durable)) 19424 require_NoError(t, err) 19425 19426 checkMessage := func(t *testing.T, subject string, payload string, ack bool) { 19427 msg, err := sub.NextMsg(time.Millisecond * 500) 19428 require_NoError(t, err) 19429 19430 if string(msg.Data) != payload { 19431 t.Fatalf("expected %v paylaod, got %v", payload, string(msg.Data)) 19432 } 19433 if subject != msg.Subject { 19434 t.Fatalf("expected %v subject, got %v", subject, msg.Subject) 19435 } 19436 if ack { 19437 msg.AckSync() 19438 } 19439 } 19440 19441 checkMessage(t, "events.1", "events.1", true) 19442 checkMessage(t, "data.1", "data.1", true) 19443 checkMessage(t, "events.2", "events.2", false) 19444 19445 info, err := js.ConsumerInfo("name", durable) 19446 require_NoError(t, err) 19447 19448 require_True(t, info.AckFloor.Consumer == 2) 19449 require_True(t, info.AckFloor.Stream == 9) 19450 require_True(t, info.Delivered.Stream == 12) 19451 require_True(t, info.Delivered.Consumer == 3) 19452 19453 require_NoError(t, err) 19454 19455 } 19456 func TestJetStreamConsumerMultipleSubjects(t *testing.T) { 19457 s := RunBasicJetStreamServer(t) 19458 if config := s.JetStreamConfig(); config != nil { 19459 defer removeDir(t, config.StoreDir) 19460 } 19461 defer s.Shutdown() 19462 19463 durable := "durable" 19464 nc, js := jsClientConnect(t, s) 19465 defer nc.Close() 19466 19467 mset, err := s.GlobalAccount().addStream(&StreamConfig{ 19468 Subjects: []string{"events.>", "data.>"}, 19469 Name: "name", 19470 }) 19471 require_NoError(t, err) 19472 19473 for i := 0; i < 20; i += 2 { 19474 sendStreamMsg(t, nc, "events.created", fmt.Sprintf("created %v", i)) 19475 sendStreamMsg(t, nc, "data.processed", fmt.Sprintf("processed %v", i+1)) 19476 } 19477 19478 _, err = mset.addConsumer(&ConsumerConfig{ 19479 Durable: durable, 19480 DeliverSubject: "deliver", 19481 FilterSubjects: []string{"events.created", "data.processed"}, 19482 AckPolicy: AckExplicit, 19483 }) 19484 require_NoError(t, err) 19485 19486 sub, err := js.SubscribeSync("", nats.Bind("name", durable)) 19487 require_NoError(t, err) 19488 19489 for i := 0; i < 20; i++ { 19490 msg, err := sub.NextMsg(time.Millisecond * 500) 19491 require_NoError(t, err) 19492 require_NoError(t, msg.AckSync()) 19493 } 19494 info, err := js.ConsumerInfo("name", durable) 19495 require_NoError(t, err) 19496 require_True(t, info.NumAckPending == 0) 19497 require_True(t, info.NumPending == 0) 19498 require_True(t, info.AckFloor.Consumer == 20) 19499 require_True(t, info.AckFloor.Stream == 20) 19500 19501 } 19502 19503 func TestJetStreamConsumerMultipleSubjectsWithEmpty(t *testing.T) { 19504 s := RunBasicJetStreamServer(t) 19505 if config := s.JetStreamConfig(); config != nil { 19506 defer removeDir(t, config.StoreDir) 19507 } 19508 defer s.Shutdown() 19509 19510 durable := "durable" 19511 nc, js := jsClientConnect(t, s) 19512 defer nc.Close() 19513 19514 _, err := js.AddStream(&nats.StreamConfig{ 19515 Subjects: []string{"events.>"}, 19516 Name: "name", 19517 }) 19518 require_NoError(t, err) 19519 19520 for i := 0; i < 10; i++ { 19521 sendStreamMsg(t, nc, "events.created", fmt.Sprintf("%v", i)) 19522 } 19523 19524 // if they're not the same, expect error 19525 _, err = js.AddConsumer("name", 
&nats.ConsumerConfig{ 19526 DeliverSubject: "deliver", 19527 FilterSubject: "", 19528 Durable: durable, 19529 AckPolicy: nats.AckExplicitPolicy}) 19530 require_NoError(t, err) 19531 19532 sub, err := js.SubscribeSync("", nats.Bind("name", durable)) 19533 require_NoError(t, err) 19534 19535 for i := 0; i < 9; i++ { 19536 msg, err := sub.NextMsg(time.Millisecond * 500) 19537 require_NoError(t, err) 19538 j, err := strconv.Atoi(string(msg.Data)) 19539 require_NoError(t, err) 19540 if j != i { 19541 t.Fatalf("wrong sequence, expected %v got %v", i, j) 19542 } 19543 require_NoError(t, msg.AckSync()) 19544 } 19545 19546 info, err := js.ConsumerInfo("name", durable) 19547 require_NoError(t, err) 19548 require_True(t, info.Delivered.Stream == 10) 19549 require_True(t, info.Delivered.Consumer == 10) 19550 require_True(t, info.AckFloor.Stream == 9) 19551 require_True(t, info.AckFloor.Consumer == 9) 19552 require_True(t, info.NumAckPending == 1) 19553 19554 resp := createConsumer(t, nc, "name", ConsumerConfig{ 19555 FilterSubjects: []string{""}, 19556 DeliverSubject: "multiple", 19557 Durable: "multiple", 19558 AckPolicy: AckExplicit, 19559 }) 19560 require_True(t, resp.Error.ErrCode == 10139) 19561 } 19562 19563 func SingleFilterConsumerCheck(t *testing.T) { 19564 s := RunBasicJetStreamServer(t) 19565 if config := s.JetStreamConfig(); config != nil { 19566 defer removeDir(t, config.StoreDir) 19567 } 19568 defer s.Shutdown() 19569 19570 durable := "durable" 19571 nc, _ := jsClientConnect(t, s) 19572 defer nc.Close() 19573 acc := s.GlobalAccount() 19574 19575 mset, err := acc.addStream(&StreamConfig{ 19576 Subjects: []string{"events.>"}, 19577 Name: "deliver", 19578 }) 19579 require_NoError(t, err) 19580 19581 // if they're not the same, expect error 19582 _, err = mset.addConsumer(&ConsumerConfig{ 19583 DeliverSubject: "deliver", 19584 FilterSubject: "SINGLE", 19585 Durable: durable, 19586 }) 19587 require_Error(t, err) 19588 } 19589 19590 // createConsumer is a temporary method until nats.go client supports multiple subjects. 19591 // it is used where lowe level call on mset is not enough, as we want to test error validation. 
19592 func createConsumer(t *testing.T, nc *nats.Conn, stream string, config ConsumerConfig) JSApiConsumerCreateResponse { 19593 req, err := json.Marshal(&CreateConsumerRequest{Stream: stream, Config: config}) 19594 require_NoError(t, err) 19595 19596 resp, err := nc.Request(fmt.Sprintf("$JS.API.CONSUMER.DURABLE.CREATE.%s.%s", stream, config.Durable), req, time.Second*10) 19597 require_NoError(t, err) 19598 19599 var apiResp JSApiConsumerCreateResponse 19600 require_NoError(t, json.Unmarshal(resp.Data, &apiResp)) 19601 19602 return apiResp 19603 } 19604 19605 func TestJetStreamConsumerOverlappingSubjects(t *testing.T) { 19606 s := RunBasicJetStreamServer(t) 19607 if config := s.JetStreamConfig(); config != nil { 19608 defer removeDir(t, config.StoreDir) 19609 } 19610 defer s.Shutdown() 19611 19612 nc, _ := jsClientConnect(t, s) 19613 defer nc.Close() 19614 acc := s.GlobalAccount() 19615 19616 _, err := acc.addStream(&StreamConfig{ 19617 Subjects: []string{"events.>"}, 19618 Name: "deliver", 19619 }) 19620 require_NoError(t, err) 19621 19622 resp := createConsumer(t, nc, "deliver", ConsumerConfig{ 19623 FilterSubjects: []string{"events.one", "events.*"}, 19624 Durable: "name", 19625 }) 19626 19627 if resp.Error.ErrCode != 10138 { 19628 t.Fatalf("this should error as we have overlapping subjects, got %+v", resp.Error) 19629 } 19630 } 19631 19632 func TestJetStreamBothFiltersSet(t *testing.T) { 19633 s := RunBasicJetStreamServer(t) 19634 if config := s.JetStreamConfig(); config != nil { 19635 defer removeDir(t, config.StoreDir) 19636 } 19637 defer s.Shutdown() 19638 19639 nc, _ := jsClientConnect(t, s) 19640 defer nc.Close() 19641 acc := s.GlobalAccount() 19642 19643 _, err := acc.addStream(&StreamConfig{ 19644 Subjects: []string{"events.>"}, 19645 Name: "deliver", 19646 }) 19647 require_NoError(t, err) 19648 19649 resp := createConsumer(t, nc, "deliver", ConsumerConfig{ 19650 FilterSubjects: []string{"events.one", "events.two"}, 19651 FilterSubject: "events.three", 19652 Durable: "name", 19653 }) 19654 require_True(t, resp.Error.ErrCode == 10136) 19655 } 19656 19657 func TestJetStreamMultipleSubjectsPushBasic(t *testing.T) { 19658 s := RunBasicJetStreamServer(t) 19659 if config := s.JetStreamConfig(); config != nil { 19660 defer removeDir(t, config.StoreDir) 19661 } 19662 defer s.Shutdown() 19663 19664 nc, js := jsClientConnect(t, s) 19665 defer nc.Close() 19666 19667 mset, err := s.GlobalAccount().addStream(&StreamConfig{ 19668 Subjects: []string{"events", "data", "other"}, 19669 Name: "deliver", 19670 }) 19671 require_NoError(t, err) 19672 19673 _, err = mset.addConsumer(&ConsumerConfig{ 19674 FilterSubjects: []string{"events", "data"}, 19675 Durable: "name", 19676 DeliverSubject: "push", 19677 }) 19678 require_NoError(t, err) 19679 19680 sub, err := nc.SubscribeSync("push") 19681 require_NoError(t, err) 19682 19683 sendStreamMsg(t, nc, "other", "10") 19684 sendStreamMsg(t, nc, "events", "0") 19685 sendStreamMsg(t, nc, "data", "1") 19686 sendStreamMsg(t, nc, "events", "2") 19687 sendStreamMsg(t, nc, "events", "3") 19688 sendStreamMsg(t, nc, "other", "10") 19689 sendStreamMsg(t, nc, "data", "4") 19690 sendStreamMsg(t, nc, "data", "5") 19691 19692 for i := 0; i < 6; i++ { 19693 msg, err := sub.NextMsg(time.Second * 1) 19694 require_NoError(t, err) 19695 if fmt.Sprintf("%v", i) != string(msg.Data) { 19696 t.Fatalf("bad sequence. 
Expected %v, got %v", i, string(msg.Data)) 19697 } 19698 } 19699 info, err := js.ConsumerInfo("deliver", "name") 19700 require_NoError(t, err) 19701 require_True(t, info.AckFloor.Consumer == 6) 19702 require_True(t, info.AckFloor.Stream == 8) 19703 } 19704 func TestJetStreamMultipleSubjectsBasic(t *testing.T) { 19705 s := RunBasicJetStreamServer(t) 19706 if config := s.JetStreamConfig(); config != nil { 19707 defer removeDir(t, config.StoreDir) 19708 } 19709 defer s.Shutdown() 19710 19711 nc, js := jsClientConnect(t, s) 19712 defer nc.Close() 19713 acc := s.GlobalAccount() 19714 19715 mset, err := acc.addStream(&StreamConfig{ 19716 Subjects: []string{"events", "data", "other"}, 19717 Name: "deliver", 19718 }) 19719 require_NoError(t, err) 19720 19721 mset.addConsumer(&ConsumerConfig{ 19722 FilterSubjects: []string{"events", "data"}, 19723 Durable: "name", 19724 }) 19725 require_NoError(t, err) 19726 19727 sendStreamMsg(t, nc, "other", "10") 19728 sendStreamMsg(t, nc, "events", "0") 19729 sendStreamMsg(t, nc, "data", "1") 19730 sendStreamMsg(t, nc, "events", "2") 19731 sendStreamMsg(t, nc, "events", "3") 19732 sendStreamMsg(t, nc, "other", "10") 19733 sendStreamMsg(t, nc, "data", "4") 19734 sendStreamMsg(t, nc, "data", "5") 19735 19736 consumer, err := js.PullSubscribe("", "name", nats.Bind("deliver", "name")) 19737 require_NoError(t, err) 19738 19739 msg, err := consumer.Fetch(6) 19740 require_NoError(t, err) 19741 19742 for i, msg := range msg { 19743 if fmt.Sprintf("%v", i) != string(msg.Data) { 19744 t.Fatalf("bad sequence. Expected %v, got %v", i, string(msg.Data)) 19745 } 19746 } 19747 _, err = js.ConsumerInfo("deliver", "name") 19748 require_NoError(t, err) 19749 } 19750 19751 func TestJetStreamKVDelete(t *testing.T) { 19752 s := RunBasicJetStreamServer(t) 19753 if config := s.JetStreamConfig(); config != nil { 19754 defer removeDir(t, config.StoreDir) 19755 } 19756 defer s.Shutdown() 19757 19758 nc, js := jsClientConnect(t, s) 19759 defer nc.Close() 19760 19761 kv, err := js.CreateKeyValue(&nats.KeyValueConfig{ 19762 Bucket: "deletion", 19763 History: 10, 19764 }) 19765 require_NoError(t, err) 19766 kv.Put("a", nil) 19767 kv.Put("a.a", nil) 19768 kv.Put("a.b", nil) 19769 kv.Put("a.b.c", nil) 19770 19771 keys, err := kv.Keys() 19772 require_NoError(t, err) 19773 require_True(t, len(keys) == 4) 19774 19775 info, err := js.AddConsumer("KV_deletion", &nats.ConsumerConfig{ 19776 Name: "keys", 19777 FilterSubject: "$KV.deletion.a.*", 19778 DeliverPolicy: nats.DeliverLastPerSubjectPolicy, 19779 DeliverSubject: "keys", 19780 MaxDeliver: 1, 19781 AckPolicy: nats.AckNonePolicy, 19782 MemoryStorage: true, 19783 FlowControl: true, 19784 Heartbeat: time.Second * 5, 19785 }) 19786 require_NoError(t, err) 19787 require_True(t, info.NumPending == 2) 19788 19789 sub, err := js.SubscribeSync("$KV.deletion.a.*", nats.Bind("KV_deletion", "keys")) 19790 require_NoError(t, err) 19791 19792 _, err = sub.NextMsg(time.Second * 1) 19793 require_NoError(t, err) 19794 _, err = sub.NextMsg(time.Second * 1) 19795 require_NoError(t, err) 19796 msg, err := sub.NextMsg(time.Second * 1) 19797 require_True(t, msg == nil) 19798 require_Error(t, err) 19799 19800 require_NoError(t, kv.Delete("a.a")) 19801 require_NoError(t, kv.Delete("a.b")) 19802 19803 watcher, err := kv.WatchAll() 19804 require_NoError(t, err) 19805 19806 updates := watcher.Updates() 19807 19808 keys = []string{} 19809 for v := range updates { 19810 if v == nil { 19811 break 19812 } 19813 if v.Operation() == nats.KeyValueDelete { 19814 keys = 
append(keys, v.Key()) 19815 } 19816 } 19817 require_True(t, len(keys) == 2) 19818 } 19819 19820 func TestJetStreamDeliverLastPerSubjectWithKV(t *testing.T) { 19821 s := RunBasicJetStreamServer(t) 19822 if config := s.JetStreamConfig(); config != nil { 19823 defer removeDir(t, config.StoreDir) 19824 } 19825 defer s.Shutdown() 19826 19827 nc, js := jsClientConnect(t, s) 19828 defer nc.Close() 19829 19830 _, err := js.AddStream(&nats.StreamConfig{ 19831 Name: "TEST", 19832 MaxMsgsPerSubject: 5, 19833 Subjects: []string{"kv.>"}, 19834 }) 19835 require_NoError(t, err) 19836 19837 sendStreamMsg(t, nc, "kv.a", "bad") 19838 sendStreamMsg(t, nc, "kv.a", "bad") 19839 sendStreamMsg(t, nc, "kv.a", "bad") 19840 sendStreamMsg(t, nc, "kv.a", "a") 19841 sendStreamMsg(t, nc, "kv.a.b", "bad") 19842 sendStreamMsg(t, nc, "kv.a.b", "bad") 19843 sendStreamMsg(t, nc, "kv.a.b", "a.b") 19844 sendStreamMsg(t, nc, "kv.a.b.c", "bad") 19845 sendStreamMsg(t, nc, "kv.a.b.c", "bad") 19846 sendStreamMsg(t, nc, "kv.a.b.c", "bad") 19847 sendStreamMsg(t, nc, "kv.a.b.c", "a.b.c") 19848 19849 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 19850 Name: "CONSUMER", 19851 FilterSubject: "kv.>", 19852 DeliverPolicy: nats.DeliverLastPerSubjectPolicy, 19853 DeliverSubject: "deliver", 19854 MaxDeliver: 1, 19855 AckPolicy: nats.AckNonePolicy, 19856 MemoryStorage: true, 19857 FlowControl: true, 19858 Heartbeat: time.Second * 5, 19859 }) 19860 require_NoError(t, err) 19861 19862 sub, err := js.SubscribeSync("kv.>", nats.Bind("TEST", "CONSUMER")) 19863 require_NoError(t, err) 19864 19865 for i := 1; i <= 3; i++ { 19866 _, err := sub.NextMsg(time.Second * 1) 19867 require_NoError(t, err) 19868 } 19869 19870 msg, err := sub.NextMsg(time.Second * 1) 19871 if err == nil || msg != nil { 19872 t.Fatalf("should not get any more messages") 19873 } 19874 } 19875 19876 func TestJetStreamConsumerMultipleSubjectsAck(t *testing.T) { 19877 s := RunBasicJetStreamServer(t) 19878 if config := s.JetStreamConfig(); config != nil { 19879 defer removeDir(t, config.StoreDir) 19880 } 19881 defer s.Shutdown() 19882 19883 nc, js := jsClientConnect(t, s) 19884 defer nc.Close() 19885 acc := s.GlobalAccount() 19886 19887 mset, err := acc.addStream(&StreamConfig{ 19888 Subjects: []string{"events", "data", "other"}, 19889 Name: "deliver", 19890 }) 19891 require_NoError(t, err) 19892 19893 _, err = mset.addConsumer(&ConsumerConfig{ 19894 FilterSubjects: []string{"events", "data"}, 19895 Durable: "name", 19896 AckPolicy: AckExplicit, 19897 Replicas: 1, 19898 }) 19899 require_NoError(t, err) 19900 19901 sendStreamMsg(t, nc, "events", "1") 19902 sendStreamMsg(t, nc, "data", "2") 19903 sendStreamMsg(t, nc, "data", "3") 19904 sendStreamMsg(t, nc, "data", "4") 19905 sendStreamMsg(t, nc, "events", "5") 19906 sendStreamMsg(t, nc, "data", "6") 19907 sendStreamMsg(t, nc, "data", "7") 19908 19909 consumer, err := js.PullSubscribe("", "name", nats.Bind("deliver", "name")) 19910 require_NoError(t, err) 19911 19912 msg, err := consumer.Fetch(3) 19913 require_NoError(t, err) 19914 19915 require_True(t, len(msg) == 3) 19916 19917 require_NoError(t, msg[0].AckSync()) 19918 require_NoError(t, msg[1].AckSync()) 19919 19920 info, err := js.ConsumerInfo("deliver", "name") 19921 require_NoError(t, err) 19922 19923 if info.AckFloor.Consumer != 2 { 19924 t.Fatalf("bad consumer sequence. expected %v, got %v", 2, info.AckFloor.Consumer) 19925 } 19926 if info.AckFloor.Stream != 2 { 19927 t.Fatalf("bad stream sequence. 
expected %v, got %v", 2, info.AckFloor.Stream) 19928 } 19929 if info.NumPending != 4 { 19930 t.Fatalf("bad num pending. Expected %v, got %v", 2, info.NumPending) 19931 } 19932 19933 } 19934 19935 func TestJetStreamConsumerMultipleSubjectAndNewAPI(t *testing.T) { 19936 s := RunBasicJetStreamServer(t) 19937 if config := s.JetStreamConfig(); config != nil { 19938 defer removeDir(t, config.StoreDir) 19939 } 19940 defer s.Shutdown() 19941 19942 nc, _ := jsClientConnect(t, s) 19943 defer nc.Close() 19944 acc := s.GlobalAccount() 19945 19946 _, err := acc.addStream(&StreamConfig{ 19947 Subjects: []string{"data", "events"}, 19948 Name: "deliver", 19949 }) 19950 if err != nil { 19951 t.Fatalf("error while creating stream") 19952 } 19953 19954 req, err := json.Marshal(&CreateConsumerRequest{Stream: "deliver", Config: ConsumerConfig{ 19955 FilterSubjects: []string{"events", "data"}, 19956 Name: "name", 19957 Durable: "name", 19958 }}) 19959 require_NoError(t, err) 19960 19961 resp, err := nc.Request(fmt.Sprintf("$JS.API.CONSUMER.CREATE.%s.%s.%s", "deliver", "name", "data.>"), req, time.Second*10) 19962 19963 var apiResp JSApiConsumerCreateResponse 19964 json.Unmarshal(resp.Data, &apiResp) 19965 require_NoError(t, err) 19966 19967 if apiResp.Error.ErrCode != 10137 { 19968 t.Fatal("this should error as multiple subject filters is incompatible with new API and didn't") 19969 } 19970 19971 } 19972 19973 func TestJetStreamConsumerMultipleSubjectsWithAddedMessages(t *testing.T) { 19974 s := RunBasicJetStreamServer(t) 19975 if config := s.JetStreamConfig(); config != nil { 19976 defer removeDir(t, config.StoreDir) 19977 } 19978 defer s.Shutdown() 19979 19980 durable := "durable" 19981 nc, js := jsClientConnect(t, s) 19982 defer nc.Close() 19983 acc := s.GlobalAccount() 19984 19985 mset, err := acc.addStream(&StreamConfig{ 19986 Subjects: []string{"events.>"}, 19987 Name: "deliver", 19988 }) 19989 require_NoError(t, err) 19990 19991 // if they're not the same, expect error 19992 _, err = mset.addConsumer(&ConsumerConfig{ 19993 DeliverSubject: "deliver", 19994 FilterSubjects: []string{"events.created", "events.processed"}, 19995 Durable: durable, 19996 AckPolicy: AckExplicit, 19997 }) 19998 19999 require_NoError(t, err) 20000 20001 sendStreamMsg(t, nc, "events.created", "0") 20002 sendStreamMsg(t, nc, "events.created", "1") 20003 sendStreamMsg(t, nc, "events.created", "2") 20004 sendStreamMsg(t, nc, "events.created", "3") 20005 sendStreamMsg(t, nc, "events.other", "BAD") 20006 sendStreamMsg(t, nc, "events.processed", "4") 20007 sendStreamMsg(t, nc, "events.processed", "5") 20008 sendStreamMsg(t, nc, "events.processed", "6") 20009 sendStreamMsg(t, nc, "events.other", "BAD") 20010 sendStreamMsg(t, nc, "events.processed", "7") 20011 sendStreamMsg(t, nc, "events.processed", "8") 20012 20013 sub, err := js.SubscribeSync("", nats.Bind("deliver", durable)) 20014 if err != nil { 20015 t.Fatalf("error while subscribing to Consumer: %v", err) 20016 } 20017 20018 for i := 0; i < 10; i++ { 20019 if i == 5 { 20020 sendStreamMsg(t, nc, "events.created", "9") 20021 } 20022 if i == 9 { 20023 sendStreamMsg(t, nc, "events.other", "BAD") 20024 sendStreamMsg(t, nc, "events.created", "11") 20025 } 20026 if i == 7 { 20027 sendStreamMsg(t, nc, "events.processed", "10") 20028 } 20029 20030 msg, err := sub.NextMsg(time.Second * 1) 20031 require_NoError(t, err) 20032 j, err := strconv.Atoi(string(msg.Data)) 20033 require_NoError(t, err) 20034 if j != i { 20035 t.Fatalf("wrong sequence, expected %v got %v", i, j) 20036 } 20037 if err 
:= msg.AckSync(); err != nil { 20038 t.Fatalf("error while acking the message :%v", err) 20039 } 20040 20041 } 20042 20043 info, err := js.ConsumerInfo("deliver", durable) 20044 require_NoError(t, err) 20045 20046 require_True(t, info.Delivered.Consumer == 12) 20047 require_True(t, info.Delivered.Stream == 15) 20048 require_True(t, info.AckFloor.Stream == 12) 20049 require_True(t, info.AckFloor.Consumer == 10) 20050 } 20051 20052 func TestJetStreamConsumerThreeFilters(t *testing.T) { 20053 s := RunBasicJetStreamServer(t) 20054 defer s.Shutdown() 20055 20056 nc, js := jsClientConnect(t, s) 20057 defer nc.Close() 20058 20059 mset, err := s.GlobalAccount().addStream(&StreamConfig{ 20060 Name: "TEST", 20061 Subjects: []string{"events", "data", "other", "ignored"}, 20062 }) 20063 require_NoError(t, err) 20064 20065 sendStreamMsg(t, nc, "ignored", "100") 20066 sendStreamMsg(t, nc, "events", "0") 20067 sendStreamMsg(t, nc, "events", "1") 20068 20069 sendStreamMsg(t, nc, "data", "2") 20070 sendStreamMsg(t, nc, "ignored", "100") 20071 sendStreamMsg(t, nc, "data", "3") 20072 20073 sendStreamMsg(t, nc, "other", "4") 20074 sendStreamMsg(t, nc, "data", "5") 20075 sendStreamMsg(t, nc, "other", "6") 20076 sendStreamMsg(t, nc, "data", "7") 20077 sendStreamMsg(t, nc, "ignored", "100") 20078 20079 mset.addConsumer(&ConsumerConfig{ 20080 FilterSubjects: []string{"events", "data", "other"}, 20081 Durable: "multi", 20082 AckPolicy: AckExplicit, 20083 }) 20084 20085 consumer, err := js.PullSubscribe("", "multi", nats.Bind("TEST", "multi")) 20086 require_NoError(t, err) 20087 20088 msgs, err := consumer.Fetch(6) 20089 require_NoError(t, err) 20090 for i, msg := range msgs { 20091 require_Equal(t, string(msg.Data), fmt.Sprintf("%d", i)) 20092 require_NoError(t, msg.AckSync()) 20093 } 20094 20095 info, err := js.ConsumerInfo("TEST", "multi") 20096 require_NoError(t, err) 20097 require_True(t, info.Delivered.Stream == 8) 20098 require_True(t, info.Delivered.Consumer == 6) 20099 require_True(t, info.NumPending == 2) 20100 require_True(t, info.NumAckPending == 0) 20101 require_True(t, info.AckFloor.Consumer == 6) 20102 require_True(t, info.AckFloor.Stream == 8) 20103 } 20104 20105 func TestJetStreamConsumerUpdateFilterSubjects(t *testing.T) { 20106 s := RunBasicJetStreamServer(t) 20107 defer s.Shutdown() 20108 20109 nc, js := jsClientConnect(t, s) 20110 defer nc.Close() 20111 20112 mset, err := s.GlobalAccount().addStream(&StreamConfig{ 20113 Name: "TEST", 20114 Subjects: []string{"events", "data", "other"}, 20115 }) 20116 require_NoError(t, err) 20117 20118 sendStreamMsg(t, nc, "other", "100") 20119 sendStreamMsg(t, nc, "events", "0") 20120 sendStreamMsg(t, nc, "events", "1") 20121 sendStreamMsg(t, nc, "data", "2") 20122 sendStreamMsg(t, nc, "data", "3") 20123 sendStreamMsg(t, nc, "other", "4") 20124 sendStreamMsg(t, nc, "data", "5") 20125 20126 _, err = mset.addConsumer(&ConsumerConfig{ 20127 FilterSubjects: []string{"events", "data"}, 20128 Durable: "multi", 20129 AckPolicy: AckExplicit, 20130 }) 20131 require_NoError(t, err) 20132 20133 consumer, err := js.PullSubscribe("", "multi", nats.Bind("TEST", "multi")) 20134 require_NoError(t, err) 20135 20136 msgs, err := consumer.Fetch(3) 20137 require_NoError(t, err) 20138 for i, msg := range msgs { 20139 require_Equal(t, string(msg.Data), fmt.Sprintf("%d", i)) 20140 require_NoError(t, msg.AckSync()) 20141 } 20142 20143 _, err = mset.addConsumer(&ConsumerConfig{ 20144 FilterSubjects: []string{"events", "data", "other"}, 20145 Durable: "multi", 20146 AckPolicy: 
AckExplicit, 20147 }) 20148 require_NoError(t, err) 20149 20150 updatedConsumer, err := js.PullSubscribe("", "multi", nats.Bind("TEST", "multi")) 20151 require_NoError(t, err) 20152 20153 msgs, err = updatedConsumer.Fetch(3) 20154 require_NoError(t, err) 20155 for i, msg := range msgs { 20156 require_Equal(t, string(msg.Data), fmt.Sprintf("%d", i+3)) 20157 require_NoError(t, msg.AckSync()) 20158 } 20159 } 20160 func TestJetStreamStreamUpdateSubjectsOverlapOthers(t *testing.T) { 20161 s := RunBasicJetStreamServer(t) 20162 defer s.Shutdown() 20163 20164 nc, js := jsClientConnect(t, s) 20165 defer nc.Close() 20166 20167 _, err := js.AddStream(&nats.StreamConfig{ 20168 Name: "TEST", 20169 Subjects: []string{"TEST"}, 20170 }) 20171 require_NoError(t, err) 20172 20173 _, err = js.UpdateStream(&nats.StreamConfig{ 20174 Name: "TEST", 20175 Subjects: []string{"TEST", "foo.a"}, 20176 }) 20177 require_NoError(t, err) 20178 20179 _, err = js.AddStream(&nats.StreamConfig{ 20180 Name: "TEST2", 20181 Subjects: []string{"TEST2"}, 20182 }) 20183 require_NoError(t, err) 20184 20185 // we expect an error updating stream TEST2 with subject that overlaps that used by TEST 20186 // foo.a fails too, but foo.* also double-check for sophisticated overlap match 20187 _, err = js.UpdateStream(&nats.StreamConfig{ 20188 Name: "TEST2", 20189 Subjects: []string{"TEST2", "foo.*"}, 20190 }) 20191 require_Error(t, err) 20192 require_Contains(t, err.Error(), "overlap") 20193 } 20194 20195 func TestJetStreamMetaDataFailOnKernelFault(t *testing.T) { 20196 s := RunBasicJetStreamServer(t) 20197 defer s.Shutdown() 20198 20199 nc, js := jsClientConnect(t, s) 20200 defer nc.Close() 20201 20202 _, err := js.AddStream(&nats.StreamConfig{ 20203 Name: "TEST", 20204 Subjects: []string{"foo"}, 20205 }) 20206 require_NoError(t, err) 20207 20208 for i := 0; i < 10; i++ { 20209 sendStreamMsg(t, nc, "foo", "OK") 20210 } 20211 20212 sd := s.JetStreamConfig().StoreDir 20213 sdir := filepath.Join(sd, "$G", "streams", "TEST") 20214 s.Shutdown() 20215 20216 // Emulate if the kernel did not flush out to disk the meta information. 20217 // so we will zero out both meta.inf and meta.sum. 20218 err = os.WriteFile(filepath.Join(sdir, JetStreamMetaFile), nil, defaultFilePerms) 20219 require_NoError(t, err) 20220 20221 err = os.WriteFile(filepath.Join(sdir, JetStreamMetaFileSum), nil, defaultFilePerms) 20222 require_NoError(t, err) 20223 20224 // Restart. 20225 s = RunJetStreamServerOnPort(-1, sd) 20226 defer s.Shutdown() 20227 20228 nc, js = jsClientConnect(t, s) 20229 defer nc.Close() 20230 20231 // The stream will have not been recovered. So err is normal. 20232 _, err = js.StreamInfo("TEST") 20233 require_Error(t, err) 20234 20235 // Make sure we are signaled here from healthz 20236 hs := s.healthz(nil) 20237 const expected = "JetStream stream '$G > TEST' could not be recovered" 20238 if hs.Status != "unavailable" || hs.Error == _EMPTY_ { 20239 t.Fatalf("Expected healthz to return an error") 20240 } else if hs.Error != expected { 20241 t.Fatalf("Expected healthz error %q got %q", expected, hs.Error) 20242 } 20243 20244 // If we add it back, this should recover the msgs. 20245 _, err = js.AddStream(&nats.StreamConfig{ 20246 Name: "TEST", 20247 Subjects: []string{"foo"}, 20248 }) 20249 require_NoError(t, err) 20250 20251 // Make sure we recovered. 
20252 si, err := js.StreamInfo("TEST") 20253 require_NoError(t, err) 20254 require_True(t, si.State.Msgs == 10) 20255 20256 // Now if we restart the server, meta should be correct, 20257 // and the stream should be restored. 20258 s.Shutdown() 20259 20260 s = RunJetStreamServerOnPort(-1, sd) 20261 defer s.Shutdown() 20262 20263 nc, js = jsClientConnect(t, s) 20264 defer nc.Close() 20265 20266 // Make sure we recovered the stream correctly after re-adding. 20267 si, err = js.StreamInfo("TEST") 20268 require_NoError(t, err) 20269 require_True(t, si.State.Msgs == 10) 20270 } 20271 20272 func TestJetstreamConsumerSingleTokenSubject(t *testing.T) { 20273 s := RunBasicJetStreamServer(t) 20274 defer s.Shutdown() 20275 20276 nc, js := jsClientConnect(t, s) 20277 defer nc.Close() 20278 20279 filterSubject := "foo" 20280 _, err := js.AddStream(&nats.StreamConfig{ 20281 Name: "TEST", 20282 Subjects: []string{filterSubject}, 20283 }) 20284 require_NoError(t, err) 20285 20286 req, err := json.Marshal(&CreateConsumerRequest{Stream: "TEST", Config: ConsumerConfig{ 20287 FilterSubject: filterSubject, 20288 Name: "name", 20289 }}) 20290 20291 if err != nil { 20292 t.Fatalf("failed to marshal consumer create request: %v", err) 20293 } 20294 20295 resp, err := nc.Request(fmt.Sprintf("$JS.API.CONSUMER.CREATE.%s.%s.%s", "TEST", "name", "not_filter_subject"), req, time.Second*10) 20296 20297 var apiResp ApiResponse 20298 json.Unmarshal(resp.Data, &apiResp) 20299 if err != nil { 20300 t.Fatalf("failed to unmarshal response: %v", err) 20301 } 20302 if apiResp.Error == nil { 20303 t.Fatalf("expected error, got nil") 20304 } 20305 if apiResp.Error.ErrCode != 10131 { 20306 t.Fatalf("expected error 10131, got %v", apiResp.Error) 20307 } 20308 } 20309 20310 // https://github.com/nats-io/nats-server/issues/3734 20311 func TestJetStreamMsgBlkFailOnKernelFault(t *testing.T) { 20312 s := RunBasicJetStreamServer(t) 20313 defer s.Shutdown() 20314 20315 nc, js := jsClientConnect(t, s) 20316 defer nc.Close() 20317 20318 _, err := js.AddStream(&nats.StreamConfig{ 20319 Name: "TEST", 20320 Subjects: []string{"foo"}, 20321 MaxBytes: 10 * 1024 * 1024, // 10MB 20322 }) 20323 require_NoError(t, err) 20324 20325 msgSize := 1024 * 1024 // 1MB 20326 msg := make([]byte, msgSize) 20327 crand.Read(msg) 20328 20329 for i := 0; i < 20; i++ { 20330 _, err := js.Publish("foo", msg) 20331 require_NoError(t, err) 20332 } 20333 20334 si, err := js.StreamInfo("TEST") 20335 require_NoError(t, err) 20336 require_True(t, si.State.Bytes < uint64(si.Config.MaxBytes)) 20337 20338 // Now emulate a kernel fault that fails to write the last blk properly. 20339 mset, err := s.GlobalAccount().lookupStream("TEST") 20340 require_NoError(t, err) 20341 20342 mset.mu.RLock() 20343 fs := mset.store.(*fileStore) 20344 fs.mu.RLock() 20345 require_True(t, len(fs.blks) > 2) 20346 // Here we do not grab the last one, which we handle correctly. We grab an interior one near the end. 20347 lmbf := fs.blks[len(fs.blks)-2].mfn 20348 fs.mu.RUnlock() 20349 mset.mu.RUnlock() 20350 20351 sd := s.JetStreamConfig().StoreDir 20352 s.Shutdown() 20353 20354 // Remove block. 
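	// lmbf is the on-disk file backing an interior message block. For a file
	// store the layout is roughly (illustrative; the exact directory and file
	// names are an assumption about the store internals):
	//
	//   <storeDir>/$G/streams/TEST/msgs/<index>.blk
	//
	// Deleting one emulates a block the kernel never flushed to disk.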
20355 require_NoError(t, os.Remove(lmbf)) 20356 20357 s = RunJetStreamServerOnPort(-1, sd) 20358 defer s.Shutdown() 20359 20360 nc, js = jsClientConnect(t, s) 20361 defer nc.Close() 20362 20363 _, err = js.GetMsg("TEST", 17) 20364 require_Error(t, err, nats.ErrMsgNotFound) 20365 20366 si, err = js.StreamInfo("TEST") 20367 require_NoError(t, err) 20368 require_True(t, si.State.NumDeleted == 3) 20369 20370 // Test detailed version as well. 20371 si, err = js.StreamInfo("TEST", &nats.StreamInfoRequest{DeletedDetails: true}) 20372 require_NoError(t, err) 20373 require_True(t, si.State.NumDeleted == 3) 20374 if !reflect.DeepEqual(si.State.Deleted, []uint64{16, 17, 18}) { 20375 t.Fatalf("Expected deleted of %+v, got %+v", []uint64{16, 17, 18}, si.State.Deleted) 20376 } 20377 20378 for i := 0; i < 20; i++ { 20379 _, err := js.Publish("foo", msg) 20380 require_NoError(t, err) 20381 } 20382 20383 si, err = js.StreamInfo("TEST") 20384 require_NoError(t, err) 20385 if si.State.Bytes > uint64(si.Config.MaxBytes) { 20386 t.Fatalf("MaxBytes not enforced with empty interior msg blk, max %v, bytes %v", 20387 friendlyBytes(si.Config.MaxBytes), friendlyBytes(int64(si.State.Bytes))) 20388 } 20389 } 20390 20391 func TestJetStreamPullConsumerBatchCompleted(t *testing.T) { 20392 s := RunBasicJetStreamServer(t) 20393 defer s.Shutdown() 20394 20395 nc, js := jsClientConnect(t, s) 20396 defer nc.Close() 20397 20398 _, err := js.AddStream(&nats.StreamConfig{ 20399 Name: "TEST", 20400 Subjects: []string{"foo"}, 20401 }) 20402 require_NoError(t, err) 20403 20404 msgSize := 128 20405 msg := make([]byte, msgSize) 20406 crand.Read(msg) 20407 20408 for i := 0; i < 10; i++ { 20409 _, err := js.Publish("foo", msg) 20410 require_NoError(t, err) 20411 } 20412 20413 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 20414 Durable: "dur", 20415 AckPolicy: nats.AckExplicitPolicy, 20416 }) 20417 require_NoError(t, err) 20418 20419 req := JSApiConsumerGetNextRequest{Batch: 0, MaxBytes: 1024, Expires: 250 * time.Millisecond} 20420 20421 reqb, _ := json.Marshal(req) 20422 sub := natsSubSync(t, nc, nats.NewInbox()) 20423 err = nc.PublishRequest("$JS.API.CONSUMER.MSG.NEXT.TEST.dur", sub.Subject, reqb) 20424 require_NoError(t, err) 20425 20426 // Expect first message to arrive normally. 20427 _, err = sub.NextMsg(time.Second * 1) 20428 require_NoError(t, err) 20429 20430 // Second message should be info that batch is complete, but there were pending bytes. 20431 pullMsg, err := sub.NextMsg(time.Second * 1) 20432 require_NoError(t, err) 20433 20434 if v := pullMsg.Header.Get("Status"); v != "409" { 20435 t.Fatalf("Expected 409, got: %s", v) 20436 } 20437 if v := pullMsg.Header.Get("Description"); v != "Batch Completed" { 20438 t.Fatalf("Expected Batch Completed, got: %s", v) 20439 } 20440 } 20441 20442 func TestJetStreamConsumerAndStreamMetadata(t *testing.T) { 20443 s := RunBasicJetStreamServer(t) 20444 defer s.Shutdown() 20445 20446 metadata := map[string]string{"key": "value", "_nats_created_version": "2.9.11"} 20447 acc := s.GlobalAccount() 20448 20449 // Check stream's first. 
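	// Metadata is a free-form map[string]string carried on both stream and
	// consumer configs; the server enforces a total size cap (JSMaxMetadataLen,
	// exercised further below). Illustrative usage:
	//
	//   cfg := &StreamConfig{Name: "foo", Metadata: map[string]string{"team": "core"}}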
20450 mset, err := acc.addStream(&StreamConfig{Name: "foo", Metadata: metadata}) 20451 if err != nil { 20452 t.Fatalf("Unexpected error adding stream: %v", err) 20453 } 20454 if cfg := mset.config(); !reflect.DeepEqual(metadata, cfg.Metadata) { 20455 t.Fatalf("Expected a metadata of %q, got %q", metadata, cfg.Metadata) 20456 } 20457 20458 // Now consumer 20459 o, err := mset.addConsumer(&ConsumerConfig{ 20460 Metadata: metadata, 20461 DeliverSubject: "to", 20462 AckPolicy: AckNone}) 20463 if err != nil { 20464 t.Fatalf("Unexpected error adding consumer: %v", err) 20465 } 20466 if cfg := o.config(); !reflect.DeepEqual(metadata, cfg.Metadata) { 20467 t.Fatalf("Expected a metadata of %q, got %q", metadata, cfg.Metadata) 20468 } 20469 20470 // Test max. 20471 data := make([]byte, JSMaxMetadataLen/100) 20472 crand.Read(data) 20473 bigValue := base64.StdEncoding.EncodeToString(data) 20474 20475 bigMetadata := make(map[string]string, 101) 20476 for i := 0; i < 101; i++ { 20477 bigMetadata[fmt.Sprintf("key%d", i)] = bigValue 20478 } 20479 20480 _, err = acc.addStream(&StreamConfig{Name: "bar", Metadata: bigMetadata}) 20481 if err == nil || !strings.Contains(err.Error(), "stream metadata exceeds") { 20482 t.Fatalf("Expected an error but got none") 20483 } 20484 20485 _, err = mset.addConsumer(&ConsumerConfig{ 20486 Metadata: bigMetadata, 20487 DeliverSubject: "to", 20488 AckPolicy: AckNone}) 20489 if err == nil || !strings.Contains(err.Error(), "consumer metadata exceeds") { 20490 t.Fatalf("Expected an error but got none") 20491 } 20492 } 20493 20494 func TestJetStreamConsumerPurge(t *testing.T) { 20495 s := RunBasicJetStreamServer(t) 20496 defer s.Shutdown() 20497 20498 nc, js := jsClientConnect(t, s) 20499 defer nc.Close() 20500 20501 _, err := js.AddStream(&nats.StreamConfig{ 20502 Name: "TEST", 20503 Subjects: []string{"test.>"}, 20504 }) 20505 require_NoError(t, err) 20506 20507 sendStreamMsg(t, nc, "test.1", "hello") 20508 sendStreamMsg(t, nc, "test.2", "hello") 20509 20510 sub, err := js.PullSubscribe("test.>", "consumer") 20511 require_NoError(t, err) 20512 20513 // Purge one of the subjects. 20514 err = js.PurgeStream("TEST", &nats.StreamPurgeRequest{Subject: "test.2"}) 20515 require_NoError(t, err) 20516 20517 info, err := js.ConsumerInfo("TEST", "consumer") 20518 require_NoError(t, err) 20519 require_True(t, info.NumPending == 1) 20520 20521 // Expect to get message from not purged subject. 20522 _, err = sub.Fetch(1) 20523 require_NoError(t, err) 20524 20525 _, err = js.AddStream(&nats.StreamConfig{ 20526 Name: "OTHER", 20527 Subjects: []string{"other.>"}, 20528 }) 20529 require_NoError(t, err) 20530 20531 // Publish two items to two subjects. 20532 sendStreamMsg(t, nc, "other.1", "hello") 20533 sendStreamMsg(t, nc, "other.2", "hello") 20534 20535 sub, err = js.PullSubscribe("other.>", "other_consumer") 20536 require_NoError(t, err) 20537 20538 // Purge whole stream. 
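	// For reference, PurgeStream also supports partial purges via the request
	// fields (field names from nats.go's StreamPurgeRequest; illustrative):
	//
	//   js.PurgeStream("OTHER", &nats.StreamPurgeRequest{Subject: "other.1"}) // only one subject
	//   js.PurgeStream("OTHER", &nats.StreamPurgeRequest{Sequence: 5})        // everything below seq 5
	//   js.PurgeStream("OTHER", &nats.StreamPurgeRequest{Keep: 1})            // keep only the newest message
	//
	// Here we purge everything.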
20539 err = js.PurgeStream("OTHER", &nats.StreamPurgeRequest{}) 20540 require_NoError(t, err) 20541 20542 info, err = js.ConsumerInfo("OTHER", "other_consumer") 20543 require_NoError(t, err) 20544 require_True(t, info.NumPending == 0) 20545 20546 // This time expect error, as we purged whole stream, 20547 _, err = sub.Fetch(1) 20548 require_Error(t, err) 20549 20550 } 20551 20552 func TestJetStreamConsumerFilterUpdate(t *testing.T) { 20553 s := RunBasicJetStreamServer(t) 20554 defer s.Shutdown() 20555 20556 nc, js := jsClientConnect(t, s) 20557 defer nc.Close() 20558 20559 _, err := js.AddStream(&nats.StreamConfig{ 20560 Name: "TEST", 20561 Subjects: []string{"foo.>", "bar.>"}, 20562 }) 20563 require_NoError(t, err) 20564 20565 for i := 0; i < 3; i++ { 20566 sendStreamMsg(t, nc, "foo.data", "OK") 20567 } 20568 20569 sub, err := nc.SubscribeSync("deliver") 20570 require_NoError(t, err) 20571 20572 js.AddConsumer("TEST", &nats.ConsumerConfig{ 20573 Durable: "consumer", 20574 DeliverSubject: "deliver", 20575 FilterSubject: "foo.data", 20576 }) 20577 20578 _, err = sub.NextMsg(time.Second * 1) 20579 require_NoError(t, err) 20580 _, err = sub.NextMsg(time.Second * 1) 20581 require_NoError(t, err) 20582 _, err = sub.NextMsg(time.Second * 1) 20583 require_NoError(t, err) 20584 20585 _, err = js.UpdateConsumer("TEST", &nats.ConsumerConfig{ 20586 Durable: "consumer", 20587 DeliverSubject: "deliver", 20588 FilterSubject: "foo.>", 20589 }) 20590 require_NoError(t, err) 20591 20592 sendStreamMsg(t, nc, "foo.other", "data") 20593 20594 // This will timeout if filters were not properly updated. 20595 _, err = sub.NextMsg(time.Second * 1) 20596 require_NoError(t, err) 20597 20598 mset, err := s.GlobalAccount().lookupStream("TEST") 20599 require_NoError(t, err) 20600 20601 checkNumFilter := func(expected int) { 20602 t.Helper() 20603 mset.mu.RLock() 20604 nf := mset.numFilter 20605 mset.mu.RUnlock() 20606 if nf != expected { 20607 t.Fatalf("Expected stream's numFilter to be %d, got %d", expected, nf) 20608 } 20609 } 20610 20611 checkNumFilter(1) 20612 20613 // Update consumer once again, now not having any filters 20614 _, err = js.UpdateConsumer("TEST", &nats.ConsumerConfig{ 20615 Durable: "consumer", 20616 DeliverSubject: "deliver", 20617 FilterSubject: _EMPTY_, 20618 }) 20619 require_NoError(t, err) 20620 20621 // and expect that numFilter reports correctly. 20622 checkNumFilter(0) 20623 } 20624 20625 func TestJetStreamPurgeExAndAccounting(t *testing.T) { 20626 cases := []struct { 20627 name string 20628 cfg *nats.StreamConfig 20629 }{ 20630 {name: "MemoryStore", 20631 cfg: &nats.StreamConfig{ 20632 Name: "TEST", 20633 Storage: nats.MemoryStorage, 20634 Subjects: []string{"*"}, 20635 }}, 20636 {name: "FileStore", 20637 cfg: &nats.StreamConfig{ 20638 Name: "TEST", 20639 Storage: nats.FileStorage, 20640 Subjects: []string{"*"}, 20641 }}, 20642 } 20643 for _, c := range cases { 20644 s := RunBasicJetStreamServer(t) 20645 defer s.Shutdown() 20646 20647 // Client for API requests. 
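// Both storage types run the same flow: publish the same payload to two subjects, purge one
// of them, and expect the account usage to drop by half.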
20648 nc, js := jsClientConnect(t, s) 20649 defer nc.Close() 20650 20651 _, err := js.AddStream(c.cfg) 20652 require_NoError(t, err) 20653 20654 msg := []byte("accounting") 20655 for i := 0; i < 100; i++ { 20656 _, err = js.Publish("foo", msg) 20657 require_NoError(t, err) 20658 _, err = js.Publish("bar", msg) 20659 require_NoError(t, err) 20660 } 20661 20662 info, err := js.AccountInfo() 20663 require_NoError(t, err) 20664 20665 err = js.PurgeStream("TEST", &nats.StreamPurgeRequest{Subject: "foo"}) 20666 require_NoError(t, err) 20667 20668 ninfo, err := js.AccountInfo() 20669 require_NoError(t, err) 20670 20671 // Make sure we did the proper accounting. 20672 if c.cfg.Storage == nats.MemoryStorage { 20673 if ninfo.Memory != info.Memory/2 { 20674 t.Fatalf("Accounting information incorrect for Memory: %d vs %d", 20675 ninfo.Memory, info.Memory/2) 20676 } 20677 } else { 20678 if ninfo.Store != info.Store/2 { 20679 t.Fatalf("Accounting information incorrect for FileStore: %d vs %d", 20680 ninfo.Store, info.Store/2) 20681 } 20682 } 20683 } 20684 } 20685 20686 func TestJetStreamRollup(t *testing.T) { 20687 s := RunBasicJetStreamServer(t) 20688 defer s.Shutdown() 20689 20690 nc, js := jsClientConnect(t, s) 20691 defer nc.Close() 20692 20693 const STREAM = "S" 20694 const SUBJ = "S.*" 20695 20696 js.AddStream(&nats.StreamConfig{ 20697 Name: STREAM, 20698 Subjects: []string{SUBJ}, 20699 AllowRollup: true, 20700 }) 20701 20702 for i := 1; i <= 10; i++ { 20703 sendStreamMsg(t, nc, "S.A", fmt.Sprintf("%v", i)) 20704 sendStreamMsg(t, nc, "S.B", fmt.Sprintf("%v", i)) 20705 } 20706 20707 sinfo, err := js.StreamInfo(STREAM) 20708 require_NoError(t, err) 20709 require_True(t, sinfo.State.Msgs == 20) 20710 20711 cinfo, err := js.AddConsumer(STREAM, &nats.ConsumerConfig{ 20712 Durable: "DUR-A", 20713 FilterSubject: "S.A", 20714 AckPolicy: nats.AckExplicitPolicy, 20715 }) 20716 require_NoError(t, err) 20717 require_True(t, cinfo.NumPending == 10) 20718 20719 m := nats.NewMsg("S.A") 20720 m.Header.Set(JSMsgRollup, JSMsgRollupSubject) 20721 20722 _, err = js.PublishMsg(m) 20723 require_NoError(t, err) 20724 20725 cinfo, err = js.ConsumerInfo("S", "DUR-A") 20726 require_NoError(t, err) 20727 require_True(t, cinfo.NumPending == 1) 20728 20729 sinfo, err = js.StreamInfo(STREAM) 20730 require_NoError(t, err) 20731 require_True(t, sinfo.State.Msgs == 11) 20732 20733 cinfo, err = js.AddConsumer(STREAM, &nats.ConsumerConfig{ 20734 Durable: "DUR-B", 20735 FilterSubject: "S.B", 20736 AckPolicy: nats.AckExplicitPolicy, 20737 }) 20738 require_NoError(t, err) 20739 require_True(t, cinfo.NumPending == 10) 20740 } 20741 20742 func TestJetStreamPartialPurgeWithAckPending(t *testing.T) { 20743 s := RunBasicJetStreamServer(t) 20744 defer s.Shutdown() 20745 20746 nc, js := jsClientConnect(t, s) 20747 defer nc.Close() 20748 20749 _, err := js.AddStream(&nats.StreamConfig{ 20750 Name: "TEST", 20751 Subjects: []string{"foo"}, 20752 }) 20753 require_NoError(t, err) 20754 20755 nmsgs := 100 20756 for i := 0; i < nmsgs; i++ { 20757 sendStreamMsg(t, nc, "foo", "OK") 20758 } 20759 sub, err := js.PullSubscribe("foo", "dlc", nats.AckWait(time.Second)) 20760 require_NoError(t, err) 20761 20762 // Queue up all for ack pending. 20763 _, err = sub.Fetch(nmsgs) 20764 require_NoError(t, err) 20765 20766 keep := nmsgs / 2 20767 require_NoError(t, js.PurgeStream("TEST", &nats.StreamPurgeRequest{Keep: uint64(keep)})) 20768 20769 // Should be able to be redelivered now. 
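// AckWait on the pull consumer is one second, so sleeping two seconds guarantees everything
// fetched above is eligible for redelivery before we inspect the consumer state.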
20770 time.Sleep(2 * time.Second) 20771 20772 ci, err := js.ConsumerInfo("TEST", "dlc") 20773 require_NoError(t, err) 20774 // Make sure we calculated correctly. 20775 require_True(t, ci.AckFloor.Consumer == uint64(keep)) 20776 require_True(t, ci.AckFloor.Stream == uint64(keep)) 20777 require_True(t, ci.NumAckPending == keep) 20778 require_True(t, ci.NumPending == 0) 20779 20780 for i := 0; i < nmsgs; i++ { 20781 sendStreamMsg(t, nc, "foo", "OK") 20782 } 20783 20784 ci, err = js.ConsumerInfo("TEST", "dlc") 20785 require_NoError(t, err) 20786 // Make sure we calculated correctly. 20787 // Top 3 will be same. 20788 require_True(t, ci.AckFloor.Consumer == uint64(keep)) 20789 require_True(t, ci.AckFloor.Stream == uint64(keep)) 20790 require_True(t, ci.NumAckPending == keep) 20791 require_True(t, ci.NumPending == uint64(nmsgs)) 20792 require_True(t, ci.NumRedelivered == 0) 20793 20794 msgs, err := sub.Fetch(keep) 20795 require_NoError(t, err) 20796 require_True(t, len(msgs) == keep) 20797 20798 ci, err = js.ConsumerInfo("TEST", "dlc") 20799 require_NoError(t, err) 20800 // Make sure we calculated correctly. 20801 require_True(t, ci.Delivered.Consumer == uint64(nmsgs+keep)) 20802 require_True(t, ci.Delivered.Stream == uint64(nmsgs)) 20803 require_True(t, ci.AckFloor.Consumer == uint64(keep)) 20804 require_True(t, ci.AckFloor.Stream == uint64(keep)) 20805 require_True(t, ci.NumAckPending == keep) 20806 require_True(t, ci.NumPending == uint64(nmsgs)) 20807 require_True(t, ci.NumRedelivered == keep) 20808 20809 // Ack all. 20810 for _, m := range msgs { 20811 m.Ack() 20812 } 20813 nc.Flush() 20814 20815 ci, err = js.ConsumerInfo("TEST", "dlc") 20816 require_NoError(t, err) 20817 // Same for Delivered 20818 require_True(t, ci.Delivered.Consumer == uint64(nmsgs+keep)) 20819 require_True(t, ci.Delivered.Stream == uint64(nmsgs)) 20820 require_True(t, ci.AckFloor.Consumer == uint64(nmsgs+keep)) 20821 require_True(t, ci.AckFloor.Stream == uint64(nmsgs)) 20822 require_True(t, ci.NumAckPending == 0) 20823 require_True(t, ci.NumPending == uint64(nmsgs)) 20824 require_True(t, ci.NumRedelivered == 0) 20825 20826 msgs, err = sub.Fetch(nmsgs) 20827 require_NoError(t, err) 20828 require_True(t, len(msgs) == nmsgs) 20829 20830 // Ack all again 20831 for _, m := range msgs { 20832 m.Ack() 20833 } 20834 nc.Flush() 20835 20836 ci, err = js.ConsumerInfo("TEST", "dlc") 20837 require_NoError(t, err) 20838 // Make sure we calculated correctly. 20839 require_True(t, ci.Delivered.Consumer == uint64(nmsgs*2+keep)) 20840 require_True(t, ci.Delivered.Stream == uint64(nmsgs*2)) 20841 require_True(t, ci.AckFloor.Consumer == uint64(nmsgs*2+keep)) 20842 require_True(t, ci.AckFloor.Stream == uint64(nmsgs*2)) 20843 require_True(t, ci.NumAckPending == 0) 20844 require_True(t, ci.NumPending == 0) 20845 require_True(t, ci.NumRedelivered == 0) 20846 } 20847 20848 func TestJetStreamPurgeWithRedeliveredPending(t *testing.T) { 20849 s := RunBasicJetStreamServer(t) 20850 defer s.Shutdown() 20851 20852 nc, js := jsClientConnect(t, s) 20853 defer nc.Close() 20854 20855 _, err := js.AddStream(&nats.StreamConfig{ 20856 Name: "TEST", 20857 Subjects: []string{"foo"}, 20858 }) 20859 require_NoError(t, err) 20860 20861 nmsgs := 100 20862 for i := 0; i < nmsgs; i++ { 20863 sendStreamMsg(t, nc, "foo", "OK") 20864 } 20865 sub, err := js.PullSubscribe("foo", "dlc", nats.AckWait(time.Second)) 20866 require_NoError(t, err) 20867 20868 // Queue up all for ack pending. 
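// Fetch everything without acking; once AckWait expires all of it can be redelivered.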
20869 msgs, err := sub.Fetch(nmsgs) 20870 require_NoError(t, err) 20871 require_True(t, len(msgs) == nmsgs) 20872 20873 // Should be able to be redelivered now. 20874 time.Sleep(2 * time.Second) 20875 20876 // Queue up all for ack pending again. 20877 msgs, err = sub.Fetch(nmsgs) 20878 require_NoError(t, err) 20879 require_True(t, len(msgs) == nmsgs) 20880 20881 require_NoError(t, js.PurgeStream("TEST")) 20882 20883 ci, err := js.ConsumerInfo("TEST", "dlc") 20884 require_NoError(t, err) 20885 20886 require_True(t, ci.Delivered.Consumer == uint64(2*nmsgs)) 20887 require_True(t, ci.Delivered.Stream == uint64(nmsgs)) 20888 require_True(t, ci.AckFloor.Consumer == uint64(2*nmsgs)) 20889 require_True(t, ci.AckFloor.Stream == uint64(nmsgs)) 20890 require_True(t, ci.NumAckPending == 0) 20891 require_True(t, ci.NumPending == 0) 20892 require_True(t, ci.NumRedelivered == 0) 20893 } 20894 20895 func TestJetStreamConsumerAckFloorWithExpired(t *testing.T) { 20896 s := RunBasicJetStreamServer(t) 20897 defer s.Shutdown() 20898 20899 nc, js := jsClientConnect(t, s) 20900 defer nc.Close() 20901 20902 _, err := js.AddStream(&nats.StreamConfig{ 20903 Name: "TEST", 20904 Subjects: []string{"foo"}, 20905 MaxAge: 2 * time.Second, 20906 }) 20907 require_NoError(t, err) 20908 20909 nmsgs := 100 20910 for i := 0; i < nmsgs; i++ { 20911 sendStreamMsg(t, nc, "foo", "OK") 20912 } 20913 sub, err := js.PullSubscribe("foo", "dlc", nats.AckWait(time.Second)) 20914 require_NoError(t, err) 20915 20916 // Queue up all for ack pending. 20917 msgs, err := sub.Fetch(nmsgs) 20918 require_NoError(t, err) 20919 require_True(t, len(msgs) == nmsgs) 20920 20921 // Let all messages expire. 20922 time.Sleep(3 * time.Second) 20923 20924 si, err := js.StreamInfo("TEST") 20925 require_NoError(t, err) 20926 require_True(t, si.State.Msgs == 0) 20927 20928 ci, err := js.ConsumerInfo("TEST", "dlc") 20929 require_NoError(t, err) 20930 20931 require_True(t, ci.Delivered.Consumer == uint64(nmsgs)) 20932 require_True(t, ci.Delivered.Stream == uint64(nmsgs)) 20933 require_True(t, ci.AckFloor.Consumer == uint64(nmsgs)) 20934 require_True(t, ci.AckFloor.Stream == uint64(nmsgs)) 20935 require_True(t, ci.NumAckPending == 0) 20936 require_True(t, ci.NumPending == 0) 20937 require_True(t, ci.NumRedelivered == 0) 20938 } 20939 20940 func TestJetStreamConsumerIsFiltered(t *testing.T) { 20941 s := RunBasicJetStreamServer(t) 20942 defer s.Shutdown() 20943 acc := s.GlobalAccount() 20944 20945 tests := []struct { 20946 name string 20947 streamSubjects []string 20948 filters []string 20949 isFiltered bool 20950 }{ 20951 { 20952 name: "single_subject", 20953 streamSubjects: []string{"one"}, 20954 filters: []string{"one"}, 20955 isFiltered: false, 20956 }, 20957 { 20958 name: "single_subject_filtered", 20959 streamSubjects: []string{"one.>"}, 20960 filters: []string{"one.filter"}, 20961 isFiltered: true, 20962 }, 20963 { 20964 name: "multi_subject_non_filtered", 20965 streamSubjects: []string{"multi", "foo", "bar.>"}, 20966 filters: []string{"multi", "bar.>", "foo"}, 20967 isFiltered: false, 20968 }, 20969 { 20970 name: "multi_subject_filtered_wc", 20971 streamSubjects: []string{"events", "data"}, 20972 filters: []string{"data"}, 20973 isFiltered: true, 20974 }, 20975 { 20976 name: "multi_subject_filtered", 20977 streamSubjects: []string{"machines", "floors"}, 20978 filters: []string{"machines"}, 20979 isFiltered: true, 20980 }, 20981 } 20982 for _, test := range tests { 20983 t.Run(test.name, func(t *testing.T) { 20984 mset, err := acc.addStream(&StreamConfig{ 
20985 Name: test.name, 20986 Subjects: test.streamSubjects, 20987 }) 20988 require_NoError(t, err) 20989 20990 o, err := mset.addConsumer(&ConsumerConfig{ 20991 FilterSubjects: test.filters, 20992 Durable: test.name, 20993 }) 20994 require_NoError(t, err) 20995 20996 require_True(t, o.isFiltered() == test.isFiltered) 20997 }) 20998 } 20999 } 21000 21001 func TestJetStreamConsumerWithFormattingSymbol(t *testing.T) { 21002 s := RunBasicJetStreamServer(t) 21003 defer s.Shutdown() 21004 21005 nc, js := jsClientConnect(t, s) 21006 defer nc.Close() 21007 21008 _, err := js.AddStream(&nats.StreamConfig{ 21009 Name: "Test%123", 21010 Subjects: []string{"foo"}, 21011 }) 21012 require_NoError(t, err) 21013 21014 for i := 0; i < 10; i++ { 21015 sendStreamMsg(t, nc, "foo", "OK") 21016 } 21017 21018 _, err = js.AddConsumer("Test%123", &nats.ConsumerConfig{ 21019 Durable: "Test%123", 21020 FilterSubject: "foo", 21021 DeliverSubject: "bar", 21022 }) 21023 require_NoError(t, err) 21024 21025 sub, err := js.SubscribeSync("foo", nats.Bind("Test%123", "Test%123")) 21026 require_NoError(t, err) 21027 21028 _, err = sub.NextMsg(time.Second * 5) 21029 require_NoError(t, err) 21030 } 21031 21032 func TestJetStreamStreamUpdateWithExternalSource(t *testing.T) { 21033 ho := DefaultTestOptions 21034 ho.Port = -1 21035 ho.LeafNode.Host = "127.0.0.1" 21036 ho.LeafNode.Port = -1 21037 ho.JetStream = true 21038 ho.JetStreamDomain = "hub" 21039 ho.StoreDir = t.TempDir() 21040 hs := RunServer(&ho) 21041 defer hs.Shutdown() 21042 21043 lu, err := url.Parse(fmt.Sprintf("nats://127.0.0.1:%d", ho.LeafNode.Port)) 21044 require_NoError(t, err) 21045 21046 lo1 := DefaultTestOptions 21047 lo1.Port = -1 21048 lo1.ServerName = "a-leaf" 21049 lo1.JetStream = true 21050 lo1.StoreDir = t.TempDir() 21051 lo1.JetStreamDomain = "a-leaf" 21052 lo1.LeafNode.Remotes = []*RemoteLeafOpts{{URLs: []*url.URL{lu}}} 21053 l1 := RunServer(&lo1) 21054 defer l1.Shutdown() 21055 21056 checkLeafNodeConnected(t, l1) 21057 21058 // Test sources with `External` provided 21059 ncl, jsl := jsClientConnect(t, l1) 21060 defer ncl.Close() 21061 21062 // Hub stream. 21063 _, err = jsl.AddStream(&nats.StreamConfig{Name: "stream", Subjects: []string{"leaf"}}) 21064 require_NoError(t, err) 21065 21066 nch, jsh := jsClientConnect(t, hs) 21067 defer nch.Close() 21068 21069 // Leaf stream. 21070 // Both streams uses the same name, as we're testing if overlap does not check against itself 21071 // if `External` stream has the same name. 21072 _, err = jsh.AddStream(&nats.StreamConfig{ 21073 Name: "stream", 21074 Subjects: []string{"hub"}, 21075 }) 21076 require_NoError(t, err) 21077 21078 // Add `Sources`. 21079 // This should not validate subjects overlap against itself. 21080 _, err = jsh.UpdateStream(&nats.StreamConfig{ 21081 Name: "stream", 21082 Subjects: []string{"hub"}, 21083 Sources: []*nats.StreamSource{ 21084 { 21085 Name: "stream", 21086 FilterSubject: "leaf", 21087 External: &nats.ExternalStream{ 21088 APIPrefix: "$JS.a-leaf.API", 21089 }, 21090 }, 21091 }, 21092 }) 21093 require_NoError(t, err) 21094 21095 // Specifying not existing FilterSubject should also be fine, as we do not validate `External` stream. 
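// The update should still succeed even though no local stream carries this filter subject.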
21096 _, err = jsh.UpdateStream(&nats.StreamConfig{ 21097 Name: "stream", 21098 Subjects: []string{"hub"}, 21099 Sources: []*nats.StreamSource{ 21100 { 21101 Name: "stream", 21102 FilterSubject: "foo", 21103 External: &nats.ExternalStream{ 21104 APIPrefix: "$JS.a-leaf.API", 21105 }, 21106 }, 21107 }, 21108 }) 21109 require_NoError(t, err) 21110 } 21111 21112 func TestJetStreamKVHistoryRegression(t *testing.T) { 21113 s := RunBasicJetStreamServer(t) 21114 defer s.Shutdown() 21115 21116 nc, js := jsClientConnect(t, s) 21117 defer nc.Close() 21118 21119 for _, storage := range []nats.StorageType{nats.FileStorage, nats.MemoryStorage} { 21120 t.Run(storage.String(), func(t *testing.T) { 21121 js.DeleteKeyValue("TEST") 21122 21123 kv, err := js.CreateKeyValue(&nats.KeyValueConfig{ 21124 Bucket: "TEST", 21125 History: 4, 21126 Storage: storage, 21127 }) 21128 require_NoError(t, err) 21129 21130 r1, err := kv.Create("foo", []byte("a")) 21131 require_NoError(t, err) 21132 21133 _, err = kv.Update("foo", []byte("ab"), r1) 21134 require_NoError(t, err) 21135 21136 err = kv.Delete("foo") 21137 require_NoError(t, err) 21138 21139 _, err = kv.Create("foo", []byte("abc")) 21140 require_NoError(t, err) 21141 21142 err = kv.Delete("foo") 21143 require_NoError(t, err) 21144 21145 history, err := kv.History("foo") 21146 require_NoError(t, err) 21147 require_True(t, len(history) == 4) 21148 21149 _, err = kv.Update("foo", []byte("abcd"), history[len(history)-1].Revision()) 21150 require_NoError(t, err) 21151 21152 err = kv.Purge("foo") 21153 require_NoError(t, err) 21154 21155 _, err = kv.Create("foo", []byte("abcde")) 21156 require_NoError(t, err) 21157 21158 err = kv.Purge("foo") 21159 require_NoError(t, err) 21160 21161 history, err = kv.History("foo") 21162 require_NoError(t, err) 21163 require_True(t, len(history) == 1) 21164 }) 21165 } 21166 } 21167 21168 func TestJetStreamSnapshotRestoreStallAndHealthz(t *testing.T) { 21169 s := RunBasicJetStreamServer(t) 21170 defer s.Shutdown() 21171 21172 nc, js := jsClientConnect(t, s) 21173 defer nc.Close() 21174 21175 _, err := js.AddStream(&nats.StreamConfig{ 21176 Name: "ORDERS", 21177 Subjects: []string{"orders.*"}, 21178 }) 21179 require_NoError(t, err) 21180 21181 for i := 0; i < 1000; i++ { 21182 sendStreamMsg(t, nc, "orders.created", "new order") 21183 } 21184 21185 hs := s.healthz(nil) 21186 if hs.Status != "ok" || hs.Error != _EMPTY_ { 21187 t.Fatalf("Expected health to be ok, got %+v", hs) 21188 } 21189 21190 // Simulate the staging directory for restores. This is normally cleaned up 21191 // but since its at the root of the storage directory make sure healthz is not affected. 21192 snapDir := filepath.Join(s.getJetStream().config.StoreDir, snapStagingDir) 21193 require_NoError(t, os.MkdirAll(snapDir, defaultDirPerms)) 21194 21195 // Make sure healthz ok. 
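// The staging directory holds no stream state of its own, so its presence alone must not
// degrade the health check.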
21196 hs = s.healthz(nil) 21197 if hs.Status != "ok" || hs.Error != _EMPTY_ { 21198 t.Fatalf("Expected health to be ok, got %+v", hs) 21199 } 21200 } 21201 21202 // https://github.com/nats-io/nats-server/pull/4163 21203 func TestJetStreamMaxBytesIgnored(t *testing.T) { 21204 s := RunBasicJetStreamServer(t) 21205 defer s.Shutdown() 21206 21207 nc, js := jsClientConnect(t, s) 21208 defer nc.Close() 21209 21210 _, err := js.AddStream(&nats.StreamConfig{ 21211 Name: "TEST", 21212 Subjects: []string{"*"}, 21213 MaxBytes: 10 * 1024 * 1024, 21214 }) 21215 require_NoError(t, err) 21216 21217 msg := bytes.Repeat([]byte("A"), 1024*1024) 21218 21219 for i := 0; i < 10; i++ { 21220 _, err := js.Publish("x", msg) 21221 require_NoError(t, err) 21222 } 21223 21224 si, err := js.StreamInfo("TEST") 21225 require_NoError(t, err) 21226 require_True(t, si.State.Msgs == 9) 21227 21228 // Stop current 21229 sd := s.JetStreamConfig().StoreDir 21230 s.Shutdown() 21231 21232 // We will truncate blk file. 21233 mdir := filepath.Join(sd, "$G", "streams", "TEST", "msgs") 21234 // Truncate blk 21235 err = os.WriteFile(filepath.Join(mdir, "1.blk"), nil, defaultFilePerms) 21236 require_NoError(t, err) 21237 21238 // Restart. 21239 s = RunJetStreamServerOnPort(-1, sd) 21240 defer s.Shutdown() 21241 21242 nc, js = jsClientConnect(t, s) 21243 defer nc.Close() 21244 21245 for i := 0; i < 10; i++ { 21246 _, err := js.Publish("x", msg) 21247 require_NoError(t, err) 21248 } 21249 21250 si, err = js.StreamInfo("TEST") 21251 require_NoError(t, err) 21252 require_True(t, si.State.Bytes <= 10*1024*1024) 21253 } 21254 21255 func TestJetStreamLastSequenceBySubjectConcurrent(t *testing.T) { 21256 for _, st := range []StorageType{FileStorage, MemoryStorage} { 21257 t.Run(st.String(), func(t *testing.T) { 21258 c := createJetStreamClusterExplicit(t, "JSC", 3) 21259 defer c.shutdown() 21260 21261 nc0, js0 := jsClientConnect(t, c.randomServer()) 21262 defer nc0.Close() 21263 21264 nc1, js1 := jsClientConnect(t, c.randomServer()) 21265 defer nc1.Close() 21266 21267 cfg := StreamConfig{ 21268 Name: "KV", 21269 Subjects: []string{"kv.>"}, 21270 Storage: st, 21271 Replicas: 3, 21272 } 21273 21274 req, err := json.Marshal(cfg) 21275 if err != nil { 21276 t.Fatalf("Unexpected error: %v", err) 21277 } 21278 // Do manually for now. 21279 m, err := nc0.Request(fmt.Sprintf(JSApiStreamCreateT, cfg.Name), req, time.Second) 21280 require_NoError(t, err) 21281 si, err := js0.StreamInfo("KV") 21282 if err != nil { 21283 t.Fatalf("Unexpected error: %v, respmsg: %q", err, string(m.Data)) 21284 } 21285 if si == nil || si.Config.Name != "KV" { 21286 t.Fatalf("StreamInfo is not correct %+v", si) 21287 } 21288 21289 pub := func(js nats.JetStreamContext, subj, data, seq string) { 21290 t.Helper() 21291 m := nats.NewMsg(subj) 21292 m.Data = []byte(data) 21293 m.Header.Set(JSExpectedLastSubjSeq, seq) 21294 js.PublishMsg(m) 21295 } 21296 21297 ready := make(chan struct{}) 21298 wg := &sync.WaitGroup{} 21299 wg.Add(2) 21300 21301 go func() { 21302 <-ready 21303 pub(js0, "kv.foo", "0-0", "0") 21304 pub(js0, "kv.foo", "0-1", "1") 21305 pub(js0, "kv.foo", "0-2", "2") 21306 wg.Done() 21307 }() 21308 21309 go func() { 21310 <-ready 21311 pub(js1, "kv.foo", "1-0", "0") 21312 pub(js1, "kv.foo", "1-1", "1") 21313 pub(js1, "kv.foo", "1-2", "2") 21314 wg.Done() 21315 }() 21316 21317 time.Sleep(50 * time.Millisecond) 21318 close(ready) 21319 wg.Wait() 21320 21321 // Read the messages. 
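// Only one publisher can win each expected-last-sequence slot, so of the six publishes above
// only three messages should have been stored.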
21322 sub, err := js0.PullSubscribe(_EMPTY_, _EMPTY_, nats.BindStream("KV")) 21323 require_NoError(t, err) 21324 msgs, err := sub.Fetch(10) 21325 require_NoError(t, err) 21326 if len(msgs) != 3 { 21327 t.Errorf("Expected 3 messages, got %d", len(msgs)) 21328 } 21329 for i, m := range msgs { 21330 if m.Header.Get(JSExpectedLastSubjSeq) != fmt.Sprint(i) { 21331 t.Errorf("Expected %d for last sequence, got %q", i, m.Header.Get(JSExpectedLastSubjSeq)) 21332 } 21333 } 21334 }) 21335 } 21336 } 21337 21338 func TestJetStreamServerReencryption(t *testing.T) { 21339 storeDir := t.TempDir() 21340 21341 for i, algo := range []struct { 21342 from string 21343 to string 21344 }{ 21345 {"aes", "aes"}, 21346 {"aes", "chacha"}, 21347 {"chacha", "chacha"}, 21348 {"chacha", "aes"}, 21349 } { 21350 t.Run(fmt.Sprintf("%s_to_%s", algo.from, algo.to), func(t *testing.T) { 21351 streamName := fmt.Sprintf("TEST_%d", i) 21352 subjectName := fmt.Sprintf("foo_%d", i) 21353 expected := 30 21354 21355 checkStream := func(js nats.JetStreamContext) { 21356 si, err := js.StreamInfo(streamName) 21357 if err != nil { 21358 t.Fatal(err) 21359 } 21360 21361 if si.State.Msgs != uint64(expected) { 21362 t.Fatalf("Should be %d messages but got %d messages", expected, si.State.Msgs) 21363 } 21364 21365 sub, err := js.PullSubscribe(subjectName, "") 21366 if err != nil { 21367 t.Fatalf("Unexpected error: %v", err) 21368 } 21369 21370 c := 0 21371 for _, m := range fetchMsgs(t, sub, expected, 5*time.Second) { 21372 m.AckSync() 21373 c++ 21374 } 21375 if c != expected { 21376 t.Fatalf("Should have read back %d messages but got %d messages", expected, c) 21377 } 21378 } 21379 21380 // First off, we start up using the original encryption key and algorithm. 21381 // We'll create a stream and populate it with some messages. 21382 t.Run("setup", func(t *testing.T) { 21383 conf := createConfFile(t, []byte(fmt.Sprintf(` 21384 server_name: S22 21385 listen: 127.0.0.1:-1 21386 jetstream: { 21387 key: %q, 21388 cipher: %s, 21389 store_dir: %q 21390 } 21391 `, "firstencryptionkey", algo.from, storeDir))) 21392 21393 s, _ := RunServerWithConfig(conf) 21394 defer s.Shutdown() 21395 21396 nc, js := jsClientConnect(t, s) 21397 defer nc.Close() 21398 21399 cfg := &nats.StreamConfig{ 21400 Name: streamName, 21401 Subjects: []string{subjectName}, 21402 } 21403 if _, err := js.AddStream(cfg); err != nil { 21404 t.Fatalf("Unexpected error: %v", err) 21405 } 21406 21407 for i := 0; i < expected; i++ { 21408 if _, err := js.Publish(subjectName, []byte("ENCRYPTED PAYLOAD!!")); err != nil { 21409 t.Fatalf("Unexpected publish error: %v", err) 21410 } 21411 } 21412 21413 checkStream(js) 21414 }) 21415 21416 // Next up, we will restart the server, this time with both the new key 21417 // and algorithm and also the old key. At startup, the server will detect 21418 // the change in encryption key and/or algorithm and re-encrypt the stream. 21419 t.Run("reencrypt", func(t *testing.T) { 21420 conf := createConfFile(t, []byte(fmt.Sprintf(` 21421 server_name: S22 21422 listen: 127.0.0.1:-1 21423 jetstream: { 21424 key: %q, 21425 cipher: %s, 21426 prev_key: %q, 21427 store_dir: %q 21428 } 21429 `, "secondencryptionkey", algo.to, "firstencryptionkey", storeDir))) 21430 21431 s, _ := RunServerWithConfig(conf) 21432 defer s.Shutdown() 21433 21434 nc, js := jsClientConnect(t, s) 21435 defer nc.Close() 21436 21437 checkStream(js) 21438 }) 21439 21440 // Finally, we'll restart the server using only the new key and algorithm. 
21441 // At this point everything should have been re-encrypted, so we should still 21442 // be able to access the stream. 21443 t.Run("restart", func(t *testing.T) { 21444 conf := createConfFile(t, []byte(fmt.Sprintf(` 21445 server_name: S22 21446 listen: 127.0.0.1:-1 21447 jetstream: { 21448 key: %q, 21449 cipher: %s, 21450 store_dir: %q 21451 } 21452 `, "secondencryptionkey", algo.to, storeDir))) 21453 21454 s, _ := RunServerWithConfig(conf) 21455 defer s.Shutdown() 21456 21457 nc, js := jsClientConnect(t, s) 21458 defer nc.Close() 21459 21460 checkStream(js) 21461 }) 21462 }) 21463 } 21464 } 21465 21466 func TestJetStreamLimitsToInterestPolicy(t *testing.T) { 21467 c := createJetStreamClusterExplicit(t, "JSC", 3) 21468 defer c.shutdown() 21469 21470 nc, js := jsClientConnect(t, c.leader()) 21471 defer nc.Close() 21472 21473 // This is the index of the consumer that we'll create as R1 21474 // instead of R3, just to prove that it blocks the stream 21475 // update from happening properly. 21476 singleReplica := 3 21477 21478 streamCfg := nats.StreamConfig{ 21479 Name: "TEST", 21480 Subjects: []string{"foo"}, 21481 Retention: nats.LimitsPolicy, 21482 Storage: nats.MemoryStorage, 21483 Replicas: 3, 21484 } 21485 21486 stream, err := js.AddStream(&streamCfg) 21487 require_NoError(t, err) 21488 21489 for i := 0; i < 10; i++ { 21490 replicas := streamCfg.Replicas 21491 if i == singleReplica { 21492 // Make one of the consumers R1 so that we can check 21493 // that the switch to interest-based retention is also 21494 // turning it into an R3 consumer. 21495 replicas = 1 21496 } 21497 cname := fmt.Sprintf("test_%d", i) 21498 _, err := js.AddConsumer("TEST", &nats.ConsumerConfig{ 21499 Name: cname, 21500 Durable: cname, 21501 AckPolicy: nats.AckAllPolicy, 21502 Replicas: replicas, 21503 }) 21504 require_NoError(t, err) 21505 } 21506 21507 for i := 0; i < 20; i++ { 21508 _, err := js.Publish("foo", []byte{1, 2, 3, 4, 5}) 21509 require_NoError(t, err) 21510 } 21511 21512 // Pull 10 or more messages from the stream. We will never pull 21513 // less than 10, which guarantees that the lowest ack floor of 21514 // all consumers should be 10. 21515 for i := 0; i < 10; i++ { 21516 cname := fmt.Sprintf("test_%d", i) 21517 count := 10 + i // At least 10 messages 21518 21519 sub, err := js.PullSubscribe("foo", cname) 21520 require_NoError(t, err) 21521 21522 msgs, err := sub.Fetch(count) 21523 require_NoError(t, err) 21524 require_Equal(t, len(msgs), count) 21525 require_NoError(t, msgs[len(msgs)-1].AckSync()) 21526 21527 // At this point the ack floor should match the count of 21528 // messages we received. 21529 info, err := js.ConsumerInfo("TEST", cname) 21530 require_NoError(t, err) 21531 require_Equal(t, info.AckFloor.Consumer, uint64(count)) 21532 } 21533 21534 // Try updating to interest-based. This should fail because 21535 // we have a consumer that is R1 on an R3 stream. 21536 streamCfg = stream.Config 21537 streamCfg.Retention = nats.InterestPolicy 21538 _, err = js.UpdateStream(&streamCfg) 21539 require_Error(t, err) 21540 21541 // Now we'll make the R1 consumer an R3. 
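// Once every consumer is R3, the switch to interest-based retention below should be accepted.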
21542 cname := fmt.Sprintf("test_%d", singleReplica) 21543 cinfo, err := js.ConsumerInfo("TEST", cname) 21544 require_NoError(t, err) 21545 21546 cinfo.Config.Replicas = streamCfg.Replicas 21547 _, _ = js.UpdateConsumer("TEST", &cinfo.Config) 21548 // TODO(nat): The jsConsumerCreateRequest update doesn't always 21549 // respond when there are no errors updating a consumer, so this 21550 // nearly always returns a timeout, despite actually doing what 21551 // it should. We'll make sure the replicas were updated by doing 21552 // another consumer info just to be sure. 21553 // require_NoError(t, err) 21554 c.waitOnAllCurrent() 21555 cinfo, err = js.ConsumerInfo("TEST", cname) 21556 require_NoError(t, err) 21557 require_Equal(t, cinfo.Config.Replicas, streamCfg.Replicas) 21558 require_Equal(t, len(cinfo.Cluster.Replicas), streamCfg.Replicas-1) 21559 21560 // This time it should succeed. 21561 _, err = js.UpdateStream(&streamCfg) 21562 require_NoError(t, err) 21563 21564 // We need to wait for all nodes to have applied the new stream 21565 // configuration. 21566 c.waitOnAllCurrent() 21567 21568 // Now we should only have 10 messages left in the stream, as 21569 // each consumer has acked at least the first 10 messages. 21570 info, err := js.StreamInfo("TEST") 21571 require_NoError(t, err) 21572 require_Equal(t, info.State.FirstSeq, 11) 21573 require_Equal(t, info.State.Msgs, 10) 21574 } 21575 21576 func TestJetStreamLimitsToInterestPolicyWhileAcking(t *testing.T) { 21577 for _, st := range []nats.StorageType{nats.FileStorage, nats.MemoryStorage} { 21578 t.Run(st.String(), func(t *testing.T) { 21579 c := createJetStreamClusterExplicit(t, "JSC", 3) 21580 defer c.shutdown() 21581 21582 nc, js := jsClientConnect(t, c.leader()) 21583 defer nc.Close() 21584 streamCfg := nats.StreamConfig{ 21585 Name: "TEST", 21586 Subjects: []string{"foo"}, 21587 Retention: nats.LimitsPolicy, 21588 Storage: st, 21589 Replicas: 3, 21590 } 21591 21592 stream, err := js.AddStream(&streamCfg) 21593 require_NoError(t, err) 21594 21595 wg := sync.WaitGroup{} 21596 ctx, cancel := context.WithCancel(context.Background()) 21597 payload := []byte(strings.Repeat("A", 128)) 21598 21599 wg.Add(1) 21600 go func() { 21601 defer wg.Done() 21602 for range time.NewTicker(10 * time.Millisecond).C { 21603 select { 21604 case <-ctx.Done(): 21605 return 21606 default: 21607 } 21608 js.Publish("foo", payload) 21609 } 21610 }() 21611 for i := 0; i < 5; i++ { 21612 cname := fmt.Sprintf("test_%d", i) 21613 sub, err := js.PullSubscribe("foo", cname) 21614 require_NoError(t, err) 21615 21616 wg.Add(1) 21617 go func() { 21618 defer wg.Done() 21619 for range time.NewTicker(10 * time.Millisecond).C { 21620 select { 21621 case <-ctx.Done(): 21622 return 21623 default: 21624 } 21625 21626 msgs, err := sub.Fetch(1) 21627 if err != nil && errors.Is(err, nats.ErrTimeout) { 21628 t.Logf("ERROR: %v", err) 21629 } 21630 for _, msg := range msgs { 21631 msg.Ack() 21632 } 21633 } 21634 }() 21635 } 21636 // Leave running for a few secs then do the change on a different connection. 21637 time.Sleep(5 * time.Second) 21638 nc2, js2 := jsClientConnect(t, c.leader()) 21639 defer nc2.Close() 21640 21641 // Try updating to interest-based and changing replicas too. 21642 streamCfg = stream.Config 21643 streamCfg.Retention = nats.InterestPolicy 21644 _, err = js2.UpdateStream(&streamCfg) 21645 require_NoError(t, err) 21646 21647 // We need to wait for all nodes to have applied the new stream 21648 // configuration. 
21649 c.waitOnAllCurrent() 21650 21651 var retention nats.RetentionPolicy 21652 checkFor(t, 15*time.Second, 500*time.Millisecond, func() error { 21653 info, err := js2.StreamInfo("TEST", nats.MaxWait(500*time.Millisecond)) 21654 if err != nil { 21655 return err 21656 } 21657 retention = info.Config.Retention 21658 return nil 21659 }) 21660 require_Equal(t, retention, nats.InterestPolicy) 21661 21662 // Cancel and wait for goroutines underneath. 21663 cancel() 21664 wg.Wait() 21665 }) 21666 } 21667 } 21668 21669 func TestJetStreamUsageSyncDeadlock(t *testing.T) { 21670 s := RunBasicJetStreamServer(t) 21671 defer s.Shutdown() 21672 21673 nc, js := jsClientConnect(t, s) 21674 defer nc.Close() 21675 21676 _, err := js.AddStream(&nats.StreamConfig{ 21677 Name: "TEST", 21678 Subjects: []string{"*"}, 21679 }) 21680 require_NoError(t, err) 21681 21682 sendStreamMsg(t, nc, "foo", "hello") 21683 21684 // Now purposely mess up the usage that will force a sync. 21685 // Without the fix this will deadlock. 21686 jsa := s.getJetStream().lookupAccount(s.GlobalAccount()) 21687 jsa.usageMu.Lock() 21688 st, ok := jsa.usage[_EMPTY_] 21689 require_True(t, ok) 21690 st.local.store = -1000 21691 jsa.usageMu.Unlock() 21692 21693 sendStreamMsg(t, nc, "foo", "hello") 21694 } 21695 21696 // https://github.com/nats-io/nats.go/issues/1382 21697 // https://github.com/nats-io/nats-server/issues/4445 21698 func TestJetStreamChangeMaxMessagesPerSubject(t *testing.T) { 21699 s := RunBasicJetStreamServer(t) 21700 defer s.Shutdown() 21701 21702 nc, js := jsClientConnect(t, s) 21703 defer nc.Close() 21704 21705 _, err := js.AddStream(&nats.StreamConfig{ 21706 Name: "TEST", 21707 Subjects: []string{"one.>"}, 21708 MaxMsgsPerSubject: 5, 21709 }) 21710 require_NoError(t, err) 21711 21712 for i := 0; i < 10; i++ { 21713 sendStreamMsg(t, nc, "one.data", "data") 21714 } 21715 21716 expectMsgs := func(num int32) error { 21717 t.Helper() 21718 21719 var msgs atomic.Int32 21720 sub, err := js.Subscribe("one.>", func(msg *nats.Msg) { 21721 msgs.Add(1) 21722 msg.Ack() 21723 }) 21724 require_NoError(t, err) 21725 defer sub.Unsubscribe() 21726 21727 checkFor(t, 5*time.Second, 100*time.Millisecond, func() error { 21728 if nm := msgs.Load(); nm != num { 21729 return fmt.Errorf("expected to get %v messages, got %v instead", num, nm) 21730 } 21731 return nil 21732 }) 21733 return nil 21734 } 21735 21736 require_NoError(t, expectMsgs(5)) 21737 21738 js.UpdateStream(&nats.StreamConfig{ 21739 Name: "TEST", 21740 Subjects: []string{"one.>"}, 21741 MaxMsgsPerSubject: 3, 21742 }) 21743 21744 info, err := js.StreamInfo("TEST") 21745 require_NoError(t, err) 21746 require_True(t, info.Config.MaxMsgsPerSubject == 3) 21747 require_True(t, info.State.Msgs == 3) 21748 21749 require_NoError(t, expectMsgs(3)) 21750 21751 for i := 0; i < 10; i++ { 21752 sendStreamMsg(t, nc, "one.data", "data") 21753 } 21754 21755 require_NoError(t, expectMsgs(3)) 21756 } 21757 21758 func TestJetStreamConsumerDefaultsFromStream(t *testing.T) { 21759 s := RunBasicJetStreamServer(t) 21760 defer s.Shutdown() 21761 21762 acc := s.GlobalAccount() 21763 if _, err := acc.addStream(&StreamConfig{ 21764 Name: "test", 21765 Subjects: []string{"test.*"}, 21766 ConsumerLimits: StreamConsumerLimits{ 21767 MaxAckPending: 15, 21768 InactiveThreshold: time.Second, 21769 }, 21770 }); err != nil { 21771 t.Fatalf("Failed to add stream: %v", err) 21772 } 21773 21774 nc := clientConnectToServer(t, s) 21775 defer nc.Close() 21776 21777 js, err := nc.JetStream() 21778 if err != nil { 21779 
t.Fatalf("Failed to connect to JetStream: %v", err) 21780 } 21781 21782 t.Run("InheritDefaultsFromStream", func(t *testing.T) { 21783 ci, err := js.AddConsumer("test", &nats.ConsumerConfig{ 21784 Name: "InheritDefaultsFromStream", 21785 AckPolicy: nats.AckExplicitPolicy, 21786 }) 21787 require_NoError(t, err) 21788 21789 switch { 21790 case ci.Config.InactiveThreshold != time.Second: 21791 t.Fatalf("InactiveThreshold should be 1s, got %s", ci.Config.InactiveThreshold) 21792 case ci.Config.MaxAckPending != 15: 21793 t.Fatalf("MaxAckPending should be 15, got %d", ci.Config.MaxAckPending) 21794 } 21795 }) 21796 21797 t.Run("CreateConsumerErrorOnExceedMaxAckPending", func(t *testing.T) { 21798 _, err := js.AddConsumer("test", &nats.ConsumerConfig{ 21799 Name: "CreateConsumerErrorOnExceedMaxAckPending", 21800 MaxAckPending: 30, 21801 }) 21802 switch e := err.(type) { 21803 case *nats.APIError: 21804 if ErrorIdentifier(e.ErrorCode) != JSConsumerMaxPendingAckExcessErrF { 21805 t.Fatalf("invalid error code, got %d, wanted %d", e.ErrorCode, JSConsumerMaxPendingAckExcessErrF) 21806 } 21807 default: 21808 t.Fatalf("should have returned API error, got %T", e) 21809 } 21810 }) 21811 21812 t.Run("CreateConsumerErrorOnExceedInactiveThreshold", func(t *testing.T) { 21813 _, err := js.AddConsumer("test", &nats.ConsumerConfig{ 21814 Name: "CreateConsumerErrorOnExceedInactiveThreshold", 21815 InactiveThreshold: time.Second * 2, 21816 }) 21817 switch e := err.(type) { 21818 case *nats.APIError: 21819 if ErrorIdentifier(e.ErrorCode) != JSConsumerInactiveThresholdExcess { 21820 t.Fatalf("invalid error code, got %d, wanted %d", e.ErrorCode, JSConsumerInactiveThresholdExcess) 21821 } 21822 default: 21823 t.Fatalf("should have returned API error, got %T", e) 21824 } 21825 }) 21826 21827 t.Run("UpdateStreamErrorOnViolateConsumerMaxAckPending", func(t *testing.T) { 21828 _, err := js.AddConsumer("test", &nats.ConsumerConfig{ 21829 Name: "UpdateStreamErrorOnViolateConsumerMaxAckPending", 21830 MaxAckPending: 15, 21831 }) 21832 require_NoError(t, err) 21833 21834 stream, err := acc.lookupStream("test") 21835 require_NoError(t, err) 21836 21837 err = stream.update(&StreamConfig{ 21838 Name: "test", 21839 Subjects: []string{"test.*"}, 21840 ConsumerLimits: StreamConsumerLimits{ 21841 MaxAckPending: 10, 21842 }, 21843 }) 21844 if err == nil { 21845 t.Fatalf("stream update should have errored but didn't") 21846 } 21847 }) 21848 21849 t.Run("UpdateStreamErrorOnViolateConsumerInactiveThreshold", func(t *testing.T) { 21850 _, err := js.AddConsumer("test", &nats.ConsumerConfig{ 21851 Name: "UpdateStreamErrorOnViolateConsumerInactiveThreshold", 21852 InactiveThreshold: time.Second, 21853 }) 21854 require_NoError(t, err) 21855 21856 stream, err := acc.lookupStream("test") 21857 require_NoError(t, err) 21858 21859 err = stream.update(&StreamConfig{ 21860 Name: "test", 21861 Subjects: []string{"test.*"}, 21862 ConsumerLimits: StreamConsumerLimits{ 21863 InactiveThreshold: time.Second / 2, 21864 }, 21865 }) 21866 if err == nil { 21867 t.Fatalf("stream update should have errored but didn't") 21868 } 21869 }) 21870 } 21871 21872 func TestJetStreamSyncInterval(t *testing.T) { 21873 sd := t.TempDir() 21874 tmpl := ` 21875 listen: 127.0.0.1:-1 21876 jetstream: { 21877 store_dir: %q 21878 %s 21879 }` 21880 21881 for _, test := range []struct { 21882 name string 21883 sync string 21884 expected time.Duration 21885 always bool 21886 }{ 21887 {"Default", _EMPTY_, defaultSyncInterval, false}, 21888 {"10s", "sync_interval: 10s", 
time.Duration(10 * time.Second), false}, 21889 {"Always", "sync_interval: always", defaultSyncInterval, true}, 21890 } { 21891 t.Run(test.name, func(t *testing.T) { 21892 conf := createConfFile(t, []byte(fmt.Sprintf(tmpl, sd, test.sync))) 21893 s, _ := RunServerWithConfig(conf) 21894 defer s.Shutdown() 21895 21896 opts := s.getOpts() 21897 require_True(t, opts.SyncInterval == test.expected) 21898 21899 nc, js := jsClientConnect(t, s) 21900 defer nc.Close() 21901 21902 _, err := js.AddStream(&nats.StreamConfig{ 21903 Name: "TEST", 21904 Subjects: []string{"test.>"}, 21905 }) 21906 require_NoError(t, err) 21907 21908 mset, err := s.GlobalAccount().lookupStream("TEST") 21909 require_NoError(t, err) 21910 fs := mset.store.(*fileStore) 21911 fs.mu.RLock() 21912 fsSync := fs.fcfg.SyncInterval 21913 syncAlways := fs.fcfg.SyncAlways 21914 fs.mu.RUnlock() 21915 require_True(t, fsSync == test.expected) 21916 require_True(t, syncAlways == test.always) 21917 }) 21918 } 21919 } 21920 21921 func TestJetStreamFilteredSubjectUsesNewConsumerCreateSubject(t *testing.T) { 21922 s := RunBasicJetStreamServer(t) 21923 defer s.Shutdown() 21924 21925 nc, _ := jsClientConnect(t, s) 21926 defer nc.Close() 21927 21928 extEndpoint := make(chan *nats.Msg, 1) 21929 normalEndpoint := make(chan *nats.Msg, 1) 21930 21931 _, err := nc.ChanSubscribe(JSApiConsumerCreateEx, extEndpoint) 21932 require_NoError(t, err) 21933 21934 _, err = nc.ChanSubscribe(JSApiConsumerCreate, normalEndpoint) 21935 require_NoError(t, err) 21936 21937 testStreamSource := func(name string, shouldBeExtended bool, ss StreamSource) { 21938 t.Run(name, func(t *testing.T) { 21939 req := StreamConfig{ 21940 Name: name, 21941 Storage: MemoryStorage, 21942 Subjects: []string{fmt.Sprintf("foo.%s", name)}, 21943 Sources: []*StreamSource{&ss}, 21944 } 21945 reqJson, err := json.Marshal(req) 21946 require_NoError(t, err) 21947 21948 _, err = nc.Request(fmt.Sprintf(JSApiStreamCreateT, name), reqJson, time.Second) 21949 require_NoError(t, err) 21950 21951 select { 21952 case <-time.After(time.Second * 5): 21953 t.Fatalf("Timed out waiting for receive consumer create") 21954 case <-extEndpoint: 21955 if !shouldBeExtended { 21956 t.Fatalf("Expected normal consumer create, got extended") 21957 } 21958 case <-normalEndpoint: 21959 if shouldBeExtended { 21960 t.Fatalf("Expected extended consumer create, got normal") 21961 } 21962 } 21963 }) 21964 } 21965 21966 testStreamSource("OneFilterSubject", true, StreamSource{ 21967 Name: "source", 21968 FilterSubject: "bar.>", 21969 }) 21970 21971 testStreamSource("OneTransform", true, StreamSource{ 21972 Name: "source", 21973 SubjectTransforms: []SubjectTransformConfig{ 21974 { 21975 Source: "bar.one.>", 21976 Destination: "bar.two.>", 21977 }, 21978 }, 21979 }) 21980 21981 testStreamSource("TwoTransforms", false, StreamSource{ 21982 Name: "source", 21983 SubjectTransforms: []SubjectTransformConfig{ 21984 { 21985 Source: "bar.one.>", 21986 Destination: "bar.two.>", 21987 }, 21988 { 21989 Source: "baz.one.>", 21990 Destination: "baz.two.>", 21991 }, 21992 }, 21993 }) 21994 } 21995 21996 // Make sure when we downgrade history to a smaller number that the account info 21997 // is properly updated and all keys are still accessible. 21998 // There was a bug calculating next first that was not taking into account the dbit slots. 
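// Here we fill a bucket that keeps four revisions per key, drop MaxMsgsPerSubject down to one,
// and verify the reported usage shrinks while every key remains readable.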
21999 func TestJetStreamKVReductionInHistory(t *testing.T) { 22000 s := RunBasicJetStreamServer(t) 22001 defer s.Shutdown() 22002 22003 nc, js := jsClientConnect(t, s) 22004 defer nc.Close() 22005 22006 startHistory := 4 22007 kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "TEST", History: uint8(startHistory)}) 22008 require_NoError(t, err) 22009 22010 numKeys, msg := 1000, bytes.Repeat([]byte("ABC"), 330) // ~1000bytes 22011 for { 22012 key := fmt.Sprintf("%X", rand.Intn(numKeys)+1) 22013 _, err = kv.Put(key, msg) 22014 require_NoError(t, err) 22015 status, err := kv.Status() 22016 require_NoError(t, err) 22017 if status.Values() >= uint64(startHistory*numKeys) { 22018 break 22019 } 22020 } 22021 info, err := js.AccountInfo() 22022 require_NoError(t, err) 22023 22024 checkAllKeys := func() { 22025 t.Helper() 22026 // Make sure we can retrieve all of the keys. 22027 keys, err := kv.Keys() 22028 require_NoError(t, err) 22029 require_Equal(t, len(keys), numKeys) 22030 for _, key := range keys { 22031 _, err := kv.Get(key) 22032 require_NoError(t, err) 22033 } 22034 } 22035 22036 // Quick sanity check. 22037 checkAllKeys() 22038 22039 si, err := js.StreamInfo("KV_TEST") 22040 require_NoError(t, err) 22041 // Adjust down to history of 1. 22042 cfg := si.Config 22043 cfg.MaxMsgsPerSubject = 1 22044 _, err = js.UpdateStream(&cfg) 22045 require_NoError(t, err) 22046 // Make sure the accounting was updated. 22047 ninfo, err := js.AccountInfo() 22048 require_NoError(t, err) 22049 require_True(t, info.Store > ninfo.Store) 22050 22051 // Make sure all keys still accessible. 22052 checkAllKeys() 22053 } 22054 22055 // Server issue 4685 22056 func TestJetStreamConsumerPendingForKV(t *testing.T) { 22057 for _, st := range []nats.StorageType{nats.FileStorage, nats.MemoryStorage} { 22058 t.Run(st.String(), func(t *testing.T) { 22059 s := RunBasicJetStreamServer(t) 22060 defer s.Shutdown() 22061 22062 nc, js := jsClientConnect(t, s) 22063 defer nc.Close() 22064 22065 _, err := js.AddStream(&nats.StreamConfig{ 22066 Name: "TEST", 22067 Subjects: []string{"test.>"}, 22068 Storage: st, 22069 MaxMsgsPerSubject: 10, 22070 Discard: nats.DiscardNew, 22071 }) 22072 require_NoError(t, err) 22073 22074 _, err = js.Publish("test.1", []byte("x")) 22075 require_NoError(t, err) 22076 22077 var msg *nats.Msg 22078 22079 // this is the detail that triggers the off by one, remove this code and all tests pass 22080 msg = nats.NewMsg("test.1") 22081 msg.Data = []byte("y") 22082 msg.Header.Add(nats.ExpectedLastSeqHdr, "1") 22083 _, err = js.PublishMsg(msg) 22084 require_NoError(t, err) 22085 22086 _, err = js.Publish("test.2", []byte("x")) 22087 require_NoError(t, err) 22088 _, err = js.Publish("test.3", []byte("x")) 22089 require_NoError(t, err) 22090 _, err = js.Publish("test.4", []byte("x")) 22091 require_NoError(t, err) 22092 _, err = js.Publish("test.5", []byte("x")) 22093 require_NoError(t, err) 22094 22095 nfo, err := js.StreamInfo("TEST", &nats.StreamInfoRequest{SubjectsFilter: ">"}) 22096 require_NoError(t, err) 22097 22098 require_Equal(t, len(nfo.State.Subjects), 5) 22099 22100 sub, err := js.SubscribeSync("test.>", nats.DeliverLastPerSubject()) 22101 require_NoError(t, err) 22102 22103 msg, err = sub.NextMsg(time.Second) 22104 require_NoError(t, err) 22105 meta, err := msg.Metadata() 22106 require_NoError(t, err) 22107 require_Equal(t, meta.NumPending, 4) 22108 }) 22109 } 22110 } 22111 22112 func TestJetStreamDirectGetBatch(t *testing.T) { 22113 s := RunBasicJetStreamServer(t) 22114 defer s.Shutdown() 
22115 
22116 nc, js := jsClientConnect(t, s)
22117 defer nc.Close()
22118 
22119 _, err := js.AddStream(&nats.StreamConfig{
22120 Name: "TEST",
22121 Subjects: []string{"foo.*"},
22122 })
22123 require_NoError(t, err)
22124 
22125 // Add in messages
22126 for i := 0; i < 333; i++ {
22127 js.PublishAsync("foo.foo", []byte("HELLO"))
22128 js.PublishAsync("foo.bar", []byte("WORLD"))
22129 js.PublishAsync("foo.baz", []byte("AGAIN"))
22130 }
22131 select {
22132 case <-js.PublishAsyncComplete():
22133 case <-time.After(5 * time.Second):
22134 t.Fatalf("Did not receive completion signal")
22135 }
22136 
22137 // DirectGet is required for batch. Make sure we error correctly if not enabled.
22138 mreq := &JSApiMsgGetRequest{Seq: 1, Batch: 10}
22139 req, _ := json.Marshal(mreq)
22140 rr, err := nc.Request("$JS.API.STREAM.MSG.GET.TEST", req, time.Second)
22141 require_NoError(t, err)
22142 var resp JSApiMsgGetResponse
22143 json.Unmarshal(rr.Data, &resp)
22144 require_True(t, resp.Error != nil)
22145 require_Equal(t, resp.Error.Code, NewJSBadRequestError().Code)
22146 
22147 // Update stream to support direct.
22148 _, err = js.UpdateStream(&nats.StreamConfig{
22149 Name: "TEST",
22150 Subjects: []string{"foo.*"},
22151 AllowDirect: true,
22152 })
22153 require_NoError(t, err)
22154 
22155 // Direct subjects.
22156 sendRequest := func(mreq *JSApiMsgGetRequest) *nats.Subscription {
22157 t.Helper()
22158 req, _ := json.Marshal(mreq)
22159 // We will get multiple responses so can't do a normal request.
22160 reply := nats.NewInbox()
22161 sub, err := nc.SubscribeSync(reply)
22162 require_NoError(t, err)
22163 err = nc.PublishRequest("$JS.API.DIRECT.GET.TEST", reply, req)
22164 require_NoError(t, err)
22165 return sub
22166 }
22167 
22168 // Batch sizes greater than 1 will have a nil message as the end marker.
22169 checkResponses := func(sub *nats.Subscription, numPendingStart int, expected ...string) {
22170 t.Helper()
22171 defer sub.Unsubscribe()
22172 checkSubsPending(t, sub, len(expected))
22173 np := numPendingStart
22174 for i := 0; i < len(expected); i++ {
22175 msg, err := sub.NextMsg(10 * time.Millisecond)
22176 require_NoError(t, err)
22177 // If expected is _EMPTY_, that signals we expect an EOB marker.
22178 if subj := expected[i]; subj != _EMPTY_ {
22179 // Make sure subject is correct.
22180 require_Equal(t, expected[i], msg.Header.Get(JSSubject))
22181 // Should have a non-zero Data field.
22182 require_True(t, len(msg.Data) > 0)
22183 // Check we have NumPending and it's correct.
22184 require_Equal(t, strconv.Itoa(np), msg.Header.Get(JSNumPending))
22185 np--
22186 
22187 } else {
22188 // Check for properly formatted EOB marker.
22189 // Should have no body.
22190 require_Equal(t, len(msg.Data), 0)
22191 // We mark status as 204 - No Content
22192 require_Equal(t, msg.Header.Get("Status"), "204")
22193 // Check description is EOB
22194 require_Equal(t, msg.Header.Get("Description"), "EOB")
22195 // Check we have NumPending and it's correct.
22196 require_Equal(t, strconv.Itoa(np), msg.Header.Get(JSNumPending))
22197 }
22198 }
22199 }
22200 
22201 // Run some simple tests.
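// With 999 messages in the stream, the NumPending header counts down from 999 and each batch
// is terminated by a 204 "EOB" marker carrying the remaining count.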
22202 sub := sendRequest(&JSApiMsgGetRequest{Seq: 1, Batch: 2}) 22203 checkResponses(sub, 999, "foo.foo", "foo.bar", _EMPTY_) 22204 22205 sub = sendRequest(&JSApiMsgGetRequest{Seq: 1, Batch: 3}) 22206 checkResponses(sub, 999, "foo.foo", "foo.bar", "foo.baz", _EMPTY_) 22207 22208 // Test NextFor works 22209 sub = sendRequest(&JSApiMsgGetRequest{Seq: 1, Batch: 3, NextFor: "foo.*"}) 22210 checkResponses(sub, 999, "foo.foo", "foo.bar", "foo.baz", _EMPTY_) 22211 22212 sub = sendRequest(&JSApiMsgGetRequest{Seq: 1, Batch: 3, NextFor: "foo.baz"}) 22213 checkResponses(sub, 333, "foo.baz", "foo.baz", "foo.baz", _EMPTY_) 22214 22215 // Test stopping early by starting at 997 with only 3 messages. 22216 sub = sendRequest(&JSApiMsgGetRequest{Seq: 997, Batch: 10, NextFor: "foo.*"}) 22217 checkResponses(sub, 3, "foo.foo", "foo.bar", "foo.baz", _EMPTY_) 22218 } 22219 22220 func TestJetStreamDirectGetBatchMaxBytes(t *testing.T) { 22221 s := RunBasicJetStreamServer(t) 22222 defer s.Shutdown() 22223 22224 nc, js := jsClientConnect(t, s) 22225 defer nc.Close() 22226 22227 _, err := js.AddStream(&nats.StreamConfig{ 22228 Name: "TEST", 22229 Subjects: []string{"foo.*"}, 22230 AllowDirect: true, 22231 Compression: nats.S2Compression, 22232 }) 22233 require_NoError(t, err) 22234 22235 msg := bytes.Repeat([]byte("Z"), 512*1024) 22236 // Add in messages 22237 for i := 0; i < 333; i++ { 22238 js.PublishAsync("foo.foo", msg) 22239 js.PublishAsync("foo.bar", msg) 22240 js.PublishAsync("foo.baz", msg) 22241 } 22242 select { 22243 case <-js.PublishAsyncComplete(): 22244 case <-time.After(5 * time.Second): 22245 t.Fatalf("Did not receive completion signal") 22246 } 22247 22248 sendRequestAndCheck := func(mreq *JSApiMsgGetRequest, numExpected int) { 22249 t.Helper() 22250 req, _ := json.Marshal(mreq) 22251 // We will get multiple responses so can't do normal request. 22252 reply := nats.NewInbox() 22253 sub, err := nc.SubscribeSync(reply) 22254 require_NoError(t, err) 22255 defer sub.Unsubscribe() 22256 err = nc.PublishRequest("$JS.API.DIRECT.GET.TEST", reply, req) 22257 require_NoError(t, err) 22258 // Make sure we get correct number of responses. 22259 checkSubsPending(t, sub, numExpected) 22260 } 22261 22262 // Total msg size being sent back to us. 22263 msgSize := len(msg) + len("foo.foo") 22264 // We should get 1 msg and 1 EOB 22265 sendRequestAndCheck(&JSApiMsgGetRequest{Seq: 1, Batch: 3, MaxBytes: msgSize}, 2) 22266 22267 // Test NextFor tracks as well. 22268 sendRequestAndCheck(&JSApiMsgGetRequest{Seq: 1, NextFor: "foo.bar", Batch: 3, MaxBytes: 2 * msgSize}, 3) 22269 22270 // Now test no MaxBytes to inherit server max_num_pending. 
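// With MaxBytes unset the reply is bounded by the server's MaxPending option: roughly
// MaxPending/msgSize messages (the +1 below allows for the message that crosses the limit),
// plus the trailing EOB response.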
22271 expected := (int(s.getOpts().MaxPending) / msgSize) + 1
22272 sendRequestAndCheck(&JSApiMsgGetRequest{Seq: 1, Batch: 200}, expected+1)
22273 }
22274 
22275 func TestJetStreamConsumerNakThenAckFloorMove(t *testing.T) {
22276 s := RunBasicJetStreamServer(t)
22277 defer s.Shutdown()
22278 
22279 nc, js := jsClientConnect(t, s)
22280 defer nc.Close()
22281 
22282 _, err := js.AddStream(&nats.StreamConfig{
22283 Name: "TEST",
22284 Subjects: []string{"foo"},
22285 })
22286 require_NoError(t, err)
22287 
22288 for i := 0; i < 11; i++ {
22289 js.Publish("foo", []byte("OK"))
22290 }
22291 
22292 sub, err := js.PullSubscribe("foo", "dlc", nats.AckWait(100*time.Millisecond))
22293 require_NoError(t, err)
22294 
22295 msgs, err := sub.Fetch(11)
22296 require_NoError(t, err)
22297 
22298 // Nak the first one.
22299 msgs[0].Nak()
22300 
22301 // Ack 2-10
22302 for i := 1; i < 10; i++ {
22303 msgs[i].AckSync()
22304 }
22305 // Hold onto last.
22306 lastMsg := msgs[10]
22307 
22308 ci, err := sub.ConsumerInfo()
22309 require_NoError(t, err)
22310 
22311 require_Equal(t, ci.AckFloor.Consumer, 0)
22312 require_Equal(t, ci.AckFloor.Stream, 0)
22313 require_Equal(t, ci.NumAckPending, 2)
22314 
22315 // Grab the first message again and ack it this time.
22316 msgs, err = sub.Fetch(1)
22317 require_NoError(t, err)
22318 msgs[0].AckSync()
22319 
22320 ci, err = sub.ConsumerInfo()
22321 require_NoError(t, err)
22322 
22323 require_Equal(t, ci.Delivered.Consumer, 12)
22324 require_Equal(t, ci.Delivered.Stream, 11)
22325 require_Equal(t, ci.AckFloor.Consumer, 10)
22326 require_Equal(t, ci.AckFloor.Stream, 10)
22327 require_Equal(t, ci.NumAckPending, 1)
22328 
22329 // Make sure when we ack the last one we collapse the AckFloor.Consumer
22330 // with the higher delivered due to re-deliveries.
22331 lastMsg.AckSync()
22332 ci, err = sub.ConsumerInfo()
22333 require_NoError(t, err)
22334 
22335 require_Equal(t, ci.Delivered.Consumer, 12)
22336 require_Equal(t, ci.Delivered.Stream, 11)
22337 require_Equal(t, ci.AckFloor.Consumer, 12)
22338 require_Equal(t, ci.AckFloor.Stream, 11)
22339 require_Equal(t, ci.NumAckPending, 0)
22340 }
22341 
22342 func TestJetStreamSubjectFilteredPurgeClearsPendingAcks(t *testing.T) {
22343 s := RunBasicJetStreamServer(t)
22344 defer s.Shutdown()
22345 
22346 nc, js := jsClientConnect(t, s)
22347 defer nc.Close()
22348 
22349 _, err := js.AddStream(&nats.StreamConfig{
22350 Name: "TEST",
22351 Subjects: []string{"foo", "bar"},
22352 })
22353 require_NoError(t, err)
22354 
22355 for i := 0; i < 5; i++ {
22356 js.Publish("foo", []byte("OK"))
22357 js.Publish("bar", []byte("OK"))
22358 }
22359 
22360 // Note that there are no subject filters here; this is deliberate,
22361 // as previously the purge with filter code was checking for them.
22362 // We want to prove that unfiltered consumers also get purged.
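// After the filtered purge below, the five ack-pending messages on "foo" should be dropped,
// leaving only the five on "bar" outstanding.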
22363 ci, err := js.AddConsumer("TEST", &nats.ConsumerConfig{ 22364 Name: "my_consumer", 22365 AckPolicy: nats.AckExplicitPolicy, 22366 MaxAckPending: 10, 22367 }) 22368 require_NoError(t, err) 22369 require_Equal(t, ci.NumPending, 10) 22370 require_Equal(t, ci.NumAckPending, 0) 22371 22372 sub, err := js.PullSubscribe(">", "", nats.Bind("TEST", "my_consumer")) 22373 require_NoError(t, err) 22374 22375 msgs, err := sub.Fetch(10) 22376 require_NoError(t, err) 22377 require_Len(t, len(msgs), 10) 22378 22379 ci, err = js.ConsumerInfo("TEST", "my_consumer") 22380 require_NoError(t, err) 22381 require_Equal(t, ci.NumPending, 0) 22382 require_Equal(t, ci.NumAckPending, 10) 22383 22384 require_NoError(t, js.PurgeStream("TEST", &nats.StreamPurgeRequest{ 22385 Subject: "foo", 22386 })) 22387 22388 ci, err = js.ConsumerInfo("TEST", "my_consumer") 22389 require_NoError(t, err) 22390 require_Equal(t, ci.NumPending, 0) 22391 require_Equal(t, ci.NumAckPending, 5) 22392 22393 for i := 0; i < 5; i++ { 22394 js.Publish("foo", []byte("OK")) 22395 } 22396 msgs, err = sub.Fetch(5) 22397 require_NoError(t, err) 22398 require_Len(t, len(msgs), 5) 22399 22400 ci, err = js.ConsumerInfo("TEST", "my_consumer") 22401 require_NoError(t, err) 22402 require_Equal(t, ci.NumPending, 0) 22403 require_Equal(t, ci.NumAckPending, 10) 22404 } 22405 22406 // Helper function for TestJetStreamConsumerPause*, TestJetStreamClusterConsumerPause*, TestJetStreamSuperClusterConsumerPause* 22407 func jsTestPause_CreateOrUpdateConsumer(t *testing.T, nc *nats.Conn, action ConsumerAction, stream string, cc ConsumerConfig) *JSApiConsumerCreateResponse { 22408 t.Helper() 22409 j, err := json.Marshal(CreateConsumerRequest{ 22410 Stream: stream, 22411 Config: cc, 22412 Action: action, 22413 }) 22414 require_NoError(t, err) 22415 subj := fmt.Sprintf("$JS.API.CONSUMER.CREATE.%s.%s", stream, cc.Name) 22416 m, err := nc.Request(subj, j, time.Second*3) 22417 require_NoError(t, err) 22418 var res JSApiConsumerCreateResponse 22419 require_NoError(t, json.Unmarshal(m.Data, &res)) 22420 require_True(t, res.Config != nil) 22421 return &res 22422 } 22423 22424 // Helper function for TestJetStreamConsumerPause*, TestJetStreamClusterConsumerPause*, TestJetStreamSuperClusterConsumerPause* 22425 func jsTestPause_PauseConsumer(t *testing.T, nc *nats.Conn, stream, consumer string, deadline time.Time) time.Time { 22426 t.Helper() 22427 j, err := json.Marshal(JSApiConsumerPauseRequest{ 22428 PauseUntil: deadline, 22429 }) 22430 require_NoError(t, err) 22431 subj := fmt.Sprintf("$JS.API.CONSUMER.PAUSE.%s.%s", stream, consumer) 22432 msg, err := nc.Request(subj, j, time.Second) 22433 require_NoError(t, err) 22434 var res JSApiConsumerPauseResponse 22435 require_NoError(t, json.Unmarshal(msg.Data, &res)) 22436 return res.PauseUntil 22437 } 22438 22439 func TestJetStreamConsumerPauseViaConfig(t *testing.T) { 22440 s := RunBasicJetStreamServer(t) 22441 defer s.Shutdown() 22442 22443 nc, js := jsClientConnect(t, s) 22444 defer nc.Close() 22445 22446 _, err := js.AddStream(&nats.StreamConfig{ 22447 Name: "TEST", 22448 Subjects: []string{"foo"}, 22449 }) 22450 require_NoError(t, err) 22451 22452 t.Run("CreateShouldSucceed", func(t *testing.T) { 22453 deadline := time.Now().Add(time.Hour) 22454 ci := jsTestPause_CreateOrUpdateConsumer(t, nc, ActionCreate, "TEST", ConsumerConfig{ 22455 Name: "my_consumer_1", 22456 PauseUntil: &deadline, 22457 }) 22458 require_True(t, ci != nil) 22459 require_True(t, ci.Config != nil) 22460 require_True(t, ci.Config.PauseUntil != nil) 
22461 require_True(t, ci.Config.PauseUntil.Equal(deadline)) 22462 }) 22463 22464 t.Run("UpdateShouldFail", func(t *testing.T) { 22465 deadline := time.Now().Add(time.Hour) 22466 ci := jsTestPause_CreateOrUpdateConsumer(t, nc, ActionCreate, "TEST", ConsumerConfig{ 22467 Name: "my_consumer_2", 22468 }) 22469 require_True(t, ci != nil) 22470 require_True(t, ci.Config != nil) 22471 require_True(t, ci.Config.PauseUntil == nil || ci.Config.PauseUntil.IsZero()) 22472 22473 var cc ConsumerConfig 22474 j, err := json.Marshal(ci.Config) 22475 require_NoError(t, err) 22476 require_NoError(t, json.Unmarshal(j, &cc)) 22477 22478 pauseUntil := time.Now().Add(time.Hour) 22479 cc.PauseUntil = &pauseUntil 22480 ci2 := jsTestPause_CreateOrUpdateConsumer(t, nc, ActionUpdate, "TEST", cc) 22481 require_False(t, ci2.Config.PauseUntil != nil && ci2.Config.PauseUntil.Equal(deadline)) 22482 require_True(t, ci2.Config.PauseUntil == nil || ci2.Config.PauseUntil.Equal(time.Time{})) 22483 }) 22484 } 22485 22486 func TestJetStreamConsumerPauseViaEndpoint(t *testing.T) { 22487 s := RunBasicJetStreamServer(t) 22488 defer s.Shutdown() 22489 22490 nc, js := jsClientConnect(t, s) 22491 defer nc.Close() 22492 22493 _, err := js.AddStream(&nats.StreamConfig{ 22494 Name: "TEST", 22495 Subjects: []string{"push", "pull"}, 22496 }) 22497 require_NoError(t, err) 22498 22499 t.Run("PullConsumer", func(t *testing.T) { 22500 _, err := js.AddConsumer("TEST", &nats.ConsumerConfig{ 22501 Name: "pull_consumer", 22502 }) 22503 require_NoError(t, err) 22504 22505 sub, err := js.PullSubscribe("pull", "", nats.Bind("TEST", "pull_consumer")) 22506 require_NoError(t, err) 22507 22508 // This should succeed as there's no pause, so it definitely 22509 // shouldn't take more than a second. 22510 for i := 0; i < 10; i++ { 22511 _, err = js.Publish("pull", []byte("OK")) 22512 require_NoError(t, err) 22513 } 22514 msgs, err := sub.Fetch(10, nats.MaxWait(time.Second)) 22515 require_NoError(t, err) 22516 require_Equal(t, len(msgs), 10) 22517 22518 // Now we'll pause the consumer for 3 seconds. 22519 deadline := time.Now().Add(time.Second * 3) 22520 require_True(t, jsTestPause_PauseConsumer(t, nc, "TEST", "pull_consumer", deadline).Equal(deadline)) 22521 22522 // This should fail as we'll wait for only half of the deadline. 22523 for i := 0; i < 10; i++ { 22524 _, err = js.Publish("pull", []byte("OK")) 22525 require_NoError(t, err) 22526 } 22527 _, err = sub.Fetch(10, nats.MaxWait(time.Until(deadline)/2)) 22528 require_Error(t, err, nats.ErrTimeout) 22529 22530 // This should succeed after a short wait, and when we're done, 22531 // we should be after the deadline. 22532 msgs, err = sub.Fetch(10) 22533 require_NoError(t, err) 22534 require_Equal(t, len(msgs), 10) 22535 require_True(t, time.Now().After(deadline)) 22536 22537 // This should succeed as there's no pause, so it definitely 22538 // shouldn't take more than a second. 22539 for i := 0; i < 10; i++ { 22540 _, err = js.Publish("pull", []byte("OK")) 22541 require_NoError(t, err) 22542 } 22543 msgs, err = sub.Fetch(10, nats.MaxWait(time.Second)) 22544 require_NoError(t, err) 22545 require_Equal(t, len(msgs), 10) 22546 22547 require_True(t, jsTestPause_PauseConsumer(t, nc, "TEST", "pull_consumer", time.Time{}).Equal(time.Time{})) 22548 22549 // This should succeed as there's no pause, so it definitely 22550 // shouldn't take more than a second. 
22551 for i := 0; i < 10; i++ { 22552 _, err = js.Publish("pull", []byte("OK")) 22553 require_NoError(t, err) 22554 } 22555 msgs, err = sub.Fetch(10, nats.MaxWait(time.Second)) 22556 require_NoError(t, err) 22557 require_Equal(t, len(msgs), 10) 22558 }) 22559 22560 t.Run("PushConsumer", func(t *testing.T) { 22561 ch := make(chan *nats.Msg, 100) 22562 _, err = js.ChanSubscribe("push", ch, nats.BindStream("TEST"), nats.ConsumerName("push_consumer")) 22563 require_NoError(t, err) 22564 22565 // This should succeed as there's no pause, so it definitely 22566 // shouldn't take more than a second. 22567 for i := 0; i < 10; i++ { 22568 _, err = js.Publish("push", []byte("OK")) 22569 require_NoError(t, err) 22570 } 22571 for i := 0; i < 10; i++ { 22572 msg := require_ChanRead(t, ch, time.Second) 22573 require_NotEqual(t, msg, nil) 22574 } 22575 22576 // Now we'll pause the consumer for 3 seconds. 22577 deadline := time.Now().Add(time.Second * 3) 22578 require_True(t, jsTestPause_PauseConsumer(t, nc, "TEST", "push_consumer", deadline).Equal(deadline)) 22579 22580 // This should succeed after a short wait, and when we're done, 22581 // we should be after the deadline. 22582 for i := 0; i < 10; i++ { 22583 _, err = js.Publish("push", []byte("OK")) 22584 require_NoError(t, err) 22585 } 22586 for i := 0; i < 10; i++ { 22587 msg := require_ChanRead(t, ch, time.Second*5) 22588 require_NotEqual(t, msg, nil) 22589 require_True(t, time.Now().After(deadline)) 22590 } 22591 22592 // This should succeed as there's no pause, so it definitely 22593 // shouldn't take more than a second. 22594 for i := 0; i < 10; i++ { 22595 _, err = js.Publish("push", []byte("OK")) 22596 require_NoError(t, err) 22597 } 22598 for i := 0; i < 10; i++ { 22599 msg := require_ChanRead(t, ch, time.Second) 22600 require_NotEqual(t, msg, nil) 22601 } 22602 22603 require_True(t, jsTestPause_PauseConsumer(t, nc, "TEST", "push_consumer", time.Time{}).Equal(time.Time{})) 22604 22605 // This should succeed as there's no pause, so it definitely 22606 // shouldn't take more than a second. 22607 for i := 0; i < 10; i++ { 22608 _, err = js.Publish("push", []byte("OK")) 22609 require_NoError(t, err) 22610 } 22611 for i := 0; i < 10; i++ { 22612 msg := require_ChanRead(t, ch, time.Second) 22613 require_NotEqual(t, msg, nil) 22614 } 22615 }) 22616 } 22617 22618 func TestJetStreamConsumerPauseResumeViaEndpoint(t *testing.T) { 22619 s := RunBasicJetStreamServer(t) 22620 defer s.Shutdown() 22621 22622 nc, js := jsClientConnect(t, s) 22623 defer nc.Close() 22624 22625 _, err := js.AddStream(&nats.StreamConfig{ 22626 Name: "TEST", 22627 Subjects: []string{"TEST"}, 22628 }) 22629 require_NoError(t, err) 22630 22631 _, err = js.AddConsumer("TEST", &nats.ConsumerConfig{ 22632 Name: "CONSUMER", 22633 }) 22634 require_NoError(t, err) 22635 22636 getConsumerInfo := func() ConsumerInfo { 22637 var ci ConsumerInfo 22638 infoResp, err := nc.Request("$JS.API.CONSUMER.INFO.TEST.CONSUMER", nil, time.Second) 22639 require_NoError(t, err) 22640 err = json.Unmarshal(infoResp.Data, &ci) 22641 require_NoError(t, err) 22642 return ci 22643 } 22644 22645 // Ensure we are not paused 22646 require_False(t, getConsumerInfo().Paused) 22647 22648 // Now we'll pause the consumer for 30 seconds. 
22649 deadline := time.Now().Add(time.Second * 30) 22650 require_True(t, jsTestPause_PauseConsumer(t, nc, "TEST", "CONSUMER", deadline).Equal(deadline)) 22651 22652 // Ensure the consumer reflects being paused 22653 require_True(t, getConsumerInfo().Paused) 22654 22655 subj := fmt.Sprintf("$JS.API.CONSUMER.PAUSE.%s.%s", "TEST", "CONSUMER") 22656 _, err = nc.Request(subj, nil, time.Second) 22657 require_NoError(t, err) 22658 22659 // Ensure the consumer reflects being resumed 22660 require_False(t, getConsumerInfo().Paused) 22661 } 22662 22663 func TestJetStreamConsumerPauseHeartbeats(t *testing.T) { 22664 s := RunBasicJetStreamServer(t) 22665 defer s.Shutdown() 22666 22667 nc, js := jsClientConnect(t, s) 22668 defer nc.Close() 22669 22670 _, err := js.AddStream(&nats.StreamConfig{ 22671 Name: "TEST", 22672 Subjects: []string{"foo"}, 22673 }) 22674 require_NoError(t, err) 22675 22676 deadline := time.Now().Add(time.Hour) 22677 dsubj := "deliver_subj" 22678 22679 ci := jsTestPause_CreateOrUpdateConsumer(t, nc, ActionCreate, "TEST", ConsumerConfig{ 22680 Name: "my_consumer", 22681 PauseUntil: &deadline, 22682 Heartbeat: time.Millisecond * 100, 22683 DeliverSubject: dsubj, 22684 }) 22685 require_True(t, ci.Config.PauseUntil.Equal(deadline)) 22686 22687 ch := make(chan *nats.Msg, 10) 22688 _, err = nc.ChanSubscribe(dsubj, ch) 22689 require_NoError(t, err) 22690 22691 for i := 0; i < 20; i++ { 22692 msg := require_ChanRead(t, ch, time.Millisecond*200) 22693 require_Equal(t, msg.Header.Get("Status"), "100") 22694 require_Equal(t, msg.Header.Get("Description"), "Idle Heartbeat") 22695 } 22696 } 22697 22698 func TestJetStreamConsumerPauseAdvisories(t *testing.T) { 22699 s := RunBasicJetStreamServer(t) 22700 defer s.Shutdown() 22701 22702 nc, js := jsClientConnect(t, s) 22703 defer nc.Close() 22704 22705 checkAdvisory := func(msg *nats.Msg, shouldBePaused bool, deadline time.Time) { 22706 t.Helper() 22707 var advisory JSConsumerPauseAdvisory 22708 require_NoError(t, json.Unmarshal(msg.Data, &advisory)) 22709 require_Equal(t, advisory.Stream, "TEST") 22710 require_Equal(t, advisory.Consumer, "my_consumer") 22711 require_Equal(t, advisory.Paused, shouldBePaused) 22712 require_True(t, advisory.PauseUntil.Equal(deadline)) 22713 } 22714 22715 _, err := js.AddStream(&nats.StreamConfig{ 22716 Name: "TEST", 22717 Subjects: []string{"foo"}, 22718 }) 22719 require_NoError(t, err) 22720 22721 ch := make(chan *nats.Msg, 10) 22722 _, err = nc.ChanSubscribe(JSAdvisoryConsumerPausePre+".TEST.my_consumer", ch) 22723 require_NoError(t, err) 22724 22725 deadline := time.Now().Add(time.Second) 22726 jsTestPause_CreateOrUpdateConsumer(t, nc, ActionCreate, "TEST", ConsumerConfig{ 22727 Name: "my_consumer", 22728 PauseUntil: &deadline, 22729 }) 22730 22731 // First advisory should tell us that the consumer was paused 22732 // on creation. 22733 msg := require_ChanRead(t, ch, time.Second*2) 22734 checkAdvisory(msg, true, deadline) 22735 22736 // The second one for the unpause. 22737 msg = require_ChanRead(t, ch, time.Second*2) 22738 checkAdvisory(msg, false, deadline) 22739 22740 // Now we'll pause the consumer using the API. 22741 deadline = time.Now().Add(time.Second) 22742 require_True(t, jsTestPause_PauseConsumer(t, nc, "TEST", "my_consumer", deadline).Equal(deadline)) 22743 22744 // Third advisory should tell us about the pause via the API. 22745 msg = require_ChanRead(t, ch, time.Second*2) 22746 checkAdvisory(msg, true, deadline) 22747 22748 // Finally that should unpause. 
22749 msg = require_ChanRead(t, ch, time.Second*2) 22750 checkAdvisory(msg, false, deadline) 22751 } 22752 22753 func TestJetStreamConsumerSurvivesRestart(t *testing.T) { 22754 s := RunBasicJetStreamServer(t) 22755 defer s.Shutdown() 22756 22757 nc, js := jsClientConnect(t, s) 22758 defer nc.Close() 22759 22760 _, err := js.AddStream(&nats.StreamConfig{ 22761 Name: "TEST", 22762 Subjects: []string{"foo"}, 22763 }) 22764 require_NoError(t, err) 22765 22766 deadline := time.Now().Add(time.Hour) 22767 jsTestPause_CreateOrUpdateConsumer(t, nc, ActionCreate, "TEST", ConsumerConfig{ 22768 Name: "my_consumer", 22769 PauseUntil: &deadline, 22770 }) 22771 22772 sd := s.JetStreamConfig().StoreDir 22773 s.Shutdown() 22774 s = RunJetStreamServerOnPort(-1, sd) 22775 defer s.Shutdown() 22776 22777 stream, err := s.gacc.lookupStream("TEST") 22778 require_NoError(t, err) 22779 22780 consumer := stream.lookupConsumer("my_consumer") 22781 require_NotEqual(t, consumer, nil) 22782 22783 consumer.mu.RLock() 22784 timer := consumer.uptmr 22785 consumer.mu.RUnlock() 22786 require_True(t, timer != nil) 22787 } 22788 22789 func TestJetStreamDirectGetMulti(t *testing.T) { 22790 cases := []struct { 22791 name string 22792 cfg *nats.StreamConfig 22793 }{ 22794 {name: "MemoryStore", 22795 cfg: &nats.StreamConfig{ 22796 Name: "TEST", 22797 Subjects: []string{"foo.*"}, 22798 AllowDirect: true, 22799 Storage: nats.MemoryStorage, 22800 }}, 22801 {name: "FileStore", 22802 cfg: &nats.StreamConfig{ 22803 Name: "TEST", 22804 Subjects: []string{"foo.*"}, 22805 AllowDirect: true, 22806 }}, 22807 } 22808 for _, c := range cases { 22809 t.Run(c.name, func(t *testing.T) { 22810 22811 s := RunBasicJetStreamServer(t) 22812 defer s.Shutdown() 22813 22814 nc, js := jsClientConnect(t, s) 22815 defer nc.Close() 22816 22817 _, err := js.AddStream(c.cfg) 22818 require_NoError(t, err) 22819 22820 // Add in messages 22821 for i := 0; i < 33; i++ { 22822 js.PublishAsync("foo.foo", []byte(fmt.Sprintf("HELLO-%d", i))) 22823 js.PublishAsync("foo.bar", []byte(fmt.Sprintf("WORLD-%d", i))) 22824 js.PublishAsync("foo.baz", []byte(fmt.Sprintf("AGAIN-%d", i))) 22825 } 22826 select { 22827 case <-js.PublishAsyncComplete(): 22828 case <-time.After(5 * time.Second): 22829 t.Fatalf("Did not receive completion signal") 22830 } 22831 22832 // Direct subjects. 22833 sendRequest := func(mreq *JSApiMsgGetRequest) *nats.Subscription { 22834 t.Helper() 22835 req, _ := json.Marshal(mreq) 22836 // We will get multiple responses so can't do normal request. 22837 reply := nats.NewInbox() 22838 sub, err := nc.SubscribeSync(reply) 22839 require_NoError(t, err) 22840 err = nc.PublishRequest("$JS.API.DIRECT.GET.TEST", reply, req) 22841 require_NoError(t, err) 22842 return sub 22843 } 22844 22845 // Subject / Sequence pair 22846 type p struct { 22847 subj string 22848 seq int 22849 } 22850 var eob p 22851 22852 // Multi-Get will have a nil message as the end marker regardless. 22853 checkResponses := func(sub *nats.Subscription, numPendingStart int, expected ...p) { 22854 t.Helper() 22855 defer sub.Unsubscribe() 22856 checkSubsPending(t, sub, len(expected)) 22857 np := numPendingStart 22858 for i := 0; i < len(expected); i++ { 22859 msg, err := sub.NextMsg(10 * time.Millisecond) 22860 require_NoError(t, err) 22861 // If expected is _EMPTY_ that signals we expect a EOB marker. 22862 if subj := expected[i].subj; subj != _EMPTY_ { 22863 // Make sure subject is correct. 22864 require_Equal(t, subj, msg.Header.Get(JSSubject)) 22865 // Make sure sequence is correct. 
22866 				require_Equal(t, strconv.Itoa(expected[i].seq), msg.Header.Get(JSSequence))
22867 				// Should have Data field non-zero
22868 				require_True(t, len(msg.Data) > 0)
22869 				// Check we have NumPending and its correct.
22870 				require_Equal(t, strconv.Itoa(np), msg.Header.Get(JSNumPending))
22871 				if np > 0 {
22872 					np--
22873 				}
22874 			} else {
22875 				// Check for properly formatted EOB marker.
22876 				// Should have no body.
22877 				require_Equal(t, len(msg.Data), 0)
22878 				// We mark status as 204 - No Content
22879 				require_Equal(t, msg.Header.Get("Status"), "204")
22880 				// Check description is EOB
22881 				require_Equal(t, msg.Header.Get("Description"), "EOB")
22882 				// Check we have NumPending and its correct.
22883 				require_Equal(t, strconv.Itoa(np), msg.Header.Get(JSNumPending))
22884 			}
22885 		}
22886 	}
22887 
22888 	sub := sendRequest(&JSApiMsgGetRequest{MultiLastFor: []string{"foo.*"}})
22889 	checkResponses(sub, 2, p{"foo.foo", 97}, p{"foo.bar", 98}, p{"foo.baz", 99}, eob)
22890 	// Check with UpToSeq
22891 	sub = sendRequest(&JSApiMsgGetRequest{MultiLastFor: []string{"foo.*"}, UpToSeq: 3})
22892 	checkResponses(sub, 2, p{"foo.foo", 1}, p{"foo.bar", 2}, p{"foo.baz", 3}, eob)
22893 
22894 	// Test No Results.
22895 	sub = sendRequest(&JSApiMsgGetRequest{MultiLastFor: []string{"bar.*"}})
22896 	checkSubsPending(t, sub, 1)
22897 	msg, err := sub.NextMsg(10 * time.Millisecond)
22898 	require_NoError(t, err)
22899 	// Check for properly formatted No Results.
22900 	// Should have no body.
22901 	require_Equal(t, len(msg.Data), 0)
22902 	// We mark status as 404 - No Results
22903 	require_Equal(t, msg.Header.Get("Status"), "404")
22904 	// Check description is No Results
22905 	require_Equal(t, msg.Header.Get("Description"), "No Results")
22906 		})
22907 	}
22908 }
22909 
22910 func TestJetStreamDirectGetMultiUpToTime(t *testing.T) {
22911 	s := RunBasicJetStreamServer(t)
22912 	defer s.Shutdown()
22913 
22914 	nc, js := jsClientConnect(t, s)
22915 	defer nc.Close()
22916 
22917 	_, err := js.AddStream(&nats.StreamConfig{
22918 		Name:        "TEST",
22919 		Subjects:    []string{"foo.*"},
22920 		AllowDirect: true,
22921 	})
22922 	require_NoError(t, err)
22923 
22924 	js.Publish("foo.foo", []byte("1"))
22925 	js.Publish("foo.bar", []byte("1"))
22926 	js.Publish("foo.baz", []byte("1"))
22927 	start := time.Now()
22928 	time.Sleep(time.Second)
22929 	js.Publish("foo.foo", []byte("2"))
22930 	js.Publish("foo.bar", []byte("2"))
22931 	js.Publish("foo.baz", []byte("2"))
22932 	mid := time.Now()
22933 	time.Sleep(time.Second)
22934 	js.Publish("foo.foo", []byte("3"))
22935 	js.Publish("foo.bar", []byte("3"))
22936 	js.Publish("foo.baz", []byte("3"))
22937 	end := time.Now()
22938 
22939 	// Direct subjects.
22940 	sendRequest := func(mreq *JSApiMsgGetRequest) *nats.Subscription {
22941 		t.Helper()
22942 		req, _ := json.Marshal(mreq)
22943 		// We will get multiple responses so can't do normal request.
22944 		reply := nats.NewInbox()
22945 		sub, err := nc.SubscribeSync(reply)
22946 		require_NoError(t, err)
22947 		err = nc.PublishRequest("$JS.API.DIRECT.GET.TEST", reply, req)
22948 		require_NoError(t, err)
22949 		return sub
22950 	}
22951 
22952 	checkResponses := func(sub *nats.Subscription, val string, expected ...string) {
22953 		t.Helper()
22954 		defer sub.Unsubscribe()
22955 		checkSubsPending(t, sub, len(expected))
22956 		for i := 0; i < len(expected); i++ {
22957 			msg, err := sub.NextMsg(10 * time.Millisecond)
22958 			require_NoError(t, err)
22959 			// If expected is _EMPTY_ that signals we expect an EOB marker.
22960 			if subj := expected[i]; subj != _EMPTY_ {
22961 				// Make sure subject is correct.
22962 				require_Equal(t, subj, msg.Header.Get(JSSubject))
22963 				// Should have Data field non-zero
22964 				require_True(t, len(msg.Data) > 0)
22965 				// Make sure the value matches.
22966 				require_Equal(t, string(msg.Data), val)
22967 			}
22968 		}
22969 	}
22970 
22971 	// Make sure you can't set both.
22972 	sub := sendRequest(&JSApiMsgGetRequest{Seq: 1, MultiLastFor: []string{"foo.*"}, UpToSeq: 3, UpToTime: &start})
22973 	checkSubsPending(t, sub, 1)
22974 	msg, err := sub.NextMsg(10 * time.Millisecond)
22975 	require_NoError(t, err)
22976 	// Check for a properly formatted Bad Request error.
22977 	// Should have no body.
22978 	require_Equal(t, len(msg.Data), 0)
22979 	// We mark status as 408 - Bad Request
22980 	require_Equal(t, msg.Header.Get("Status"), "408")
22981 	// Check description is Bad Request
22982 	require_Equal(t, msg.Header.Get("Description"), "Bad Request")
22983 
22984 	// Valid responses.
22985 	sub = sendRequest(&JSApiMsgGetRequest{Seq: 1, MultiLastFor: []string{"foo.*"}, UpToTime: &start})
22986 	checkResponses(sub, "1", "foo.foo", "foo.bar", "foo.baz", _EMPTY_)
22987 
22988 	sub = sendRequest(&JSApiMsgGetRequest{Seq: 1, MultiLastFor: []string{"foo.*"}, UpToTime: &mid})
22989 	checkResponses(sub, "2", "foo.foo", "foo.bar", "foo.baz", _EMPTY_)
22990 
22991 	sub = sendRequest(&JSApiMsgGetRequest{Seq: 1, MultiLastFor: []string{"foo.*"}, UpToTime: &end})
22992 	checkResponses(sub, "3", "foo.foo", "foo.bar", "foo.baz", _EMPTY_)
22993 }
22994 
22995 func TestJetStreamDirectGetMultiMaxAllowed(t *testing.T) {
22996 	s := RunBasicJetStreamServer(t)
22997 	defer s.Shutdown()
22998 
22999 	nc, js := jsClientConnect(t, s)
23000 	defer nc.Close()
23001 
23002 	_, err := js.AddStream(&nats.StreamConfig{
23003 		Name:        "TEST",
23004 		Subjects:    []string{"foo.*"},
23005 		AllowDirect: true,
23006 	})
23007 	require_NoError(t, err)
23008 
23009 	// from stream.go - const maxAllowedResponses = 1024, so make sure we publish > 1024
23010 	// Add in messages
23011 	for i := 1; i <= 1025; i++ {
23012 		js.PublishAsync(fmt.Sprintf("foo.%d", i), []byte("OK"))
23013 	}
23014 	select {
23015 	case <-js.PublishAsyncComplete():
23016 	case <-time.After(5 * time.Second):
23017 		t.Fatalf("Did not receive completion signal")
23018 	}
23019 
23020 	req, _ := json.Marshal(&JSApiMsgGetRequest{Seq: 1, MultiLastFor: []string{"foo.*"}})
23021 	msg, err := nc.Request("$JS.API.DIRECT.GET.TEST", req, time.Second)
23022 	require_NoError(t, err)
23023 
23024 	// Check for properly formatted Too Many Results error.
23025 	// Should have no body.
23026 	require_Equal(t, len(msg.Data), 0)
23027 	// We mark status as 413 - Too Many Results
23028 	require_Equal(t, msg.Header.Get("Status"), "413")
23029 	// Check description is Too Many Results
23030 	require_Equal(t, msg.Header.Get("Description"), "Too Many Results")
23031 }
23032 
23033 func TestJetStreamDirectGetMultiPaging(t *testing.T) {
23034 	s := RunBasicJetStreamServer(t)
23035 	defer s.Shutdown()
23036 
23037 	nc, js := jsClientConnect(t, s)
23038 	defer nc.Close()
23039 
23040 	_, err := js.AddStream(&nats.StreamConfig{
23041 		Name:        "TEST",
23042 		Subjects:    []string{"foo.*"},
23043 		AllowDirect: true,
23044 	})
23045 	require_NoError(t, err)
23046 
23047 	// We will queue up 500 messages, each 512KB in size, and request them in a multi-get.
23048 	// This will not hit the max allowed limit of 1024, but will bump up against max bytes and only return partial results.
23049 	// We want to make sure we can pick up where we left off.
23050 
23051 	// Add in messages
23052 	data, sent := bytes.Repeat([]byte("Z"), 512*1024), 500
23053 	for i := 1; i <= sent; i++ {
23054 		js.PublishAsync(fmt.Sprintf("foo.%d", i), data)
23055 	}
23056 	select {
23057 	case <-js.PublishAsyncComplete():
23058 	case <-time.After(5 * time.Second):
23059 		t.Fatalf("Did not receive completion signal")
23060 	}
23061 	// Wait for all replicas to be correct.
23062 	time.Sleep(time.Second)
23063 
23064 	// Direct subjects.
23065 	sendRequest := func(mreq *JSApiMsgGetRequest) *nats.Subscription {
23066 		t.Helper()
23067 		req, _ := json.Marshal(mreq)
23068 		// We will get multiple responses so can't do normal request.
23069 		reply := nats.NewInbox()
23070 		sub, err := nc.SubscribeSync(reply)
23071 		require_NoError(t, err)
23072 		err = nc.PublishRequest("$JS.API.DIRECT.GET.TEST", reply, req)
23073 		require_NoError(t, err)
23074 		return sub
23075 	}
23076 
23077 	// Set up variables that control processPartial
23078 	start, seq, np, b, bsz := 1, 1, sent-1, 0, 128
23079 
23080 	processPartial := func(expected int) {
23081 		t.Helper()
23082 		sub := sendRequest(&JSApiMsgGetRequest{Seq: uint64(start), Batch: b, MultiLastFor: []string{"foo.*"}})
23083 		checkSubsPending(t, sub, expected)
23084 		// Check partial.
23085 		// We should receive seqs seq through seq+(expected-2).
23086 		for ; seq < start+(expected-1); seq++ {
23087 			msg, err := sub.NextMsg(10 * time.Millisecond)
23088 			require_NoError(t, err)
23089 			// Make sure sequence is correct.
23090 			require_Equal(t, strconv.Itoa(int(seq)), msg.Header.Get(JSSequence))
23091 			// Check we have NumPending and its correct.
23092 			require_Equal(t, strconv.Itoa(int(np)), msg.Header.Get(JSNumPending))
23093 			if np > 0 {
23094 				np--
23095 			}
23096 		}
23097 		// Now check EOB
23098 		msg, err := sub.NextMsg(10 * time.Millisecond)
23099 		require_NoError(t, err)
23100 		// We mark status as 204 - No Content
23101 		require_Equal(t, msg.Header.Get("Status"), "204")
23102 		// Check description is EOB
23103 		require_Equal(t, msg.Header.Get("Description"), "EOB")
23104 		// Check we have NumPending and its correct.
23105 		require_Equal(t, strconv.Itoa(np), msg.Header.Get(JSNumPending))
23106 		// Check we have LastSequence and its correct.
23107 		require_Equal(t, strconv.Itoa(seq-1), msg.Header.Get(JSLastSequence))
23108 		// Check we have UpToSequence and its correct.
23109 		require_Equal(t, strconv.Itoa(sent), msg.Header.Get(JSUpToSequence))
23110 		// Update start
23111 		start = seq
23112 	}
23113 
23114 	processPartial(bsz + 1) // 128 + EOB
23115 	processPartial(bsz + 1) // 128 + EOB
23116 	processPartial(bsz + 1) // 128 + EOB
23117 	// Last one will be a partial block.
23118 	processPartial(116 + 1)
23119 
23120 	// Now reset and test that batch is honored as well.
23121 	start, seq, np, b = 1, 1, sent-1, 100
23122 	for i := 0; i < 5; i++ {
23123 		processPartial(b + 1) // 100 + EOB
23124 	}
23125 }